@@ -1447,6 +1447,7 @@ OBJS = \
gimple-iterator.o \
gimple-fold.o \
gimple-harden-conditionals.o \
+ gimple-harden-control-flow.o \
gimple-laddress.o \
gimple-loop-interchange.o \
gimple-loop-jam.o \
@@ -363,6 +363,11 @@ basic blocks take note as control flows through them, and, before
returning, subprograms verify that the taken notes are consistent with
the control-flow graph.
+The performance impact of verification on leaf subprograms can be much
+higher, while the averted risks are much lower on them.
+Instrumentation can be disabled for leaf subprograms with
+:switch:`-fhardcfr-skip-leaf`.
+
Functions with too many basic blocks, or with multiple return points,
call a run-time function to perform the verification. Other functions
perform the verification inline before returning.
@@ -487,17 +492,20 @@ gets modified as follows:
end;
-Verification may also be performed before No_Return calls, whether
-only nothrow ones, with
-:switch:`-fhardcfr-check-noreturn-calls=nothrow`, or all of them, with
-:switch:`-fhardcfr-check-noreturn-calls=always`. The default is
-:switch:`-fhardcfr-check-noreturn-calls=never` for this feature, that
-disables checking before No_Return calls.
+Verification may also be performed before No_Return calls, whether all
+of them, with :switch:`-fhardcfr-check-noreturn-calls=always`; all but
+internal subprograms involved in exception-raising or -reraising or
+subprograms explicitly marked with both :samp:`No_Return` and
+:samp:`Machine_Attribute` :samp:`expected_throw` pragmas, with
+:switch:`-fhardcfr-check-noreturn-calls=no-xthrow` (default); only
+nothrow ones, with :switch:`-fhardcfr-check-noreturn-calls=nothrow`;
+or none, with :switch:`-fhardcfr-check-noreturn-calls=never`.
When a No_Return call returns control to its caller through an
exception, verification may have already been performed before the
-call, if :switch:`-fhardcfr-check-noreturn-calls=always` is in effect.
-The compiler arranges for already-checked No_Return calls without a
+call, if :switch:`-fhardcfr-check-noreturn-calls=always` or
+:switch:`-fhardcfr-check-noreturn-calls=no-xthrow` is in effect. The
+compiler arranges for already-checked No_Return calls without a
preexisting handler to bypass the implicitly-added cleanup handler and
thus the redundant check, but a local exception or cleanup handler, if
present, will modify the set of visited blocks, and checking will take
@@ -519,6 +519,7 @@ gigi (Node_Id gnat_root,
ftype, NULL_TREE,
is_default, true, true, true, false, false, NULL,
Empty);
+ set_call_expr_flags (reraise_zcx_decl, ECF_NORETURN | ECF_XTHROW);
/* Dummy objects to materialize "others" and "all others" in the exception
tables. These are exported by a-exexpr-gcc.adb, so see this unit for
@@ -721,6 +722,7 @@ build_raise_check (int check, enum exception_info_kind kind)
= create_subprog_decl (get_identifier (Name_Buffer), NULL_TREE, ftype,
NULL_TREE, is_default, true, true, true, false,
false, NULL, Empty);
+ set_call_expr_flags (result, ECF_NORETURN | ECF_XTHROW);
return result;
}
@@ -87,6 +87,7 @@ tree gnat_raise_decls_ext[(int) LAST_REASON_CODE + 1];
/* Forward declarations for handlers of attributes. */
static tree handle_const_attribute (tree *, tree, tree, int, bool *);
static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *);
+static tree handle_expected_throw_attribute (tree *, tree, tree, int, bool *);
static tree handle_pure_attribute (tree *, tree, tree, int, bool *);
static tree handle_novops_attribute (tree *, tree, tree, int, bool *);
static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *);
@@ -143,6 +144,8 @@ const struct attribute_spec gnat_internal_attribute_table[] =
handle_const_attribute, NULL },
{ "nothrow", 0, 0, true, false, false, false,
handle_nothrow_attribute, NULL },
+ { "expected_throw", 0, 0, true, false, false, false,
+ handle_expected_throw_attribute, NULL },
{ "pure", 0, 0, true, false, false, false,
handle_pure_attribute, NULL },
{ "no vops", 0, 0, true, false, false, false,
@@ -6379,6 +6382,22 @@ handle_nothrow_attribute (tree *node, tree ARG_UNUSED (name),
return NULL_TREE;
}
+/* Handle a "expected_throw" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_expected_throw_attribute (tree *node, tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ /* No flag to set here. */;
+ else
+ *no_add_attrs = true;
+
+ return NULL_TREE;
+}
+
/* Handle a "pure" attribute; arguments as in
struct attribute_spec.handler. */
@@ -229,6 +229,7 @@ package body Ada.Exceptions is
procedure Propagate_Exception (Excep : Exception_Occurrence);
pragma No_Return (Propagate_Exception);
+ pragma Machine_Attribute (Propagate_Exception, "expected_throw");
-- This procedure propagates the exception represented by Excep
end Exception_Propagation;
@@ -256,6 +257,8 @@ package body Ada.Exceptions is
procedure Complete_And_Propagate_Occurrence (X : EOA);
pragma No_Return (Complete_And_Propagate_Occurrence);
+ pragma Machine_Attribute (Complete_And_Propagate_Occurrence,
+ "expected_throw");
-- This is a simple wrapper to Complete_Occurrence and
-- Exception_Propagation.Propagate_Exception.
@@ -280,6 +283,7 @@ package body Ada.Exceptions is
(Ada, Raise_Exception_No_Defer,
"ada__exceptions__raise_exception_no_defer");
pragma No_Return (Raise_Exception_No_Defer);
+ pragma Machine_Attribute (Raise_Exception_No_Defer, "expected_throw");
-- Similar to Raise_Exception, but with no abort deferral
procedure Raise_From_Signal_Handler
@@ -288,6 +292,7 @@ package body Ada.Exceptions is
pragma Export
(C, Raise_From_Signal_Handler, "__gnat_raise_from_signal_handler");
pragma No_Return (Raise_From_Signal_Handler);
+ pragma Machine_Attribute (Raise_From_Signal_Handler, "expected_throw");
-- This routine is used to raise an exception from a signal handler. The
-- signal handler has already stored the machine state (i.e. the state that
-- corresponds to the location at which the signal was raised). E is the
@@ -301,6 +306,7 @@ package body Ada.Exceptions is
procedure Raise_With_Msg (E : Exception_Id);
pragma No_Return (Raise_With_Msg);
+ pragma Machine_Attribute (Raise_With_Msg, "expected_throw");
pragma Export (C, Raise_With_Msg, "__gnat_raise_with_msg");
-- Raises an exception with given exception id value. A message
-- is associated with the raise, and has already been stored in the
@@ -314,6 +320,7 @@ package body Ada.Exceptions is
C : Integer := 0;
M : System.Address := System.Null_Address);
pragma No_Return (Raise_With_Location_And_Msg);
+ pragma Machine_Attribute (Raise_With_Location_And_Msg, "expected_throw");
-- Raise an exception with given exception id value. A filename and line
-- number is associated with the raise and is stored in the exception
-- occurrence and in addition a column and a string message M may be
@@ -321,6 +328,7 @@ package body Ada.Exceptions is
procedure Raise_Constraint_Error (File : System.Address; Line : Integer);
pragma No_Return (Raise_Constraint_Error);
+ pragma Machine_Attribute (Raise_Constraint_Error, "expected_throw");
pragma Export (C, Raise_Constraint_Error, "__gnat_raise_constraint_error");
-- Raise constraint error with file:line information
@@ -330,12 +338,14 @@ package body Ada.Exceptions is
Column : Integer;
Msg : System.Address);
pragma No_Return (Raise_Constraint_Error_Msg);
+ pragma Machine_Attribute (Raise_Constraint_Error_Msg, "expected_throw");
pragma Export
(C, Raise_Constraint_Error_Msg, "__gnat_raise_constraint_error_msg");
-- Raise constraint error with file:line:col + msg information
procedure Raise_Program_Error (File : System.Address; Line : Integer);
pragma No_Return (Raise_Program_Error);
+ pragma Machine_Attribute (Raise_Program_Error, "expected_throw");
pragma Export (C, Raise_Program_Error, "__gnat_raise_program_error");
-- Raise program error with file:line information
@@ -344,12 +354,14 @@ package body Ada.Exceptions is
Line : Integer;
Msg : System.Address);
pragma No_Return (Raise_Program_Error_Msg);
+ pragma Machine_Attribute (Raise_Program_Error_Msg, "expected_throw");
pragma Export
(C, Raise_Program_Error_Msg, "__gnat_raise_program_error_msg");
-- Raise program error with file:line + msg information
procedure Raise_Storage_Error (File : System.Address; Line : Integer);
pragma No_Return (Raise_Storage_Error);
+ pragma Machine_Attribute (Raise_Storage_Error, "expected_throw");
pragma Export (C, Raise_Storage_Error, "__gnat_raise_storage_error");
-- Raise storage error with file:line information
@@ -358,6 +370,7 @@ package body Ada.Exceptions is
Line : Integer;
Msg : System.Address);
pragma No_Return (Raise_Storage_Error_Msg);
+ pragma Machine_Attribute (Raise_Storage_Error_Msg, "expected_throw");
pragma Export
(C, Raise_Storage_Error_Msg, "__gnat_raise_storage_error_msg");
-- Raise storage error with file:line + reason msg information
@@ -385,6 +398,7 @@ package body Ada.Exceptions is
procedure Reraise;
pragma No_Return (Reraise);
+ pragma Machine_Attribute (Reraise, "expected_throw");
pragma Export (C, Reraise, "__gnat_reraise");
-- Reraises the exception referenced by the Current_Excep field
-- of the TSD (all fields of this exception occurrence are set).
@@ -632,6 +646,96 @@ package body Ada.Exceptions is
pragma No_Return (Rcheck_CE_Invalid_Data_Ext);
pragma No_Return (Rcheck_CE_Range_Check_Ext);
+ -- These procedures are all expected to raise an exception.
+ -- These attributes are not visible to callers; they are made
+ -- visible in trans.c:build_raise_check.
+
+ pragma Machine_Attribute (Rcheck_CE_Access_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Null_Access_Parameter,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Discriminant_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Divide_By_Zero,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Explicit_Raise,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Index_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Invalid_Data,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Length_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Null_Exception_Id,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Null_Not_Allowed,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Overflow_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Partition_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Range_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Tag_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Access_Before_Elaboration,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Accessibility_Check,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Address_Of_Intrinsic,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Aliased_Parameters,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_All_Guards_Closed,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Bad_Predicated_Generic_Type,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Build_In_Place_Mismatch,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Current_Task_In_Entry_Body,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Duplicated_Entry_Address,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Explicit_Raise,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Implicit_Return,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Misaligned_Address_Value,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Missing_Return,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Non_Transportable_Actual,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Overlaid_Controlled_Object,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Potentially_Blocking_Operation,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Stream_Operation_Not_Allowed,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Stubbed_Subprogram_Called,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Unchecked_Union_Restriction,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_PE_Finalize_Raised_Exception,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_SE_Empty_Storage_Pool,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_SE_Explicit_Raise,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_SE_Infinite_Recursion,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_SE_Object_Too_Large,
+ "expected_throw");
+
+ pragma Machine_Attribute (Rcheck_CE_Access_Check_Ext,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Index_Check_Ext,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Invalid_Data_Ext,
+ "expected_throw");
+ pragma Machine_Attribute (Rcheck_CE_Range_Check_Ext,
+ "expected_throw");
+
-- Make all of these procedures callable from strub contexts.
-- These attributes are not visible to callers; they are made
-- visible in trans.c:build_raise_check.
@@ -163,6 +163,14 @@ private
Null_Id : constant Exception_Id := null;
+ pragma Machine_Attribute (Raise_Exception, "expected_throw");
+ pragma Machine_Attribute (Reraise_Occurrence, "expected_throw");
+ -- Tell the compiler that an exception is likely after calling
+ -- these subprograms. This could eventually be used for hot/cold
+ -- partitioning. For now, this only enables the control flow
+ -- redundancy to avoid duplicating a check before the No_Return
+ -- call and in the exception handler for the call.
+
-------------------------
-- Private Subprograms --
-------------------------
@@ -177,6 +185,7 @@ private
procedure Raise_Exception_Always (E : Exception_Id; Message : String := "");
pragma No_Return (Raise_Exception_Always);
+ pragma Machine_Attribute (Raise_Exception_Always, "expected_throw");
pragma Export (Ada, Raise_Exception_Always, "__gnat_raise_exception");
-- This differs from Raise_Exception only in that the caller has determined
-- that for sure the parameter E is not null, and that therefore no check
@@ -195,6 +204,9 @@ private
"__gnat_raise_from_controlled_operation");
-- Raise Program_Error, providing information about X (an exception raised
-- during a controlled operation) in the exception message.
+ pragma Machine_Attribute (Raise_From_Controlled_Operation,
+ "expected_throw");
+ -- Mark it like internal exception-raising subprograms
procedure Reraise_Library_Exception_If_Any;
pragma Export
@@ -205,6 +217,7 @@ private
procedure Reraise_Occurrence_Always (X : Exception_Occurrence);
pragma No_Return (Reraise_Occurrence_Always);
+ pragma Machine_Attribute (Reraise_Occurrence_Always, "expected_throw");
-- This differs from Raise_Occurrence only in that the caller guarantees
-- that for sure the parameter X is not the null occurrence, and that
-- therefore this procedure cannot return. The expander uses this routine
@@ -212,6 +225,7 @@ private
procedure Reraise_Occurrence_No_Defer (X : Exception_Occurrence);
pragma No_Return (Reraise_Occurrence_No_Defer);
+ pragma Machine_Attribute (Reraise_Occurrence_No_Defer, "expected_throw");
-- Exactly like Reraise_Occurrence, except that abort is not deferred
-- before the call and the parameter X is known not to be the null
-- occurrence. This is used in generated code when it is known that abort
@@ -1178,6 +1178,9 @@ DEF_GCC_BUILTIN (BUILT_IN_FILE, "FILE", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LI
DEF_GCC_BUILTIN (BUILT_IN_FUNCTION, "FUNCTION", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_LINE, "LINE", BT_FN_INT, ATTR_NOTHROW_LEAF_LIST)
+/* Control Flow Redundancy hardening out-of-line checker. */
+DEF_BUILTIN_STUB (BUILT_IN___HARDCFR_CHECK, "__builtin___hardcfr_check")
+
/* Synchronization Primitives. */
#include "sync-builtins.def"
@@ -136,6 +136,7 @@ static tree handle_vector_mask_attribute (tree *, tree, tree, int,
static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *);
static tree handle_nonstring_attribute (tree *, tree, tree, int, bool *);
static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *);
+static tree handle_expected_throw_attribute (tree *, tree, tree, int, bool *);
static tree handle_cleanup_attribute (tree *, tree, tree, int, bool *);
static tree handle_warn_unused_result_attribute (tree *, tree, tree, int,
bool *);
@@ -437,6 +438,8 @@ const struct attribute_spec c_common_attribute_table[] =
handle_nonstring_attribute, NULL },
{ "nothrow", 0, 0, true, false, false, false,
handle_nothrow_attribute, NULL },
+ { "expected_throw", 0, 0, true, false, false, false,
+ handle_expected_throw_attribute, NULL },
{ "may_alias", 0, 0, false, true, false, false, NULL, NULL },
{ "cleanup", 1, 1, true, false, false, false,
handle_cleanup_attribute, NULL },
@@ -5412,6 +5415,25 @@ handle_nothrow_attribute (tree *node, tree name, tree ARG_UNUSED (args),
return NULL_TREE;
}
+/* Handle a "expected_throw" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_expected_throw_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ /* No flag to set here. */;
+ /* ??? TODO: Support types. */
+ else
+ {
+ warning (OPT_Wattributes, "%qE attribute ignored", name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
/* Handle a "cleanup" attribute; arguments as in
struct attribute_spec.handler. */
@@ -848,6 +848,9 @@ flags_from_decl_or_type (const_tree exp)
flags |= ECF_TM_PURE;
}
+ if (lookup_attribute ("expected_throw", DECL_ATTRIBUTES (exp)))
+ flags |= ECF_XTHROW;
+
flags = special_function_p (exp, flags);
}
else if (TYPE_P (exp))
@@ -1823,6 +1823,41 @@ fharden-conditional-branches
Common Var(flag_harden_conditional_branches) Optimization
Harden conditional branches by checking reversed conditions.
+fharden-control-flow-redundancy
+Common Var(flag_harden_control_flow_redundancy) Optimization
+Harden control flow by recording and checking execution paths.
+
+fhardcfr-skip-leaf
+Common Var(flag_harden_control_flow_redundancy_skip_leaf) Optimization
+Disable CFR in leaf functions.
+
+fhardcfr-check-returning-calls
+Common Var(flag_harden_control_flow_redundancy_check_returning_calls) Init(-1) Optimization
+Check CFR execution paths also before calls followed by returns of their results.
+
+fhardcfr-check-exceptions
+Common Var(flag_harden_control_flow_redundancy_check_exceptions) Init(-1) Optimization
+Check CFR execution paths also when exiting a function through an exception.
+
+fhardcfr-check-noreturn-calls=
+Common Joined RejectNegative Enum(hardcfr_check_noreturn_calls) Var(flag_harden_control_flow_redundancy_check_noreturn) Init(HCFRNR_UNSPECIFIED) Optimization
+-fhardcfr-check-noreturn-calls=[always|no-xthrow|nothrow|never] Check CFR execution paths also before calling noreturn functions.
+
+Enum
+Name(hardcfr_check_noreturn_calls) Type(enum hardcfr_noret) UnknownError(unknown hardcfr noreturn checking level %qs)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(never) Value(HCFRNR_NEVER)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(nothrow) Value(HCFRNR_NOTHROW)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(no-xthrow) Value(HCFRNR_NO_XTHROW)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(always) Value(HCFRNR_ALWAYS)
+
; Nonzero means ignore `#ident' directives. 0 means handle them.
; Generate position-independent code for executables if possible
; On SVR4 targets, it also controls whether or not to emit a
@@ -5253,7 +5253,8 @@ push_cp_library_fn (enum tree_code operator_code, tree type,
tree
push_throw_library_fn (tree name, tree type)
{
- tree fn = push_library_fn (name, type, NULL_TREE, ECF_NORETURN | ECF_COLD);
+ tree fn = push_library_fn (name, type, NULL_TREE,
+ ECF_NORETURN | ECF_XTHROW | ECF_COLD);
return fn;
}
@@ -657,12 +657,13 @@ build_throw (location_t loc, tree exp)
tree args[3] = {ptr_type_node, ptr_type_node, cleanup_type};
throw_fn = declare_library_fn_1 ("__cxa_throw",
- ECF_NORETURN | ECF_COLD,
+ ECF_NORETURN | ECF_XTHROW | ECF_COLD,
void_type_node, 3, args);
if (flag_tm && throw_fn != error_mark_node)
{
tree itm_fn = declare_library_fn_1 ("_ITM_cxa_throw",
- ECF_NORETURN | ECF_COLD,
+ ECF_NORETURN | ECF_XTHROW
+ | ECF_COLD,
void_type_node, 3, args);
if (itm_fn != error_mark_node)
{
@@ -797,7 +798,8 @@ build_throw (location_t loc, tree exp)
if (!rethrow_fn)
{
rethrow_fn = declare_library_fn_1 ("__cxa_rethrow",
- ECF_NORETURN | ECF_COLD,
+ ECF_NORETURN | ECF_XTHROW
+ | ECF_COLD,
void_type_node, 0, NULL);
if (flag_tm && rethrow_fn != error_mark_node)
apply_tm_attr (rethrow_fn, get_identifier ("transaction_pure"));
@@ -3001,6 +3001,17 @@ when using these attributes the problem is diagnosed
earlier and with exact location of the call even in presence of inline
functions or when not emitting debugging information.
+@cindex @code{expected_throw} function attribute
+@item expected_throw
+This attribute, attached to a function, tells the compiler the function
+is more likely to raise or propagate an exception than to return, loop
+forever, or terminate the program.
+
+This hint is mostly ignored by the compiler. The only effect is when
+it's applied to @code{noreturn} functions and
+@samp{-fharden-control-flow-redundancy} is enabled, and
+@samp{-fhardcfr-check-noreturn-calls=no-xthrow} (the default) is in
+effect.
+
@cindex @code{externally_visible} function attribute
@item externally_visible
This attribute, attached to a global variable or function, nullifies
@@ -637,6 +637,9 @@ Objective-C and Objective-C++ Dialects}.
-fsanitize-undefined-trap-on-error -fbounds-check
-fcf-protection=@r{[}full@r{|}branch@r{|}return@r{|}none@r{|}check@r{]}
-fharden-compares -fharden-conditional-branches
+-fharden-control-flow-redundancy -fhardcfr-skip-leaf
+-fhardcfr-check-exceptions -fhardcfr-check-returning-calls
+-fhardcfr-check-noreturn-calls=@r{[}always@r{|}no-xthrow@r{|}nothrow@r{|}never@r{]}
-fstack-protector -fstack-protector-all -fstack-protector-strong
-fstack-protector-explicit -fstack-check
-fstack-limit-register=@var{reg} -fstack-limit-symbol=@var{sym}
@@ -15845,6 +15848,16 @@ A value of zero can be used to lift
the bound. A variable whose value is unknown at compilation time and
defined outside a SCoP is a parameter of the SCoP.
+@item hardcfr-max-blocks
+Disable @option{-fharden-control-flow-redundancy} for functions with a
+larger number of blocks than the specified value. Zero removes any
+limit.
+
+@item hardcfr-max-inline-blocks
+Force @option{-fharden-control-flow-redundancy} to use out-of-line
+checking for functions with a larger number of basic blocks than the
+specified value.
+
@item loop-block-tile-size
Loop blocking or strip mining transforms, enabled with
@option{-floop-block} or @option{-floop-strip-mine}, strip mine each
@@ -17290,6 +17303,86 @@ condition, and to call @code{__builtin_trap} if the result is
unexpected. Use with @samp{-fharden-compares} to cover all
conditionals.
+@opindex fharden-control-flow-redundancy
+@item -fharden-control-flow-redundancy
+Emit extra code to set booleans when entering basic blocks, and to
+verify and trap, at function exits, when the booleans do not form an
+execution path that is compatible with the control flow graph.
+
+Verification takes place before returns, before mandatory tail calls
+(see below) and, optionally, before escaping exceptions with
+@option{-fhardcfr-check-exceptions}, before returning calls with
+@option{-fhardcfr-check-returning-calls}, and before noreturn calls with
+@option{-fhardcfr-check-noreturn-calls}.  Tuning options
+@option{--param hardcfr-max-blocks} and @option{--param
+hardcfr-max-inline-blocks} are available.
+
+Tail call optimization takes place too late to affect control flow
+redundancy, but calls annotated as mandatory tail calls by language
+front-ends, and any calls marked early enough as potential tail calls
+would also have verification issued before the call, but these
+possibilities are merely theoretical, as these conditions can only be
+met when using custom compiler plugins.
+
+@opindex fhardcfr-skip-leaf
+@item -fhardcfr-skip-leaf
+Disable @option{-fharden-control-flow-redundancy} in leaf functions.
+
+@opindex fhardcfr-check-exceptions
+@opindex fno-hardcfr-check-exceptions
+@item -fhardcfr-check-exceptions
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph at exception
+escape points, as if the function body was wrapped with a cleanup
+handler that performed the check and reraised. This option is enabled
+by default; use @option{-fno-hardcfr-check-exceptions} to disable it.
+
+@opindex fhardcfr-check-returning-calls
+@opindex fno-hardcfr-check-returning-calls
+@item -fhardcfr-check-returning-calls
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph before any
+function call immediately followed by a return of its result, if any, so
+as to not prevent tail-call optimization, whether or not it is
+ultimately optimized to a tail call.
+
+This option is enabled by default whenever sibling call optimizations
+are enabled (see @option{-foptimize-sibling-calls}), but it can be
+enabled (or disabled, using its negated form) explicitly, regardless of
+the optimizations.
+
+@opindex fhardcfr-check-noreturn-calls
+@item -fhardcfr-check-noreturn-calls=@r{[}always@r{|}no-xthrow@r{|}nothrow@r{|}never@r{]}
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph before
+@code{noreturn} calls, either all of them (@option{always}), those that
+aren't expected to return control to the caller through an exception
+(@option{no-xthrow}, the default), those that may not return control to
+the caller through an exception either (@option{nothrow}), or none of
+them (@option{never}).
+
+Checking before a @code{noreturn} function that may return control to
+the caller through an exception may cause checking to be performed more
+than once, if the exception is caught in the caller, whether by a
+handler or a cleanup. When @option{-fhardcfr-check-exceptions} is also
+enabled, the compiler will avoid associating a @code{noreturn} call with
+the implicitly-added cleanup handler, since it would be redundant with
+the check performed before the call, but other handlers or cleanups in
+the function, if activated, will modify the recorded execution path and
+check it again when another checkpoint is hit. The checkpoint may even
+be another @code{noreturn} call, so checking may end up performed
+multiple times.
+
+Various optimizers may cause calls to be marked as @code{noreturn}
+and/or @code{nothrow}, even in the absence of the corresponding
+attributes, which may affect the placement of checks before calls, as
+well as the addition of implicit cleanup handlers for them. This
+unpredictability, and the fact that raising and reraising exceptions
+frequently amounts to implicitly calling @code{noreturn} functions, have
+made @option{no-xthrow} the default setting for this option: it excludes
+from the @code{noreturn} treatment only internal functions used to
+(re)raise exceptions, that are not affected by these optimizations.
+
@opindex fstack-protector
@item -fstack-protector
Emit extra code to check for buffer overflows, such as stack smashing
@@ -157,6 +157,16 @@ enum stack_reuse_level
SR_ALL
};
+/* Control Flow Redundancy hardening options for noreturn calls. */
+enum hardcfr_noret
+{
+ HCFRNR_NEVER,
+ HCFRNR_NOTHROW,
+ HCFRNR_NO_XTHROW,
+ HCFRNR_UNSPECIFIED,
+ HCFRNR_ALWAYS,
+};
+
/* The live patching level. */
enum live_patching_level
{
new file mode 100644
@@ -0,0 +1,1469 @@
+/* Control flow redundancy hardening.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <oliva@adacore.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#define INCLUDE_ALGORITHM /* find */
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "fold-const.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "gimple-iterator.h"
+#include "gimple-pretty-print.h"
+#include "tree-cfg.h"
+#include "tree-cfgcleanup.h"
+#include "tree-eh.h"
+#include "except.h"
+#include "sbitmap.h"
+#include "basic-block.h"
+#include "cfghooks.h"
+#include "cfgloop.h"
+#include "cgraph.h"
+#include "alias.h"
+#include "varasm.h"
+#include "output.h"
+#include "langhooks.h"
+#include "diagnostic.h"
+#include "intl.h"
+
+namespace {
+
+/* This pass introduces verification, at function exits, that booleans
+ set in each basic block during function execution reflect the
+ control flow graph: for each visited block, check that at least one
+ predecessor and at least one successor were also visited. This
+ sort of hardening may detect various kinds of attacks. */
+
+/* Define a pass to harden code through control flow redundancy. */
+
+const pass_data pass_data_harden_control_flow_redundancy = {
+ GIMPLE_PASS,
+ "hardcfr",
+ OPTGROUP_NONE,
+ TV_NONE,
+ PROP_cfg | PROP_ssa, // properties_required
+ 0, // properties_provided
+ 0, // properties_destroyed
+ TODO_cleanup_cfg, // properties_start
+ 0, // properties_finish
+};
+
+class pass_harden_control_flow_redundancy : public gimple_opt_pass
+{
+public:
+ pass_harden_control_flow_redundancy (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_harden_control_flow_redundancy, ctxt)
+ {}
+ opt_pass *clone () { return new pass_harden_control_flow_redundancy (m_ctxt); }
+ virtual bool gate (function *fun) {
+ /* Return quickly if the pass is disabled, without checking any of
+ the conditions that might give rise to warnings that would only
+ be appropriate if hardening was requested. */
+ if (!flag_harden_control_flow_redundancy)
+ return false;
+
+ /* Functions that return more than once, like setjmp and vfork
+ (that also gets this flag set), will start recording a path
+ after the first return, and then may take another path when
+ they return again. The unterminated path may then be flagged
+ as an error. ??? We could save the visited array before the
+ call and restore it if it returns again. */
+ if (fun->calls_setjmp)
+ {
+ warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
+ "%qD calls %<setjmp%> or similar,"
+ " %<-fharden-control-flow-redundancy%> is not supported",
+ fun->decl);
+ return false;
+ }
+
+ /* Some targets bypass the abnormal dispatcher block in nonlocal
+ gotos, and then we'd miss its visited bit. It might be doable
+ to make it work uniformly, but this feature is not used often
+ enough to make it worthwhile. */
+ if (fun->has_nonlocal_label)
+ {
+ warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
+ "%qD receives nonlocal gotos,"
+ " %<-fharden-control-flow-redundancy%> is not supported",
+ fun->decl);
+ return false;
+ }
+
+ if (fun->cfg && param_hardcfr_max_blocks > 0
+ && (n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS
+ > param_hardcfr_max_blocks))
+ {
+ warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
+ "%qD has more than %u blocks, the requested"
+ " maximum for %<-fharden-control-flow-redundancy%>",
+ fun->decl, param_hardcfr_max_blocks);
+ return false;
+ }
+
+ return true;
+ }
+ virtual unsigned int execute (function *);
+};
+
+}
+
+/* Return TRUE iff CFR checks should be inserted before returning
+ calls. */
+
+static bool
+check_returning_calls_p ()
+{
+ return
+ flag_harden_control_flow_redundancy_check_returning_calls > 0
+ || (flag_harden_control_flow_redundancy_check_returning_calls < 0
+ /* Gates pass_tail_calls. */
+ && flag_optimize_sibling_calls
+ /* Gates pass_all_optimizations. */
+ && optimize >= 1 && !optimize_debug);
+}
+
+/* Scan BB from the end, updating *RETPTR, if given, as return stmts and
+ copies are found. Return a call or a stmt that cannot appear after
+ a tail call, or NULL if the top of the block is reached without
+ finding any. */
+
+static gimple *
+hardcfr_scan_block (basic_block bb, tree **retptr)
+{
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+
+ /* Ignore labels, returns, nops, clobbers and debug stmts. */
+ if (gimple_code (stmt) == GIMPLE_LABEL
+ || gimple_code (stmt) == GIMPLE_NOP
+ || gimple_code (stmt) == GIMPLE_PREDICT
+ || gimple_clobber_p (stmt)
+ || is_gimple_debug (stmt))
+ continue;
+
+ if (gimple_code (stmt) == GIMPLE_RETURN)
+ {
+ greturn *gret = as_a <greturn *> (stmt);
+ if (retptr)
+ {
+ gcc_checking_assert (!*retptr);
+ *retptr = gimple_return_retval_ptr (gret);
+ }
+ continue;
+ }
+
+ /* Check for a call. */
+ if (is_gimple_call (stmt))
+ return stmt;
+
+ /* Allow simple copies to the return value, updating the return
+ value to be found in earlier assignments. */
+ if (retptr && *retptr && gimple_assign_single_p (stmt)
+ && **retptr == gimple_assign_lhs (stmt))
+ {
+ *retptr = gimple_assign_rhs1_ptr (stmt);
+ continue;
+ }
+
+ return stmt;
+ }
+
+ /* Any other kind of stmt will prevent a tail call. */
+ return NULL;
+}
+
+/* Return TRUE iff CALL is to be preceded by a CFR checkpoint, i.e.,
+ if it's a returning call (one whose result is ultimately returned
+ without intervening non-copy statements) and we're checking
+ returning calls, a __builtin_return call (noreturn with a path to
+ the exit block), a must-tail call, or a tail call. */
+
+static bool
+returning_call_p (gcall *call)
+{
+ if (!(gimple_call_noreturn_p (call)
+ || gimple_call_must_tail_p (call)
+ || gimple_call_tail_p (call)
+ || check_returning_calls_p ()))
+ return false;
+
+ /* Quickly check that there's a path to exit compatible with a
+ returning call. Detect infinite loops by limiting the path
+ length to the basic block count, and by looking for duplicate
+ blocks before allocating more memory for the path, for amortized
+ O(n). */
+ auto_vec<basic_block, 10> path;
+ for (basic_block bb = gimple_bb (call);
+ bb != EXIT_BLOCK_PTR_FOR_FN (cfun);
+ bb = single_succ (bb))
+ if (!single_succ_p (bb)
+ || (single_succ_edge (bb)->flags & EDGE_EH) != 0
+ || n_basic_blocks_for_fn (cfun) - path.length () <= NUM_FIXED_BLOCKS
+ || (path.length () == path.allocated ()
+ && std::find (path.begin (), path.end (), bb) != path.end ()))
+ return false;
+ else
+ path.safe_push (bb);
+
+ /* Check the stmts in the blocks and trace the return value. */
+ tree *retptr = NULL;
+ for (;;)
+ {
+ gcc_checking_assert (!path.is_empty ());
+ basic_block bb = path.pop ();
+ gimple *stop = hardcfr_scan_block (bb, &retptr);
+ if (stop)
+ {
+ if (stop != call)
+ return false;
+ gcc_checking_assert (path.is_empty ());
+ break;
+ }
+
+ gphi *retphi = NULL;
+ if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME
+ && !SSA_NAME_IS_DEFAULT_DEF (*retptr)
+ && SSA_NAME_DEF_STMT (*retptr)
+ && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr))
+ && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb)
+ {
+ retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr));
+ gcc_checking_assert (gimple_phi_result (retphi) == *retptr);
+ }
+ else
+ continue;
+
+ gcc_checking_assert (!path.is_empty ());
+ edge e = single_succ_edge (path.last ());
+ int i = EDGE_COUNT (bb->preds);
+ while (i--)
+ if (EDGE_PRED (bb, i) == e)
+ break;
+ gcc_checking_assert (i >= 0);
+ retptr = gimple_phi_arg_def_ptr (retphi, i);
+ }
+
+ return (gimple_call_noreturn_p (call)
+ || gimple_call_must_tail_p (call)
+ || gimple_call_tail_p (call)
+ || (gimple_call_lhs (call) == (retptr ? *retptr : NULL)
+ && check_returning_calls_p ()));
+}
+
+typedef auto_vec<edge, 10> chk_edges_t;
+
+/* Declare for mutual recursion. */
+static bool hardcfr_sibcall_search_preds (basic_block bb,
+ chk_edges_t &chk_edges,
+ int &count_chkcall,
+ auto_sbitmap &chkcall_blocks,
+ int &count_postchk,
+ auto_sbitmap &postchk_blocks,
+ tree *retptr);
+
+/* Search backwards from the end of BB for a mandatory or potential
+ sibcall. Schedule the block to be handled sort-of like noreturn if
+ so. Recurse to preds, with updated RETPTR, if the block only
+ contains stmts that may follow such a call, scheduling checking at
+ edges and marking blocks as post-check as needed. Return true iff,
+ at the end of the block, a check will have already been
+ performed. */
+
+static bool
+hardcfr_sibcall_search_block (basic_block bb,
+ chk_edges_t &chk_edges,
+ int &count_chkcall,
+ auto_sbitmap &chkcall_blocks,
+ int &count_postchk,
+ auto_sbitmap &postchk_blocks,
+ tree *retptr)
+{
+ /* Conditionals and internal exceptions rule out tail calls. */
+ if (!single_succ_p (bb)
+ || (single_succ_edge (bb)->flags & EDGE_EH) != 0)
+ return false;
+
+ gimple *stmt = hardcfr_scan_block (bb, &retptr);
+ if (!stmt)
+ return hardcfr_sibcall_search_preds (bb, chk_edges,
+ count_chkcall, chkcall_blocks,
+ count_postchk, postchk_blocks,
+ retptr);
+
+ if (!is_a <gcall *> (stmt))
+ return false;
+
+ /* Avoid disrupting mandatory or early-marked tail calls,
+ inserting the check before them. This works for
+ must-tail calls, but tail calling as an optimization is
+ detected too late for us.
+
+ Also check for noreturn calls here. Noreturn calls won't
+ normally have edges to exit, so they won't be found here,
+ but __builtin_return does, and we must check before
+ it, so handle it like a tail call. */
+ gcall *call = as_a <gcall *> (stmt);
+ if (!(gimple_call_noreturn_p (call)
+ || gimple_call_must_tail_p (call)
+ || gimple_call_tail_p (call)
+ || (gimple_call_lhs (call) == (retptr ? *retptr : NULL)
+ && check_returning_calls_p ())))
+ return false;
+
+ gcc_checking_assert (returning_call_p (call));
+
+ /* We found a call that is to be preceded by checking. */
+ if (bitmap_set_bit (chkcall_blocks, bb->index))
+ ++count_chkcall;
+ else
+ gcc_unreachable ();
+ return true;
+}
+
+
+/* Search preds of BB for a mandatory or potential sibcall or
+ returning call, and arrange for the blocks containing them to have
+ a check inserted before the call, like noreturn calls. If any
+ preds are found to perform checking, schedule checks at the edges
+   of those that don't, and mark BB as postcheck. */
+
+static bool
+hardcfr_sibcall_search_preds (basic_block bb,
+ chk_edges_t &chk_edges,
+ int &count_chkcall,
+ auto_sbitmap &chkcall_blocks,
+ int &count_postchk,
+ auto_sbitmap &postchk_blocks,
+ tree *retptr)
+{
+ /* For the exit block, we wish to force a check at every
+ predecessor, so pretend we've already found a pred that had
+ checking, so that we schedule checking at every one of its pred
+ edges. */
+ bool first = bb->index >= NUM_FIXED_BLOCKS;
+ bool postchecked = true;
+
+ gphi *retphi = NULL;
+ if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME
+ && !SSA_NAME_IS_DEFAULT_DEF (*retptr)
+ && SSA_NAME_DEF_STMT (*retptr)
+ && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr))
+ && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb)
+ {
+ retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr));
+ gcc_checking_assert (gimple_phi_result (retphi) == *retptr);
+ }
+
+ for (int i = EDGE_COUNT (bb->preds); i--; first = false)
+ {
+ edge e = EDGE_PRED (bb, i);
+
+ bool checked
+ = hardcfr_sibcall_search_block (e->src, chk_edges,
+ count_chkcall, chkcall_blocks,
+ count_postchk, postchk_blocks,
+ !retphi ? retptr
+ : gimple_phi_arg_def_ptr (retphi, i));
+
+ if (first)
+ {
+ postchecked = checked;
+ continue;
+ }
+
+ /* When we first find a checked block, force a check at every
+ other incoming edge we've already visited, and those we
+ visit afterwards that don't have their own check, so that
+ when we reach BB, the check has already been performed. */
+ if (!postchecked && checked)
+ {
+ for (int j = EDGE_COUNT (bb->preds); --j > i; )
+ chk_edges.safe_push (EDGE_PRED (bb, j));
+ postchecked = true;
+ }
+ if (postchecked && !checked)
+ chk_edges.safe_push (EDGE_PRED (bb, i));
+ }
+
+ if (postchecked && bb->index >= NUM_FIXED_BLOCKS)
+ {
+ if (bitmap_set_bit (postchk_blocks, bb->index))
+ count_postchk++;
+ else
+ gcc_unreachable ();
+ }
+
+ return postchecked;
+}
+
+
+class rt_bb_visited
+{
+ /* Use a sufficiently wide unsigned type to hold basic block numbers. */
+ typedef size_t blknum;
+
+ /* Record the original block count of the function. */
+ blknum nblocks;
+ /* Record the number of bits per VWORD (short for VISITED WORD), an
+ efficient mode to set and test bits for blocks we visited, and to
+ encode the CFG in case out-of-line verification is used. */
+ unsigned vword_bits;
+
+ /* Hold the unsigned integral VWORD type. */
+ tree vword_type;
+ /* Hold a pointer-to-VWORD type. */
+ tree vword_ptr;
+
+ /* Hold a growing sequence used to check, inline or out-of-line,
+ that VISITED encodes an expected execution path. */
+ gimple_seq ckseq;
+  /* If non-NULL, hold a growing representation of the CFG for
+ out-of-line testing. */
+ tree rtcfg;
+
+ /* Hold the declaration of an array of VWORDs, used as an array of
+ NBLOCKS-2 bits. */
+ tree visited;
+
+  /* If performing inline checking, hold declarations of boolean
+ variables used for inline checking. CKBLK holds the result of
+ testing whether the VISITED bit corresponding to a predecessor or
+ successor is set, CKINV inverts that bit, CKPART gets cleared if
+ a block was not visited or if CKINV for any of its predecessors
+ or successors is set, and CKFAIL gets set if CKPART remains set
+ at the end of a block's predecessors or successors list. */
+ tree ckfail, ckpart, ckinv, ckblk;
+
+ /* Convert a block index N to a block vindex, the index used to
+ identify it in the VISITED array. Check that it's in range:
+ neither ENTRY nor EXIT, but maybe one-past-the-end, to compute
+ the visited array length. */
+ blknum num2idx (blknum n) {
+ gcc_checking_assert (n >= NUM_FIXED_BLOCKS && n <= nblocks);
+ return (n - NUM_FIXED_BLOCKS);
+ }
+ /* Return the block vindex for BB, that must not be ENTRY or
+ EXIT. */
+ blknum bb2idx (basic_block bb) {
+ gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
+ gcc_checking_assert (blknum (bb->index) < nblocks);
+ return num2idx (bb->index);
+ }
+
+ /* Compute the type to be used for the VISITED array. */
+ tree vtype ()
+ {
+ blknum n = num2idx (nblocks);
+ return build_array_type_nelts (vword_type,
+ (n + vword_bits - 1) / vword_bits);
+ }
+
+ /* Compute and return the index into VISITED for block BB. If BITP
+ is non-NULL, also compute and store the bit mask corresponding to
+ block BB in *BITP, so that (visited[index] & mask) tells whether
+ BB was visited. */
+ tree vwordidx (basic_block bb, tree *bitp = NULL)
+ {
+ blknum idx = bb2idx (bb);
+ if (bitp)
+ {
+ unsigned bit = idx % vword_bits;
+ if (BITS_BIG_ENDIAN)
+ bit = vword_bits - bit - 1;
+ wide_int wbit = wi::set_bit_in_zero (bit, vword_bits);
+ *bitp = wide_int_to_tree (vword_type, wbit);
+ }
+ return build_int_cst (vword_ptr, idx / vword_bits);
+ }
+
+  /* Return an expr that accesses the visited element that holds
+ information about BB. If BITP is non-NULL, set it to the mask to
+ tell which bit in that expr refers to BB. */
+ tree vword (basic_block bb, tree *bitp = NULL)
+ {
+ return build2 (MEM_REF, vword_type,
+ build1 (ADDR_EXPR, vword_ptr, visited),
+ int_const_binop (MULT_EXPR, vwordidx (bb, bitp),
+ fold_convert (vword_ptr,
+ TYPE_SIZE_UNIT
+ (vword_type))));
+ }
+
+ /* Return an expr that evaluates to true iff BB was marked as
+ VISITED. Add any gimple stmts to SEQP. */
+ tree vindex (basic_block bb, gimple_seq *seqp)
+ {
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return boolean_true_node;
+
+ tree bit, setme = vword (bb, &bit);
+ tree temp = create_tmp_var (vword_type, ".cfrtemp");
+
+ gassign *vload = gimple_build_assign (temp, setme);
+ gimple_seq_add_stmt (seqp, vload);
+
+ gassign *vmask = gimple_build_assign (temp, BIT_AND_EXPR, temp, bit);
+ gimple_seq_add_stmt (seqp, vmask);
+
+ return build2 (NE_EXPR, boolean_type_node,
+ temp, build_int_cst (vword_type, 0));
+ }
+
+ /* Set the bit corresponding to BB in VISITED. Add to SEQ any
+ required gimple stmts, and return SEQ, possibly modified. */
+ gimple_seq vset (basic_block bb, gimple_seq seq = NULL)
+ {
+ tree bit, setme = vword (bb, &bit);
+ tree temp = create_tmp_var (vword_type, ".cfrtemp");
+
+ gassign *vload = gimple_build_assign (temp, setme);
+ gimple_seq_add_stmt (&seq, vload);
+
+ gassign *vbitset = gimple_build_assign (temp, BIT_IOR_EXPR, temp, bit);
+ gimple_seq_add_stmt (&seq, vbitset);
+
+ gassign *vstore = gimple_build_assign (unshare_expr (setme), temp);
+ gimple_seq_add_stmt (&seq, vstore);
+
+ /* Prevent stores into visited from being deferred, forcing
+ subsequent bitsets to reload the word rather than reusing
+ values already in register. The purpose is threefold: make the
+ bitset get to memory in this block, so that control flow
+ attacks in functions called in this block don't easily bypass
+ the bitset; prevent the bitset word from being retained in a
+ register across blocks, which could, in an attack scenario,
+ make a later block set more than one bit; and prevent hoisting
+ or sinking loads or stores of bitset words out of loops or even
+ throughout functions, which could significantly weaken the
+ verification. This is equivalent to making the bitsetting
+ volatile within the function body, but without changing its
+ type; making the bitset volatile would make inline checking far
+ less optimizable for no reason. */
+ vec<tree, va_gc> *inputs = NULL;
+ vec<tree, va_gc> *outputs = NULL;
+ vec_safe_push (outputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (2, "=m")),
+ visited));
+ vec_safe_push (inputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (1, "m")),
+ visited));
+ gasm *stabilize = gimple_build_asm_vec ("", inputs, outputs,
+ NULL, NULL);
+ gimple_seq_add_stmt (&seq, stabilize);
+
+ return seq;
+ }
+
+public:
+ /* Prepare to add control flow redundancy testing to CFUN. */
+ rt_bb_visited (int checkpoints)
+ : nblocks (n_basic_blocks_for_fn (cfun)),
+ vword_type (NULL), ckseq (NULL), rtcfg (NULL)
+ {
+ /* If we've already added a declaration for the builtin checker,
+ extract vword_type and vword_bits from its declaration. */
+ if (tree checkfn = builtin_decl_explicit (BUILT_IN___HARDCFR_CHECK))
+ {
+ tree check_arg_list = TYPE_ARG_TYPES (TREE_TYPE (checkfn));
+ tree vword_const_ptr_type = TREE_VALUE (TREE_CHAIN (check_arg_list));
+ vword_type = TYPE_MAIN_VARIANT (TREE_TYPE (vword_const_ptr_type));
+ vword_bits = tree_to_shwi (TYPE_SIZE (vword_type));
+ }
+ /* Otherwise, select vword_bits, vword_type et al, and use it to
+ declare the builtin checker. */
+ else
+ {
+ /* This setting needs to be kept in sync with libgcc/hardcfr.c.
+ We aim for at least 28 bits, which enables us to refer to as
+ many as 28 << 28 blocks in a function's CFG. That's way over
+ 4G blocks. */
+ machine_mode VWORDmode;
+ if (BITS_PER_UNIT >= 28)
+ {
+ VWORDmode = QImode;
+ vword_bits = BITS_PER_UNIT;
+ }
+ else if (BITS_PER_UNIT >= 14)
+ {
+ VWORDmode = HImode;
+ vword_bits = 2 * BITS_PER_UNIT;
+ }
+ else
+ {
+ VWORDmode = SImode;
+ vword_bits = 4 * BITS_PER_UNIT;
+ }
+
+ vword_type = lang_hooks.types.type_for_mode (VWORDmode, 1);
+ gcc_checking_assert (vword_bits == tree_to_shwi (TYPE_SIZE
+ (vword_type)));
+
+ vword_type = build_variant_type_copy (vword_type);
+ TYPE_ALIAS_SET (vword_type) = new_alias_set ();
+
+ tree vword_const = build_qualified_type (vword_type, TYPE_QUAL_CONST);
+ tree vword_const_ptr = build_pointer_type (vword_const);
+ tree type = build_function_type_list (void_type_node, sizetype,
+ vword_const_ptr, vword_const_ptr,
+ NULL_TREE);
+ tree decl = add_builtin_function_ext_scope
+ ("__builtin___hardcfr_check",
+ type, BUILT_IN___HARDCFR_CHECK, BUILT_IN_NORMAL,
+ "__hardcfr_check", NULL_TREE);
+ TREE_NOTHROW (decl) = true;
+ set_builtin_decl (BUILT_IN___HARDCFR_CHECK, decl, true);
+ }
+
+ /* The checker uses a qualified pointer, so we can't reuse it,
+ so build a new one. */
+ vword_ptr = build_pointer_type (vword_type);
+
+ tree visited_type = vtype ();
+ visited = create_tmp_var (visited_type, ".cfrvisited");
+
+ if (nblocks - NUM_FIXED_BLOCKS > blknum (param_hardcfr_max_inline_blocks)
+ || checkpoints > 1)
+ {
+ /* Make sure vword_bits is wide enough for the representation
+ of nblocks in rtcfg. Compare with vword_bits << vword_bits,
+ but avoiding overflows, shifting nblocks right instead. If
+ vword_bits is wider than HOST_WIDE_INT, assume it fits, so
+ as to avoid undefined shifts. */
+ gcc_assert (HOST_BITS_PER_WIDE_INT <= vword_bits
+ || (((unsigned HOST_WIDE_INT)(num2idx (nblocks))
+ >> vword_bits) < vword_bits));
+
+ /* Build a terminator for the constructor list. */
+ rtcfg = build_tree_list (NULL_TREE, NULL_TREE);
+ return;
+ }
+
+ ckfail = create_tmp_var (boolean_type_node, ".cfrfail");
+ ckpart = create_tmp_var (boolean_type_node, ".cfrpart");
+ ckinv = create_tmp_var (boolean_type_node, ".cfrinv");
+ ckblk = create_tmp_var (boolean_type_node, ".cfrblk");
+
+ gassign *ckfail_init = gimple_build_assign (ckfail, boolean_false_node);
+ gimple_seq_add_stmt (&ckseq, ckfail_init);
+ }
+
+ /* Insert SEQ before a resx or a call in INSBB. */
+ void insert_exit_check_in_block (gimple_seq seq, basic_block insbb)
+ {
+ gimple_stmt_iterator gsi = gsi_last_bb (insbb);
+
+ while (!gsi_end_p (gsi))
+ if (is_a <gresx *> (gsi_stmt (gsi))
+ || is_a <gcall *> (gsi_stmt (gsi)))
+ break;
+ else
+ gsi_prev (&gsi);
+
+ gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
+ }
+
+ /* Insert SEQ on E. */
+ void insert_exit_check_on_edge (gimple_seq seq, edge e)
+ {
+ gsi_insert_seq_on_edge_immediate (e, seq);
+ }
+
+ /* Add checking code to CHK_EDGES and CHKCALL_BLOCKS, and
+ initialization code on the entry edge. Before this point, the
+ CFG has been undisturbed, and all the needed data has been
+ collected and safely stowed. */
+ void check (chk_edges_t &chk_edges,
+ int count_chkcall, auto_sbitmap const &chkcall_blocks)
+ {
+ /* If we're using out-of-line checking, create and statically
+ initialize the CFG checking representation, generate the
+ checker call for the checking sequence, and insert it in all
+ exit edges, if there's more than one. If there's only one, we
+ use the same logic as the inline case to insert the check
+ sequence. */
+ if (rtcfg)
+ {
+ /* Unreverse the list, and drop the tail node turned into head. */
+ rtcfg = TREE_CHAIN (nreverse (rtcfg));
+
+ /* Turn the indices stored in TREE_PURPOSE into separate
+ nodes. It was useful to keep them together to enable
+ combination of masks and for clear separation of
+ terminators while constructing it, but now we have to turn
+ it into a sequence of words. */
+ for (tree node = rtcfg; node; node = TREE_CHAIN (node))
+ {
+ tree wordidx = TREE_PURPOSE (node);
+ if (!wordidx)
+ continue;
+
+ TREE_PURPOSE (node) = NULL_TREE;
+ TREE_CHAIN (node) = tree_cons (NULL_TREE,
+ fold_convert (vword_type, wordidx),
+ TREE_CHAIN (node));
+ }
+
+ /* Build the static initializer for the array with the CFG
+ representation for out-of-line checking. */
+ tree init = build_constructor_from_list (NULL_TREE, rtcfg);
+ TREE_TYPE (init) = build_array_type_nelts (vword_type,
+ CONSTRUCTOR_NELTS (init));
+ char buf[32];
+ ASM_GENERATE_INTERNAL_LABEL (buf, "Lhardcfg",
+ current_function_funcdef_no);
+ rtcfg = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+ get_identifier (buf),
+ TREE_TYPE (init));
+ TREE_READONLY (rtcfg) = 1;
+ TREE_STATIC (rtcfg) = 1;
+ TREE_ADDRESSABLE (rtcfg) = 1;
+ TREE_USED (rtcfg) = 1;
+ DECL_ARTIFICIAL (rtcfg) = 1;
+ DECL_IGNORED_P (rtcfg) = 1;
+ DECL_INITIAL (rtcfg) = init;
+ make_decl_rtl (rtcfg);
+ varpool_node::finalize_decl (rtcfg);
+
+ /* Add the checker call to ckseq. */
+ gcall *call_chk = gimple_build_call (builtin_decl_explicit
+ (BUILT_IN___HARDCFR_CHECK), 3,
+ build_int_cst (sizetype,
+ num2idx (nblocks)),
+ build1 (ADDR_EXPR, vword_ptr,
+ visited),
+ build1 (ADDR_EXPR, vword_ptr,
+ rtcfg));
+ gimple_seq_add_stmt (&ckseq, call_chk);
+
+ gimple *clobber = gimple_build_assign (visited,
+ build_clobber
+ (TREE_TYPE (visited)));
+ gimple_seq_add_stmt (&ckseq, clobber);
+
+ /* If we have multiple exit edges, insert (copies of)
+ ckseq in all of them. */
+ for (int i = chk_edges.length (); i--; )
+ {
+ gimple_seq seq = ckseq;
+ /* Copy the sequence, unless we're dealing with the
+ last edge (we're counting down to zero). */
+ if (i || count_chkcall)
+ seq = gimple_seq_copy (seq);
+
+ edge e = chk_edges[i];
+
+ if (dump_file)
+ {
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ fprintf (dump_file,
+ "Inserting out-of-line check in"
+ " block %i's edge to exit.\n",
+ e->src->index);
+ else
+ fprintf (dump_file,
+ "Inserting out-of-line check in"
+ " block %i's edge to postcheck block %i.\n",
+ e->src->index, e->dest->index);
+ }
+
+ insert_exit_check_on_edge (seq, e);
+
+ gcc_checking_assert (!bitmap_bit_p (chkcall_blocks, e->src->index));
+ }
+
+ sbitmap_iterator it;
+ unsigned i;
+ EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+
+ gimple_seq seq = ckseq;
+ gcc_checking_assert (count_chkcall > 0);
+ if (--count_chkcall)
+ seq = gimple_seq_copy (seq);
+
+ if (dump_file)
+ fprintf (dump_file,
+ "Inserting out-of-line check before stmt in block %i.\n",
+ bb->index);
+
+ insert_exit_check_in_block (seq, bb);
+ }
+
+ gcc_checking_assert (count_chkcall == 0);
+ }
+ else
+ {
+ /* Inline checking requires a single exit edge. */
+ gimple *last = gimple_build_assign (visited,
+ build_clobber
+ (TREE_TYPE (visited)));
+ gimple_seq_add_stmt (&ckseq, last);
+
+ if (!count_chkcall)
+ {
+ edge e = single_pred_edge (EXIT_BLOCK_PTR_FOR_FN (cfun));
+
+ if (dump_file)
+ {
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ fprintf (dump_file,
+ "Inserting out-of-line check in"
+ " block %i's edge to postcheck block %i.\n",
+ e->src->index, e->dest->index);
+ else
+ fprintf (dump_file,
+ "Inserting inline check in"
+ " block %i's edge to exit.\n",
+ e->src->index);
+ }
+
+ insert_exit_check_on_edge (ckseq, e);
+ }
+ else
+ {
+ gcc_checking_assert (count_chkcall == 1);
+
+ sbitmap_iterator it;
+ unsigned i;
+ EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+
+ gimple_seq seq = ckseq;
+ gcc_checking_assert (count_chkcall > 0);
+ if (--count_chkcall)
+ seq = gimple_seq_copy (seq);
+
+ if (dump_file)
+ fprintf (dump_file,
+ "Inserting inline check before stmt in block %i.\n",
+ bb->index);
+
+ insert_exit_check_in_block (seq, bb);
+ }
+
+ gcc_checking_assert (count_chkcall == 0);
+ }
+
+ /* The inserted ckseq computes CKFAIL at LAST. Now we have to
+ conditionally trap on it. */
+ basic_block insbb = gimple_bb (last);
+
+ /* Create a block with the unconditional trap. */
+ basic_block trp = create_empty_bb (insbb);
+ gimple_stmt_iterator gsit = gsi_after_labels (trp);
+
+ gcall *trap = gimple_build_call (builtin_decl_explicit
+ (BUILT_IN_TRAP), 0);
+ gsi_insert_before (&gsit, trap, GSI_SAME_STMT);
+
+ if (BB_PARTITION (insbb))
+ BB_SET_PARTITION (trp, BB_COLD_PARTITION);
+
+ if (current_loops)
+ add_bb_to_loop (trp, current_loops->tree_root);
+
+ /* Insert a conditional branch to the trap block. If the
+ conditional wouldn't be the last stmt, split the block. */
+ gimple_stmt_iterator gsi = gsi_for_stmt (last);
+ if (!gsi_one_before_end_p (gsi))
+ split_block (gsi_bb (gsi), gsi_stmt (gsi));
+
+ gcond *cond = gimple_build_cond (NE_EXPR, ckfail,
+ fold_convert (TREE_TYPE (ckfail),
+ boolean_false_node),
+ NULL, NULL);
+ gsi_insert_after (&gsi, cond, GSI_SAME_STMT);
+
+ /* Adjust the edges. */
+ single_succ_edge (gsi_bb (gsi))->flags &= ~EDGE_FALLTHRU;
+ single_succ_edge (gsi_bb (gsi))->flags |= EDGE_FALSE_VALUE;
+ single_succ_edge (gsi_bb (gsi))->probability
+ = profile_probability::always ();
+ edge e = make_edge (gsi_bb (gsi), trp, EDGE_TRUE_VALUE);
+ e->probability = profile_probability::never ();
+
+ /* Set the trap's dominator after splitting. */
+ if (dom_info_available_p (CDI_DOMINATORS))
+ set_immediate_dominator (CDI_DOMINATORS, trp, gimple_bb (last));
+ }
+
+ /* Insert initializers for visited at the entry. Do this after
+ other insertions, to avoid messing with block numbers. */
+ gimple_seq iseq = NULL;
+
+ gcall *vinit = gimple_build_call (builtin_decl_explicit
+ (BUILT_IN_MEMSET), 3,
+ build1 (ADDR_EXPR,
+ build_pointer_type
+ (TREE_TYPE (visited)),
+ visited),
+ integer_zero_node,
+ TYPE_SIZE_UNIT (TREE_TYPE (visited)));
+ gimple_seq_add_stmt (&iseq, vinit);
+
+ gsi_insert_seq_on_edge_immediate (single_succ_edge
+ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ iseq);
+ }
+
+  /* Push onto RTCFG an (index, mask) pair to test for IBB when BB is
+ visited. XSELF is to be the ENTRY or EXIT block (depending on
+ whether we're looking at preds or succs), to be remapped to BB
+ because we can't represent them, and there's no point in testing
+ them anyway. Return true if no further blocks need to be visited
+ in the list, because we've already encountered a
+ self-reference. */
+ bool
+ push_rtcfg_pair (basic_block ibb, basic_block bb,
+ basic_block xself)
+ {
+ /* We don't have a bit to test for the entry and exit
+ blocks, but it is always visited, so we test for the
+ block itself, which gets us the right result and
+ enables the self-test optimization below. */
+ if (ibb == xself)
+ ibb = bb;
+
+ tree mask, idx = vwordidx (ibb, &mask);
+ /* Combine masks with the same idx, but not if we're going
+ to optimize for self-test. */
+ if (ibb != bb && TREE_PURPOSE (rtcfg)
+ && tree_int_cst_equal (idx, TREE_PURPOSE (rtcfg)))
+ TREE_VALUE (rtcfg) = int_const_binop (BIT_IOR_EXPR, mask,
+ TREE_VALUE (rtcfg));
+ else
+ rtcfg = tree_cons (idx, mask, rtcfg);
+
+ /* For self-tests (i.e., tests that the block itself was
+ also visited), testing anything else is pointless,
+ because it's a tautology, so just drop other edges. */
+ if (ibb == bb)
+ {
+ while (TREE_PURPOSE (TREE_CHAIN (rtcfg)))
+ TREE_CHAIN (rtcfg) = TREE_CHAIN (TREE_CHAIN (rtcfg));
+ return true;
+ }
+
+ return false;
+ }
+
+ /* Add to CKSEQ stmts to clear CKPART if OBB is visited. */
+ void
+ build_block_check (basic_block obb)
+ {
+ tree vobb = fold_convert (TREE_TYPE (ckblk),
+ vindex (obb, &ckseq));
+ gassign *blkrunp = gimple_build_assign (ckblk, vobb);
+ gimple_seq_add_stmt (&ckseq, blkrunp);
+
+ gassign *blknotrunp = gimple_build_assign (ckinv,
+ EQ_EXPR,
+ ckblk,
+ fold_convert
+ (TREE_TYPE (ckblk),
+ boolean_false_node));
+ gimple_seq_add_stmt (&ckseq, blknotrunp);
+
+ gassign *andblk = gimple_build_assign (ckpart,
+ BIT_AND_EXPR,
+ ckpart, ckinv);
+ gimple_seq_add_stmt (&ckseq, andblk);
+ }
+
+ /* Add to BB code to set its bit in VISITED, and add to RTCFG or
+ CKSEQ the data or code needed to check BB's predecessors and
+ successors. If CHECKPOINT, assume the block is a checkpoint,
+ whether or not it has an edge to EXIT. If POSTCHECK, assume the
+ block post-dominates checkpoints and therefore no bitmap setting
+ or checks are to be performed in or for it. Do NOT change the
+ CFG. */
+ void visit (basic_block bb, bool checkpoint, bool postcheck)
+ {
+ /* Set the bit in VISITED when entering the block. */
+ gimple_stmt_iterator gsi = gsi_after_labels (bb);
+ if (!postcheck)
+ gsi_insert_seq_before (&gsi, vset (bb), GSI_SAME_STMT);
+
+ if (rtcfg)
+ {
+ if (!postcheck)
+ {
+ /* Build a list of (index, mask) terminated by (NULL, 0).
+ Consolidate masks with the same index when they're
+ adjacent. First, predecessors. Count backwards, because
+ we're going to reverse the list. The order shouldn't
+ matter, but let's not make it surprising. */
+ for (int i = EDGE_COUNT (bb->preds); i--; )
+ if (push_rtcfg_pair (EDGE_PRED (bb, i)->src, bb,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)))
+ break;
+ }
+ rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg);
+
+ if (!postcheck)
+ {
+ /* Then, successors. */
+ if (!checkpoint
+ || !push_rtcfg_pair (EXIT_BLOCK_PTR_FOR_FN (cfun),
+ bb, EXIT_BLOCK_PTR_FOR_FN (cfun)))
+ for (int i = EDGE_COUNT (bb->succs); i--; )
+ if (push_rtcfg_pair (EDGE_SUCC (bb, i)->dest, bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)))
+ break;
+ }
+ rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg);
+ }
+ else if (!postcheck)
+ {
+ /* Schedule test to fail if the block was reached but somehow none
+ of its predecessors were. */
+ tree bit = fold_convert (TREE_TYPE (ckpart), vindex (bb, &ckseq));
+ gassign *blkrunp = gimple_build_assign (ckpart, bit);
+ gimple_seq_add_stmt (&ckseq, blkrunp);
+
+ for (int i = 0, e = EDGE_COUNT (bb->preds); i < e; i++)
+ build_block_check (EDGE_PRED (bb, i)->src);
+ gimple *orfailp = gimple_build_assign (ckfail, BIT_IOR_EXPR,
+ ckfail, ckpart);
+ gimple_seq_add_stmt (&ckseq, orfailp);
+
+ /* Likewise for successors. */
+ gassign *blkruns = gimple_build_assign (ckpart, unshare_expr (bit));
+ gimple_seq_add_stmt (&ckseq, blkruns);
+
+ if (checkpoint)
+ build_block_check (EXIT_BLOCK_PTR_FOR_FN (cfun));
+ for (int i = 0, e = EDGE_COUNT (bb->succs); i < e; i++)
+ build_block_check (EDGE_SUCC (bb, i)->dest);
+
+ gimple *orfails = gimple_build_assign (ckfail, BIT_IOR_EXPR,
+ ckfail, ckpart);
+ gimple_seq_add_stmt (&ckseq, orfails);
+ }
+ }
+};
+
+/* Avoid checking before noreturn calls that are known (expected,
+ really) to finish by throwing an exception, rather than by ending
+ the program or looping forever. Such functions have to be
+ annotated, with an attribute (expected_throw) or flag (ECF_XTHROW),
+ so that exception-raising functions, such as C++'s __cxa_throw,
+ __cxa_rethrow, and Ada's gnat_rcheck_*, gnat_reraise*,
+ ada.exception.raise_exception*, and the language-independent
+ unwinders could be detected here and handled differently from other
+ noreturn functions. */
+static bool
+always_throwing_noreturn_call_p (gimple *stmt)
+{
+ if (!is_a <gcall *> (stmt))
+ return is_a <gresx *> (stmt);
+
+ gcall *call = as_a <gcall *> (stmt);
+ return (gimple_call_noreturn_p (call)
+ && gimple_call_expected_throw_p (call));
+}
+
+/* Control flow redundancy hardening: record the execution path, and
+ verify at exit that an expected path was taken. */
+
+unsigned int
+pass_harden_control_flow_redundancy::execute (function *fun)
+{
+ bool const check_at_escaping_exceptions
+ = (flag_exceptions
+ && flag_harden_control_flow_redundancy_check_exceptions);
+ bool const check_before_noreturn_calls
+ = flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NEVER;
+ bool const check_before_nothrow_noreturn_calls
+ = (check_before_noreturn_calls
+ && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_NOTHROW);
+ bool const check_before_throwing_noreturn_calls
+ = (flag_exceptions
+ && check_before_noreturn_calls
+ && flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NOTHROW);
+ bool const check_before_always_throwing_noreturn_calls
+ = (flag_exceptions
+ && check_before_noreturn_calls
+ && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_ALWAYS);
+ basic_block bb;
+ basic_block bb_eh_cleanup = NULL;
+
+ if (flag_harden_control_flow_redundancy_skip_leaf)
+ {
+ bool found_calls_p = false;
+
+ FOR_EACH_BB_FN (bb, fun)
+ {
+ for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ !gsi_end_p (gsi); gsi_prev (&gsi))
+ if (is_a <gcall *> (gsi_stmt (gsi)))
+ {
+ found_calls_p = true;
+ break;
+ }
+ if (found_calls_p)
+ break;
+ }
+
+ if (!found_calls_p)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "Disabling CFR for leaf function, as requested\n");
+
+ return 0;
+ }
+ }
+
+ if (check_at_escaping_exceptions)
+ {
+ int lp_eh_cleanup = -1;
+
+ /* Record the preexisting blocks, to avoid visiting newly-created
+ blocks. */
+ auto_sbitmap to_visit (last_basic_block_for_fn (fun));
+ bitmap_clear (to_visit);
+
+ FOR_EACH_BB_FN (bb, fun)
+ bitmap_set_bit (to_visit, bb->index);
+
+ /* Scan the blocks for stmts with escaping exceptions, that
+ wouldn't be denoted in the CFG, and associate them with an
+ empty cleanup handler around the whole function. Walk
+ backwards, so that even if we split blocks, every stmt is visited. */
+ sbitmap_iterator it;
+ unsigned i;
+ EXECUTE_IF_SET_IN_BITMAP (to_visit, 0, i, it)
+ {
+ bb = BASIC_BLOCK_FOR_FN (fun, i);
+
+ for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ if (!stmt_could_throw_p (fun, stmt))
+ continue;
+
+ /* If it must not throw, or if it already has a handler,
+ we need not worry about it. */
+ if (lookup_stmt_eh_lp (stmt) != 0)
+ continue;
+
+ /* Don't split blocks at, nor add EH edges to, tail
+ calls, we will add verification before the call
+ anyway. */
+ if (is_a <gcall *> (stmt)
+ && (gimple_call_must_tail_p (as_a <gcall *> (stmt))
+ || gimple_call_tail_p (as_a <gcall *> (stmt))
+ || returning_call_p (as_a <gcall *> (stmt))))
+ continue;
+
+ if (!gsi_one_before_end_p (gsi))
+ split_block (bb, stmt);
+ /* A resx or noreturn call needs not be associated with
+ the cleanup handler if we're going to add checking
+ before it. We only test cases that didn't require
+ block splitting because noreturn calls would always
+ be at the end of blocks, and we test for zero
+ successors because if there is an edge, it's not
+ noreturn, as any EH edges would have already been
+ caught by the lookup_stmt_eh_lp test above. */
+ else if (check_before_noreturn_calls
+ && EDGE_COUNT (bb->succs) == 0
+ && (is_a <gresx *> (stmt)
+ ? check_before_always_throwing_noreturn_calls
+ : (!is_a <gcall *> (stmt)
+ || !gimple_call_noreturn_p (stmt))
+ ? (gcc_unreachable (), false)
+ : (!flag_exceptions
+ || gimple_call_nothrow_p (as_a <gcall *> (stmt)))
+ ? check_before_nothrow_noreturn_calls
+ : always_throwing_noreturn_call_p (stmt)
+ ? check_before_always_throwing_noreturn_calls
+ : check_before_throwing_noreturn_calls))
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "Bypassing cleanup for noreturn stmt"
+ " in block %i:\n",
+ bb->index);
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+ continue;
+ }
+
+ if (!bb_eh_cleanup)
+ {
+ bb_eh_cleanup = create_empty_bb (bb);
+ if (dom_info_available_p (CDI_DOMINATORS))
+ set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb);
+ if (current_loops)
+ add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root);
+
+ /* Make the new block an EH cleanup for the call. */
+ eh_region new_r = gen_eh_region_cleanup (NULL);
+ eh_landing_pad lp = gen_eh_landing_pad (new_r);
+ tree label = gimple_block_label (bb_eh_cleanup);
+ lp->post_landing_pad = label;
+ EH_LANDING_PAD_NR (label) = lp_eh_cleanup = lp->index;
+
+ /* Just propagate the exception.
+ We will later insert the verifier call. */
+ gimple_stmt_iterator ehgsi;
+ ehgsi = gsi_after_labels (bb_eh_cleanup);
+ gresx *resx = gimple_build_resx (new_r->index);
+ gsi_insert_before (&ehgsi, resx, GSI_SAME_STMT);
+
+ if (dump_file)
+ fprintf (dump_file,
+ "Created cleanup block %i:\n",
+ bb_eh_cleanup->index);
+ }
+ else if (dom_info_available_p (CDI_DOMINATORS))
+ {
+ basic_block immdom;
+ immdom = get_immediate_dominator (CDI_DOMINATORS,
+ bb_eh_cleanup);
+ if (!dominated_by_p (CDI_DOMINATORS, bb, immdom))
+ {
+ immdom = nearest_common_dominator (CDI_DOMINATORS,
+ immdom, bb);
+ set_immediate_dominator (CDI_DOMINATORS,
+ bb_eh_cleanup, immdom);
+ }
+ }
+
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "Associated cleanup block with stmt in block %i:\n",
+ bb->index);
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ add_stmt_to_eh_lp (stmt, lp_eh_cleanup);
+ /* Finally, wire the EH cleanup block into the CFG. */
+ make_eh_edges (stmt);
+ }
+ }
+
+ if (bb_eh_cleanup)
+ {
+ /* A cfg_cleanup after bb_eh_cleanup makes for a more compact
+ rtcfg, and it avoids bb numbering differences when we split
+ blocks because of trailing debug insns only. */
+ cleanup_tree_cfg ();
+ gcc_checking_assert (EDGE_COUNT (bb_eh_cleanup->succs) == 0);
+ }
+ }
+
+ /* These record blocks with calls that are to be preceded by
+ checkpoints, such as noreturn calls (if so chosen), must-tail
+ calls, potential early-marked tail calls, and returning calls (if
+ so chosen). */
+ int count_chkcall = 0;
+ auto_sbitmap chkcall_blocks (last_basic_block_for_fn (fun));
+ bitmap_clear (chkcall_blocks);
+
+ /* We wish to add verification at blocks without successors, such as
+ noreturn calls (raising or not) and the reraise at the cleanup
+ block, but not other reraises: they will go through the cleanup
+ block. */
+ if (check_before_noreturn_calls)
+ FOR_EACH_BB_FN (bb, fun)
+ {
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ if (gsi_end_p (gsi))
+ continue;
+ gimple *stmt = gsi_stmt (gsi);
+
+ if (EDGE_COUNT (bb->succs) == 0)
+ {
+ /* A stmt at the end of a block without any successors is
+ either a resx or a noreturn call without a local
+ handler. Check that it's one of the desired
+ checkpoints. */
+ if (flag_exceptions && is_a <gresx *> (stmt)
+ ? (check_before_always_throwing_noreturn_calls
+ || bb == bb_eh_cleanup)
+ : (!is_a <gcall *> (stmt)
+ || !gimple_call_noreturn_p (stmt))
+ ? (/* Catch cases in which successors would be
+ expected. */
+ gcc_unreachable (), false)
+ : (!flag_exceptions
+ || gimple_call_nothrow_p (as_a <gcall *> (stmt)))
+ ? check_before_nothrow_noreturn_calls
+ : always_throwing_noreturn_call_p (stmt)
+ ? check_before_always_throwing_noreturn_calls
+ : check_before_throwing_noreturn_calls)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "Scheduling check before stmt"
+ " in succ-less block %i:\n",
+ bb->index);
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ if (bitmap_set_bit (chkcall_blocks, bb->index))
+ count_chkcall++;
+ else
+ gcc_unreachable ();
+ }
+ continue;
+ }
+
+ /* If there are no exceptions, it would seem like any noreturn
+ call must have zero successor edges, but __builtin_return
+ gets successor edges. We don't want to handle it here, it
+ will be dealt with in sibcall_search_preds. Otherwise,
+ check for blocks without non-EH successors, but skip those
+ with resx stmts and edges (i.e., those other than that in
+ bb_eh_cleanup), since those will go through bb_eh_cleanup,
+ that will have been counted as noreturn above because it
+ has no successors. */
+ gcc_checking_assert (bb != bb_eh_cleanup
+ || !check_at_escaping_exceptions);
+ if (flag_exceptions && is_a <gresx *> (stmt)
+ ? check_before_always_throwing_noreturn_calls
+ : (!is_a <gcall *> (stmt)
+ || !gimple_call_noreturn_p (stmt))
+ ? false
+ : (!flag_exceptions
+ || gimple_call_nothrow_p (as_a <gcall *> (stmt)))
+ ? false /* rather than check_before_nothrow_noreturn_calls */
+ : always_throwing_noreturn_call_p (stmt)
+ ? check_before_always_throwing_noreturn_calls
+ : check_before_throwing_noreturn_calls)
+ {
+ gcc_checking_assert (single_succ_p (bb)
+ && (single_succ_edge (bb)->flags & EDGE_EH));
+
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "Scheduling check before stmt"
+ " in EH-succ block %i:\n",
+ bb->index);
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ if (bitmap_set_bit (chkcall_blocks, bb->index))
+ count_chkcall++;
+ else
+ gcc_unreachable ();
+ }
+ }
+ else if (bb_eh_cleanup)
+ {
+ if (bitmap_set_bit (chkcall_blocks, bb_eh_cleanup->index))
+ count_chkcall++;
+ else
+ gcc_unreachable ();
+ }
+
+ gcc_checking_assert (!bb_eh_cleanup
+ || bitmap_bit_p (chkcall_blocks, bb_eh_cleanup->index));
+
+ /* If we don't have edges to exit nor noreturn calls (including the
+ cleanup reraise), then we may skip instrumentation: that would
+ amount to a function that ends with an infinite loop. */
+ if (!count_chkcall
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) == 0)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "Disabling CFR, no exit paths to check\n");
+
+ return 0;
+ }
+
+ /* Search for must-tail calls, early-marked potential tail calls,
+ and, if requested, returning calls. As we introduce early
+ checks, record them so they are not checked again at exit. */
+ int count_postchk = 0;
+ auto_sbitmap postchk_blocks (last_basic_block_for_fn (fun));
+ bitmap_clear (postchk_blocks);
+ chk_edges_t chk_edges;
+ hardcfr_sibcall_search_preds (EXIT_BLOCK_PTR_FOR_FN (fun), chk_edges,
+ count_chkcall, chkcall_blocks,
+ count_postchk, postchk_blocks,
+ NULL);
+
+ rt_bb_visited vstd (chk_edges.length () + count_chkcall);
+
+ auto_sbitmap combined_blocks (last_basic_block_for_fn (fun));
+ bitmap_copy (combined_blocks, chkcall_blocks);
+ int i;
+ edge *e;
+ FOR_EACH_VEC_ELT (chk_edges, i, e)
+ if (!bitmap_set_bit (combined_blocks, (*e)->src->index))
+ /* There may be multiple chk_edges with the same src block;
+ guard against overlaps with chkcall_blocks only. */
+ gcc_assert (!bitmap_bit_p (chkcall_blocks, (*e)->src->index));
+
+ /* Visit blocks in index order, because building rtcfg depends on
+ that. Blocks must be compact, which the cleanup_cfg requirement
+ ensures. This would also enable FOR_EACH_BB_FN to be used to
+ iterate in index order, but bb_eh_cleanup block splits and
+ insertions change that. */
+ gcc_checking_assert (n_basic_blocks_for_fn (fun)
+ == last_basic_block_for_fn (fun));
+ for (int i = NUM_FIXED_BLOCKS; i < n_basic_blocks_for_fn (fun); i++)
+ {
+ bb = BASIC_BLOCK_FOR_FN (fun, i);
+ gcc_checking_assert (bb->index == i);
+ vstd.visit (bb, bitmap_bit_p (combined_blocks, i),
+ bitmap_bit_p (postchk_blocks, i));
+ }
+
+ vstd.check (chk_edges, count_chkcall, chkcall_blocks);
+
+ return
+ TODO_update_ssa
+ | TODO_cleanup_cfg
+ | TODO_verify_il;
+}
+
+/* Instantiate a hardcfr pass. */
+
+gimple_opt_pass *
+make_pass_harden_control_flow_redundancy (gcc::context *ctxt)
+{
+ return new pass_harden_control_flow_redundancy (ctxt);
+}
@@ -399,6 +399,10 @@ gimple_build_call_from_tree (tree t, tree fnptrtype)
gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t));
gimple_call_set_va_arg_pack (call, CALL_EXPR_VA_ARG_PACK (t));
gimple_call_set_nothrow (call, TREE_NOTHROW (t));
+ if (fndecl)
+ gimple_call_set_expected_throw (call,
+ flags_from_decl_or_type (fndecl)
+ & ECF_XTHROW);
gimple_call_set_by_descriptor (call, CALL_EXPR_BY_DESCRIPTOR (t));
copy_warning (call, t);
@@ -1535,6 +1539,8 @@ gimple_call_flags (const gimple *stmt)
if (stmt->subcode & GF_CALL_NOTHROW)
flags |= ECF_NOTHROW;
+ if (stmt->subcode & GF_CALL_XTHROW)
+ flags |= ECF_XTHROW;
if (stmt->subcode & GF_CALL_BY_DESCRIPTOR)
flags |= ECF_BY_DESCRIPTOR;
@@ -150,6 +150,7 @@ enum gf_mask {
GF_CALL_BY_DESCRIPTOR = 1 << 10,
GF_CALL_NOCF_CHECK = 1 << 11,
GF_CALL_FROM_NEW_OR_DELETE = 1 << 12,
+ GF_CALL_XTHROW = 1 << 13,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
GF_OMP_TASK_TASKLOOP = 1 << 0,
GF_OMP_TASK_TASKWAIT = 1 << 1,
@@ -3559,6 +3560,28 @@ gimple_call_nothrow_p (gcall *s)
return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
+/* If EXPECTED_THROW_P is true, GIMPLE_CALL S is a call that is known
+ to be more likely to throw than to run forever, terminate the
+ program or return by other means. */
+
+static inline void
+gimple_call_set_expected_throw (gcall *s, bool expected_throw_p)
+{
+ if (expected_throw_p)
+ s->subcode |= GF_CALL_XTHROW;
+ else
+ s->subcode &= ~GF_CALL_XTHROW;
+}
+
+/* Return true if S is a call that is more likely to end by
+ propagating an exception than by other means. */
+
+static inline bool
+gimple_call_expected_throw_p (gcall *s)
+{
+ return (gimple_call_flags (s) & ECF_XTHROW) != 0;
+}
+
/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
is known to be emitted for VLA objects. Those are wrapped by
stack_save/stack_restore calls and hence can't lead to unbounded
@@ -174,6 +174,14 @@ Maximum number of arrays per SCoP.
Common Joined UInteger Var(param_graphite_max_nb_scop_params) Init(10) Param Optimization
Maximum number of parameters in a SCoP.
+-param=hardcfr-max-blocks=
+Common Joined UInteger Var(param_hardcfr_max_blocks) Init(0) Param Optimization
+Maximum number of blocks for -fharden-control-flow-redundancy.
+
+-param=hardcfr-max-inline-blocks=
+Common Joined UInteger Var(param_hardcfr_max_inline_blocks) Init(16) Param Optimization
+Maximum number of blocks for in-line -fharden-control-flow-redundancy.
+
-param=hash-table-verification-limit=
Common Joined UInteger Var(param_hash_table_verification_limit) Init(10) Param
The number of elements for which hash table verification is done for each searched element.
@@ -193,6 +193,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_omp_device_lower);
NEXT_PASS (pass_omp_target_link);
NEXT_PASS (pass_adjust_alignment);
+ NEXT_PASS (pass_harden_control_flow_redundancy);
NEXT_PASS (pass_all_optimizations);
PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations)
NEXT_PASS (pass_remove_cgraph_callee_edges);
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -O0 -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we don't insert checking before noreturn calls. -O0 is tested
+ separately because h is not found to be noreturn without optimization. */
+
+#include "torture/harden-cfr-noret.c"
+
+/* No out-of-line checks. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */
+/* Only one inline check at the end of f and of h2. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with always. */
+
+#include "harden-cfr-abrt.c"
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with never. */
+
+#include "harden-cfr-abrt.c"
+
+/* No out-of-line checking. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 0 "hardcfr" } } */
+/* Inline checking only before return in f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with no-xthrow. */
+
+#include "harden-cfr-abrt.c"
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with =nothrow. */
+
+#include "harden-cfr-abrt.c"
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call. */
+
+int f(int i) {
+ if (!i)
+ __builtin_abort ();
+ return i;
+}
+
+int g() {
+ __builtin_abort ();
+}
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters with checking before
+ all noreturn calls. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even enabling all checks before noreturn calls (leaving
+ returning calls enabled), we get checks before __builtin_return without
+ duplication (__builtin_return is both noreturn and a returning call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before noreturn calls (leaving
+ returning calls enabled), we get checks before __builtin_return without
+ duplication (__builtin_return is both noreturn and a returning call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even enabling checks before no-xthrow-throwing noreturn calls
+ (leaving returning calls enabled), we get checks before __builtin_return
+ without duplication (__builtin_return is both noreturn and a returning
+ call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before both noreturn and returning
+ calls, we still get checks before __builtin_return. */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before returning calls (leaving noreturn
+ calls enabled), we still get checks before __builtin_return. */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even enabling checks before nothrow noreturn calls (leaving
+ returning calls enabled), we get checks before __builtin_return without
+ duplication (__builtin_return is both noreturn and a returning call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before noreturn calls (leaving returning
+ calls enabled), we still get checks before __builtin_return. */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+int f(int i) {
+ if (i)
+ __builtin_return (&i);
+ return i;
+}
+
+int g(int i) {
+ __builtin_return (&i);
+}
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters without checking before
+ noreturn calls. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param). */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 1 "hardcfr" } } */
+/* No checking for h (too many blocks) or main (no edges to exit block). */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters with checking before
+ all noreturn calls that aren't expected to throw. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
new file mode 100644
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we don't insert checking before noreturn calls. -O0 is tested
+ separately because h is not found to be noreturn without optimization, which
+ affects codegen for h2, so h2 is omitted here at -O0. */
+
+#if !__OPTIMIZE__
+# define OMIT_H2
+#endif
+
+#include "harden-cfr-noret.c"
+
+
+/* No out-of-line checks. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */
+/* Only one inline check at the end of f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that -fno-exceptions makes for implicit nothrow in noreturn
+ handling. */
+
+#define ATTR_NOTHROW_OPT
+
+#include "harden-cfr-noret.c"
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert checking before nothrow noreturn calls. */
+
+#include "harden-cfr-noret.c"
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert checking before all noreturn calls. */
+
+#ifndef ATTR_NOTHROW_OPT /* Overridden in harden-cfr-noret-noexcept. */
+#define ATTR_NOTHROW_OPT __attribute__ ((__nothrow__))
+#endif
+
+extern void __attribute__ ((__noreturn__)) ATTR_NOTHROW_OPT g (void);
+
+void f(int i) {
+ if (i)
+ /* Out-of-line checks here... */
+ g ();
+ /* ... and here. */
+}
+
+void __attribute__ ((__noinline__, __noclone__))
+h(void) {
+ /* Inline check here. */
+ g ();
+}
+
+#ifndef OMIT_H2 /* from harden-cfr-noret-never. */
+void h2(void) {
+ /* Inline check either here, whether because of noreturn or tail call... */
+ h ();
+ /* ... or here, if not optimizing. */
+}
+#endif
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-exceptions -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+#include "harden-cfr-tail.c"
+
+/* Inline checking after the calls, disabling tail calling. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 5 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 0 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters without checking before
+ nothrow noreturn calls. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
new file mode 100644
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert checks before returning calls and alternate paths, even
+ at -O0, because of the explicit command-line flag. */
+
+void g (void);
+void g2 (void);
+void g3 (void);
+
+void f (int i) {
+ if (!i)
+ /* Out-of-line checks here... */
+ g ();
+ else if (i > 0)
+ /* here... */
+ g2 ();
+ /* else */
+ /* and in the implicit else here. */
+}
+
+void f2 (int i) {
+ if (!i)
+ /* Out-of-line check here... */
+ g ();
+ else if (i > 0)
+ /* here... */
+ g2 ();
+ else
+ /* and here. */
+ g3 ();
+}
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 6 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,10 @@
+/* { dg-do run } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-skip-leaf -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Test skipping instrumentation of leaf functions. */
+
+#include "harden-cfr.c"
+
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */
+/* Only main isn't leaf. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,52 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-hardcfr-check-exceptions -fdump-tree-hardcfr -ffat-lto-objects -Wno-return-type" } */
+
+/* Check that we insert CFR checking so as to not disrupt tail calls.
+ Mandatory tail calls are not available in C, and optimizing calls as tail
+ calls only takes place after hardcfr, so we insert checking before calls
+ followed by copies and return stmts with the same return value, that might
+ (or might not) end up optimized to tail calls. */
+
+extern int g (int i);
+
+int f1(int i) {
+ /* Inline check before the returning call. */
+ return g (i);
+}
+
+extern void g2 (int i);
+
+void f2(int i) {
+ /* Inline check before the returning call, that ignores the returned value,
+ matching the value-less return. */
+ g2 (i);
+ return;
+}
+
+void f3(int i) {
+ /* Inline check before the returning call. */
+ g (i);
+}
+
+void f4(int i) {
+ if (i)
+ /* Out-of-line check before the returning call. */
+ return g2 (i);
+ /* Out-of-line check before implicit return. */
+}
+
+int f5(int i) {
+ /* Not regarded as a returning call, returning value other than callee's
+ returned value. */
+ g (i);
+ /* Inline check after the non-returning call. */
+ return i;
+}
+
+/* Out-of-line checks in f4, before returning calls and before return. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking in all other functions. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 4 "hardcfr" } } */
+/* Check before tail-call in all but f5, but f4 is out-of-line. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 3 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,84 @@
+/* { dg-do run } */
+/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects" } */
+
+/* Check the instrumentation and the parameters. */
+
+int
+f (int i, int j)
+{
+ if (i < j)
+ return 2 * i;
+ else
+ return 3 * j;
+}
+
+int
+g (unsigned i, int j)
+{
+ switch (i)
+ {
+ case 0:
+ return j * 2;
+
+ case 1:
+ return j * 3;
+
+ case 2:
+ return j * 5;
+
+ default:
+ return j * 7;
+ }
+}
+
+int
+h (unsigned i, int j) /* { dg-warning "has more than 9 blocks, the requested maximum" } */
+{
+ switch (i)
+ {
+ case 0:
+ return j * 2;
+
+ case 1:
+ return j * 3;
+
+ case 2:
+ return j * 5;
+
+ case 3:
+ return j * 7;
+
+ case 4:
+ return j * 11;
+
+ case 5:
+ return j * 13;
+
+ case 6:
+ return j * 17;
+
+ case 7:
+ return j * 19;
+
+ default:
+ return j * 23;
+ }
+}
+
+int
+main (int argc, char *argv[])
+{
+ if (f (1, 2) != 2 || g (2, 5) != 25 || h (4, 3) != 33
+ || argc < 0)
+ __builtin_abort ();
+ /* Call exit, instead of returning, to avoid an edge to the exit block and
+ thus implicitly disable hardening of main, when checking before noreturn
+ calls is disabled. */
+ __builtin_exit (0);
+}
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects -O0" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. h2 and h2b get an extra resx without ehcleanup. */
+
+#define NO_OPTIMIZE
+
+#include "torture/harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 16 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -foptimize-sibling-calls -fdump-tree-hardcfr -O0" } */
+
+/* -fhardcfr-check-returning-calls gets implicitly disabled because,
+   at -O0, -foptimize-sibling-calls has no effect.  */
+
+#define NO_OPTIMIZE
+
+#include "torture/harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fdump-tree-hardcfr -O0" } */
+
+/* Explicitly enable -fhardcfr-check-returning-calls at -O0.  */
+
+#include "torture/harden-cfr-throw.C"
+
+/* Same expectations as those in torture/harden-cfr-throw-returning.C. */
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 10 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C++ does NOT make for implicit nothrow in noreturn
+ handling. */
+
+#include "harden-cfr-noret-no-nothrow.C"
+
+/* All 3 noreturn calls. */
+/* { dg-final { scan-tree-dump-times "Bypassing cleanup" 3 "hardcfr" } } */
+/* Out-of-line checks in f. */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checks in h and h2. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C++ does NOT make for implicit nothrow in noreturn
+ handling. Expected results for =never and =nothrow are the same,
+ since the functions are not nothrow. */
+
+#include "harden-cfr-noret-no-nothrow.C"
+
+/* All 3 noreturn calls. */
+/* { dg-final { scan-tree-dump-times "Associated cleanup" 3 "hardcfr" } } */
+/* Out-of-line checks in f. */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checks in h and h2. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C++ does NOT make for implicit nothrow in noreturn
+ handling. */
+
+#define ATTR_NOTHROW_OPT
+
+#if ! __OPTIMIZE__
+void __attribute__ ((__noreturn__)) h (void);
+#endif
+
+#include "../../c-c++-common/torture/harden-cfr-noret.c"
+
+/* All 3 noreturn calls. */
+/* { dg-final { scan-tree-dump-times "Associated cleanup" 3 "hardcfr" } } */
+/* Out-of-line checks in f. */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checks in h and h2. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 14 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* h, h2, h2b, and h4. */
+/* { dg-final { scan-tree-dump-times "Bypassing" 4 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, without checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. */
+
+extern void __attribute__ ((__noreturn__, __expected_throw__)) g (void);
+extern void __attribute__ ((__noreturn__, __expected_throw__)) g2 (void);
+
+#include "harden-cfr-throw.C"
+
+/* In f and h3, there are checkpoints at return and exception escape.  */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 4 "hardcfr" } } */
+/* Other functions get a single cleanup checkpoint. */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 5 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-exceptions -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we do not insert cleanups for checking around the bodies
+ of maybe-throwing functions. h4 doesn't get any checks, because we
+ don't have noreturn checking enabled. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 6 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, without checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -foptimize-sibling-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions. These results depend on checking before
+ returning calls, which is only enabled when sibcall optimizations
+ are enabled, so change the optimization mode to -O1 for f and f2,
+ so that -foptimize-sibling-calls can take effect and enable
+ -fhardcfr-check-returning-calls, so that we get the same results.
+ There is a separate test for -O0. */
+
+#if ! __OPTIMIZE__
+void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) f(int i);
+void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) f2(int i);
+void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) h3(void);
+#endif
+
+#include "harden-cfr-throw.C"
+
+/* f gets out-of-line checks before the unwrapped tail call and in the
+ else edge. */
+/* f2 gets out-of-line checks before both unwrapped tail calls. */
+/* h gets out-of-line checks before the implicit return and in the
+ cleanup block. */
+/* h2 and h2b get out-of-line checks before the cleanup returning
+ call, and in the cleanup block. */
+/* h3 gets an inline check before the __cxa_end_catch returning call. */
+/* h4 gets an inline check in the cleanup block. */
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 10 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+#if ! __OPTIMIZE__ && ! defined NO_OPTIMIZE
+/* Without optimization, functions with cleanups end up with an extra
+ resx that is not optimized out, so arrange to optimize them. */
+void __attribute__ ((__optimize__ (1))) h2(void);
+void __attribute__ ((__optimize__ (1))) h2b(void);
+#endif
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions. */
+
+extern void g (void);
+extern void g2 (void);
+
+void f(int i) {
+ if (i)
+ g ();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void f2(int i) {
+ if (i)
+ g ();
+ else
+ g2 ();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void h(void) {
+ try {
+ g ();
+ } catch (...) {
+ throw;
+ }
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+struct needs_cleanup {
+ ~needs_cleanup();
+};
+
+void h2(void) {
+ needs_cleanup y; /* No check in the cleanup handler. */
+ g();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+extern void __attribute__ ((__nothrow__)) another_cleanup (void*);
+
+void h2b(void) {
+ int x __attribute__ ((cleanup (another_cleanup)));
+ g();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void h3(void) {
+ try {
+ throw 1;
+ } catch (...) {
+ }
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void h4(void) {
+ throw 1;
+ /* Inline check in the cleanup around the __cxa_throw noreturn call. */
+}
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C makes for implicit nothrow in noreturn handling. */
+
+#define ATTR_NOTHROW_OPT
+
+#include "../../c-c++-common/torture/harden-cfr-noret.c"
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,40 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-hardcfr-check-exceptions -fdump-tree-hardcfr -ffat-lto-objects -Wno-return-type" } */
+
+/* In C only, check some additional cases (comparing with
+ c-c++-common/torture/harden-cfr-tail.c) of falling off the end of non-void
+ function. C++ would issue an unreachable call in these cases. */
+
+extern int g (int i);
+
+int f1(int i) {
+ /* Inline check before the returning call, that doesn't return anything. */
+ g (i);
+ /* Implicit return without value, despite the return type; this combination
+ enables tail-calling of g, and is recognized as a returning call. */
+}
+
+extern void g2 (int i);
+
+int f2(int i) {
+ /* Inline check before the returning call, that disregards its return
+ value. */
+ g2 (i);
+ /* Implicit return without value, despite the return type; this combination
+ enables tail-calling of g2, and is recognized as a returning call. */
+}
+
+int f3(int i) {
+ if (i)
+ /* Out-of-line check before the returning call. */
+ return g (i);
+ /* Out-of-line check before implicit return. */
+}
+
+/* Out-of-line checks in f3, before returning calls and before return. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking in all other functions. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
+/* Check before tail-call in all functions, but f3 is out-of-line. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
new file mode 100644
@@ -0,0 +1,76 @@
+-- { dg-do run }
+-- { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-exceptions -fdump-tree-hardcfr --param=hardcfr-max-blocks=22 --param=hardcfr-max-inline-blocks=12 -O0" }
+
+procedure HardCFR is
+ function F (I, J : Integer) return Integer is
+ begin
+ if (I < J) then
+ return 2 * I;
+ else
+ return 3 * J;
+ end if;
+ end F;
+
+ function G (I : Natural; J : Integer) return Integer is
+ begin
+ case I is
+ when 0 =>
+ return J * 2;
+
+ when 1 =>
+ return J * 3;
+
+ when 2 =>
+ return J * 5;
+
+ when others =>
+ return J * 7;
+ end case;
+ end G;
+
+ function H (I : Natural; -- { dg-warning "has more than 22 blocks, the requested maximum" }
+ J : Integer)
+ return Integer is
+ begin
+ case I is
+ when 0 =>
+ return J * 2;
+
+ when 1 =>
+ return J * 3;
+
+ when 2 =>
+ return J * 5;
+
+ when 3 =>
+ return J * 7;
+
+ when 4 =>
+ return J * 11;
+
+ when 5 =>
+ return J * 13;
+
+ when 6 =>
+ return J * 17;
+
+ when 7 =>
+ return J * 19;
+
+ when others =>
+ return J * 23;
+ end case;
+ end H;
+begin
+ if (F (1, 2) /= 2 or else F (3, 2) /= 6
+ or else G (2, 5) /= 25 or else H (4, 3) /= 33)
+ then
+ raise Program_Error;
+ end if;
+end HardCFR;
+
+-- HardCFR and HardCFR.F:
+-- { dg-final { scan-tree-dump-times ".builtin_trap" 2 "hardcfr" } }
+
+-- This is __builtin___hardcfr_check in HardCFR.G:
+-- { dg-final { scan-tree-dump-times ".builtin " 1 "hardcfr" } }
@@ -95,6 +95,9 @@ struct die_struct;
/* Nonzero if this is a cold function. */
#define ECF_COLD (1 << 15)
+/* Nonzero if this is a function expected to end with an exception. */
+#define ECF_XTHROW (1 << 16)
+
/* Call argument flags. */
/* Nonzero if the argument is not used by the function. */
@@ -652,6 +652,8 @@ extern gimple_opt_pass *make_pass_gimple_isel (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_harden_compares (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_harden_conditional_branches (gcc::context
*ctxt);
+extern gimple_opt_pass *make_pass_harden_control_flow_redundancy (gcc::context
+ *ctxt);
/* Current optimization pass. */
extern opt_pass *current_pass;
@@ -9700,6 +9700,10 @@ set_call_expr_flags (tree decl, int flags)
DECL_ATTRIBUTES (decl));
if ((flags & ECF_TM_PURE) && flag_tm)
apply_tm_attr (decl, get_identifier ("transaction_pure"));
+ if ((flags & ECF_XTHROW))
+ DECL_ATTRIBUTES (decl)
+ = tree_cons (get_identifier ("expected_throw"),
+ NULL, DECL_ATTRIBUTES (decl));
/* Looping const or pure is implied by noreturn.
There is currently no way to declare looping const or looping pure alone. */
gcc_assert (!(flags & ECF_LOOPING_CONST_OR_PURE)
@@ -9912,7 +9916,8 @@ build_common_builtin_nodes (void)
ftype = build_function_type_list (void_type_node, NULL_TREE);
local_define_builtin ("__builtin_cxa_end_cleanup", ftype,
BUILT_IN_CXA_END_CLEANUP,
- "__cxa_end_cleanup", ECF_NORETURN | ECF_LEAF);
+ "__cxa_end_cleanup",
+ ECF_NORETURN | ECF_XTHROW | ECF_LEAF);
}
ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
@@ -9921,7 +9926,7 @@ build_common_builtin_nodes (void)
((targetm_common.except_unwind_info (&global_options)
== UI_SJLJ)
? "_Unwind_SjLj_Resume" : "_Unwind_Resume"),
- ECF_NORETURN);
+ ECF_NORETURN | ECF_XTHROW);
if (builtin_decl_explicit (BUILT_IN_RETURN_ADDRESS) == NULL_TREE)
{
@@ -430,6 +430,9 @@ endif
LIB2ADD += enable-execute-stack.c
+# Control Flow Redundancy hardening out-of-line checker.
+LIB2ADD += $(srcdir)/hardcfr.c
+
# While emutls.c has nothing to do with EH, it is in LIB2ADDEH*
# instead of LIB2ADD because that's the way to be sure on some targets
# (e.g. *-*-darwin*) only one copy of it is linked.
new file mode 100644
@@ -0,0 +1,279 @@
+/* Control flow redundancy hardening
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <oliva@adacore.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Avoid infinite recursion. */
+#pragma GCC optimize ("-fno-harden-control-flow-redundancy")
+
+#include <stddef.h>
+#include <stdbool.h>
+
+/* This should be kept in sync with gcc/gimple-harden-control-flow.cc. */
+#if __CHAR_BIT__ >= 28
+# define VWORDmode __QI__
+#elif __CHAR_BIT__ >= 14
+# define VWORDmode __HI__
+#else
+# define VWORDmode __SI__
+#endif
+
+typedef unsigned int __attribute__ ((__mode__ (VWORDmode))) vword;
+
+/* This function is optionally called at the end of a function to verify that
+ the VISITED array represents a sensible execution path in the CFG. It is
+ always expected to pass; the purpose is to detect attempts to subvert
+ execution by taking unexpected paths, or other execution errors. The
+ function, instrumented by pass_harden_control_flow_redundancy at a time in
+ which it had BLOCKS basic blocks (not counting ENTER and EXIT, so block 2
+ maps to index 0, the first bit of the first VWORD), sets a bit in the bit
+ array VISITED as it enters the corresponding basic block. CFG holds a
+ representation of the control flow graph at the time of the instrumentation:
+ an array of VWORDs holding, for each block, a sequence of predecessors, and
+ a sequence of successors. Each pred and succ sequence is represented as a
+ sequence of pairs (mask, index), terminated by an index-less all-zero mask.
+ If the bit corresponding to the block is set, then at least one of the pred
+ masks, and at least one of the succ masks, must have a bit set in
+ VISITED[index]. An ENTRY block predecessor and an EXIT block successor are
+ represented in a (mask, index) pair that tests the block's own bit. */
+extern void __hardcfr_check (size_t blocks,
+ vword const *visited,
+ vword const *cfg);
+
+/* Compute the MASK for the bit representing BLOCK in WORDIDX's vword in a
+ visited blocks bit array. */
+static inline void
+block2mask (size_t const block, vword *const mask, size_t *const wordidx)
+{
+ size_t wbits = __CHAR_BIT__ * sizeof (vword);
+ *wordidx = block / wbits;
+ *mask = (vword)1 << (block % wbits);
+}
+
+/* Check whether the bit corresponding to BLOCK is set in VISITED. */
+static inline bool
+visited_p (size_t const block, vword const *const visited)
+{
+ vword mask;
+ size_t wordidx;
+ block2mask (block, &mask, &wordidx);
+ vword w = visited[wordidx];
+ return (w & mask) != 0;
+}
+
+/* Read and consume a mask from **CFG_IT. (Consume meaning advancing the
+ iterator to the next word). If the mask is zero, return FALSE. Otherwise,
+ also read and consume an index, and set *MASK and/or *WORDIDX, whichever are
+ nonNULL, to the corresponding read values, and finally return TRUE. */
+static inline bool
+next_pair (vword const **const cfg_it,
+ vword *const mask,
+ size_t *const wordidx)
+{
+ vword m = **cfg_it;
+ ++*cfg_it;
+ if (!m)
+ return false;
+
+ if (mask)
+ *mask = m;
+
+ size_t word = **cfg_it;
+ ++*cfg_it;
+
+ if (wordidx)
+ *wordidx = word;
+
+ return true;
+}
+
+/* Return TRUE iff any of the bits in MASK is set in VISITED[WORDIDX]. */
+static inline bool
+test_mask (vword const *const visited,
+ vword const mask, size_t const wordidx)
+{
+ return (visited[wordidx] & mask) != 0;
+}
+
+/* Scan a sequence of pairs (mask, index) at **CFG_IT until its terminator is
+ reached and consumed. */
+static inline void
+consume_seq (vword const **const cfg_it)
+{
+ while (next_pair (cfg_it, NULL, NULL))
+ /* Do nothing. */;
+}
+
+/* Check that at least one of the MASK bits in a sequence of pairs (mask,
+   index) at **CFG_IT is set in the corresponding VISITED[INDEX] word.  Return
+   FALSE if we reach the terminator without finding any.  Consume the sequence
+ otherwise, so that *CFG_IT points just past the terminator, which may be the
+ beginning of the next sequence. */
+static inline bool
+check_seq (vword const *const visited, vword const **const cfg_it)
+{
+ vword mask;
+ size_t wordidx;
+
+ /* If the block was visited, check that at least one of the
+ preds/succs was also visited. */
+ do
+ /* If we get to the end of the sequence without finding any
+ match, something is amiss. */
+ if (!next_pair (cfg_it, &mask, &wordidx))
+ return false;
+ /* Keep searching until we find a match, at which point the
+ condition is satisfied. */
+ while (!test_mask (visited, mask, wordidx));
+
+ /* Consume the remaining entries in the sequence, whether we found a match or
+ skipped the block, so as to position the iterator at the beginning of the
+     next sequence.  */
+ consume_seq (cfg_it);
+
+ return true;
+}
+
+/* Print out the CFG with BLOCKS blocks, presumed to be associated with CALLER.
+ This is expected to be optimized out entirely, unless the verbose part of
+ __hardcfr_check_fail is enabled. */
+static inline void
+__hardcfr_debug_cfg (size_t const blocks,
+ void const *const caller,
+ vword const *const cfg)
+{
+ __builtin_printf ("CFG at %p, for %p", cfg, caller);
+ vword const *cfg_it = cfg;
+ for (size_t i = 0; i < blocks; i++)
+ {
+ vword mask; size_t wordidx;
+ block2mask (i, &mask, &wordidx);
+ __builtin_printf ("\nblock %lu (%lu/0x%lx)\npreds: ",
+ (unsigned long)i,
+ (unsigned long)wordidx, (unsigned long)mask);
+ while (next_pair (&cfg_it, &mask, &wordidx))
+ __builtin_printf (" (%lu/0x%lx)",
+ (unsigned long)wordidx, (unsigned long)mask);
+ __builtin_printf ("\nsuccs: ");
+ while (next_pair (&cfg_it, &mask, &wordidx))
+ __builtin_printf (" (%lu/0x%lx)",
+ (unsigned long)wordidx, (unsigned long)mask);
+ }
+ __builtin_printf ("\n");
+}
+
+#ifndef ATTRIBUTE_UNUSED
+# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif
+
+/* This is called when an out-of-line hardcfr check fails. All the arguments
+ are ignored, and it just traps, unless HARDCFR_VERBOSE_FAIL is enabled. IF
+ it is, it prints the PART of the CFG, expected to have BLOCKS blocks, that
+ failed at CALLER's BLOCK, and the VISITED bitmap. When the verbose mode is
+ enabled, it also forces __hardcfr_debug_cfg (above) to be compiled into an
+ out-of-line function, that could be called from a debugger.
+ */
+static inline void
+__hardcfr_check_fail (size_t const blocks ATTRIBUTE_UNUSED,
+ vword const *const visited,
+ vword const *const cfg ATTRIBUTE_UNUSED,
+ size_t const block ATTRIBUTE_UNUSED,
+ int const part ATTRIBUTE_UNUSED,
+ void const *const caller ATTRIBUTE_UNUSED)
+{
+#if HARDCFR_VERBOSE_FAIL
+ static const char *parts[] = { "preds", "succs" };
+
+ vword mask; size_t wordidx;
+ block2mask (block, &mask, &wordidx);
+ __builtin_printf ("hardcfr fail at %p block %lu (%lu/0x%lx), expected %s:",
+ caller, (unsigned long)block,
+ (unsigned long)wordidx, (unsigned long)mask,
+ parts[part]);
+
+ /* Skip data for previous blocks. */
+ vword const *cfg_it = cfg;
+ for (size_t i = block; i--; )
+ {
+ consume_seq (&cfg_it);
+ consume_seq (&cfg_it);
+ }
+ for (size_t i = part; i--; )
+ consume_seq (&cfg_it);
+
+ while (next_pair (&cfg_it, &mask, &wordidx))
+ __builtin_printf (" (%lu/0x%lx)",
+ (unsigned long)wordidx, (unsigned long)mask);
+
+ __builtin_printf ("\nvisited:");
+ block2mask (blocks, &mask, &wordidx);
+ for (size_t i = 0; i <= wordidx; i++)
+ __builtin_printf (" (%lu/0x%lx)",
+ (unsigned long)i, (unsigned long)visited[i]);
+ __builtin_printf ("\n");
+
+ /* Reference __hardcfr_debug_cfg so that it's output out-of-line, so that it
+ can be called from a debugger. */
+ if (!caller || caller == __hardcfr_debug_cfg)
+ return;
+#endif
+ __builtin_trap ();
+}
+
+/* Check that, for each of the BLOCKS basic blocks, if its bit is set in
+ VISITED, at least one of its predecessors in CFG is also set, and at also
+ that at least one of its successors in CFG is also set. */
+void
+__hardcfr_check (size_t const blocks,
+ vword const *const visited,
+ vword const *const cfg)
+{
+ vword const *cfg_it = cfg;
+ for (size_t i = 0; i < blocks; i++)
+ {
+ bool v = visited_p (i, visited);
+
+ /* For each block, there are two sequences of pairs (mask, index), each
+ sequence terminated by a single all-zero mask (no index). The first
+ sequence is for predecessor blocks, the second is for successors. At
+ least one of each must be set. */
+ if (!v)
+ {
+ /* Consume predecessors. */
+ consume_seq (&cfg_it);
+ /* Consume successors. */
+ consume_seq (&cfg_it);
+ }
+ else
+ {
+ /* Check predecessors. */
+ if (!check_seq (visited, &cfg_it))
+ __hardcfr_check_fail (blocks, visited, cfg, i, 0,
+ __builtin_return_address (0));
+ /* Check successors. */
+ if (!check_seq (visited, &cfg_it))
+ __hardcfr_check_fail (blocks, visited, cfg, i, 1,
+ __builtin_return_address (0));
+ }
+ }
+}