blob: d8b61955aa64a16e3be4b811da36bfd21a8181f0 [file] [log] [blame]
/* The analysis "engine".
Copyright (C) 2019-2022 Free Software Foundation, Inc.
Contributed by David Malcolm <dmalcolm@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "fold-const.h"
#include "gcc-rich-location.h"
#include "alloc-pool.h"
#include "fibonacci_heap.h"
#include "shortest-paths.h"
#include "diagnostic-core.h"
#include "diagnostic-event-id.h"
#include "diagnostic-path.h"
#include "function.h"
#include "pretty-print.h"
#include "sbitmap.h"
#include "bitmap.h"
#include "tristate.h"
#include "ordered-hash-map.h"
#include "selftest.h"
#include "json.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
#include "analyzer/diagnostic-manager.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-pretty-print.h"
#include "cgraph.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
#include "analyzer/program-state.h"
#include "analyzer/exploded-graph.h"
#include "analyzer/analysis-plan.h"
#include "analyzer/checker-path.h"
#include "analyzer/state-purge.h"
#include "analyzer/bar-chart.h"
#include "analyzer/call-info.h"
#include <zlib.h>
#include "plugin.h"
#include "target.h"
#include <memory>
#include "stringpool.h"
#include "attribs.h"
#include "tree-dfa.h"
/* For an overview, see gcc/doc/analyzer.texi. */
#if ENABLE_ANALYZER
namespace ana {
/* class impl_region_model_context : public region_model_context. */
/* impl_region_model_context's ctor for use when exploring the exploded
   graph EG: record the enode to use for any diagnostics
   (ENODE_FOR_DIAG), the transition from OLD_STATE to NEW_STATE being
   computed, and the stmt (if any) being processed.
   UNCERTAINTY, PATH_CTXT, STMT and STMT_FINDER may be NULL.  */

impl_region_model_context::
impl_region_model_context (exploded_graph &eg,
			   exploded_node *enode_for_diag,
			   const program_state *old_state,
			   program_state *new_state,
			   uncertainty_t *uncertainty,
			   path_context *path_ctxt,
			   const gimple *stmt,
			   stmt_finder *stmt_finder)
: m_eg (&eg), m_logger (eg.get_logger ()),
  m_enode_for_diag (enode_for_diag),
  m_old_state (old_state),
  m_new_state (new_state),
  m_stmt (stmt),
  m_stmt_finder (stmt_finder),
  m_ext_state (eg.get_ext_state ()),
  m_uncertainty (uncertainty),
  m_path_ctxt (path_ctxt)
{
}
/* impl_region_model_context's ctor for use on a standalone STATE,
   outside of exploded-graph exploration: m_eg, m_enode_for_diag,
   m_old_state, m_stmt, m_stmt_finder and m_path_ctxt are all NULL.
   Note that with m_eg NULL, warn () will reject diagnostics.  */

impl_region_model_context::
impl_region_model_context (program_state *state,
			   const extrinsic_state &ext_state,
			   uncertainty_t *uncertainty,
			   logger *logger)
: m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
  m_old_state (NULL),
  m_new_state (state),
  m_stmt (NULL),
  m_stmt_finder (NULL),
  m_ext_state (ext_state),
  m_uncertainty (uncertainty),
  m_path_ctxt (NULL)
{
}
/* Attempt to queue up diagnostic D for emission, taking ownership of it.
   Return true if it was accepted by the diagnostic_manager; return
   false (deleting D) if there's no stmt to locate it at, or if this
   context isn't wired up to an exploded_graph.  */

bool
impl_region_model_context::warn (pending_diagnostic *d)
{
  LOG_FUNC (get_logger ());
  const bool have_stmt = (m_stmt != NULL || m_stmt_finder != NULL);
  if (have_stmt && m_eg)
    return m_eg->get_diagnostic_manager ().add_diagnostic
      (m_enode_for_diag, m_enode_for_diag->get_supernode (),
       m_stmt, m_stmt_finder, d);

  /* Rejection paths: we still own D and must clean it up.  */
  if (!have_stmt)
    if (logger *logger = get_logger ())
      logger->log ("rejecting diagnostic: no stmt");
  delete d;
  return false;
}
/* Hand note PN to the diagnostic_manager, transferring ownership.
   If there's no exploded_graph we must still dispose of PN.  */

void
impl_region_model_context::add_note (pending_note *pn)
{
  LOG_FUNC (get_logger ());
  if (!m_eg)
    {
      delete pn;
      return;
    }
  m_eg->get_diagnostic_manager ().add_note (pn);
}
void
impl_region_model_context::on_svalue_leak (const svalue *sval)
{
for (sm_state_map *smap : m_new_state->m_checker_states)
smap->on_svalue_leak (sval, this);
}
void
impl_region_model_context::
on_liveness_change (const svalue_set &live_svalues,
const region_model *model)
{
for (sm_state_map *smap : m_new_state->m_checker_states)
smap->on_liveness_change (live_svalues, model, this);
}
void
impl_region_model_context::on_unknown_change (const svalue *sval,
bool is_mutable)
{
for (sm_state_map *smap : m_new_state->m_checker_states)
smap->on_unknown_change (sval, is_mutable, m_ext_state);
}
/* Pass on FNDECL to the exploded graph's record of escaped functions.
   NOTE(review): m_eg is dereferenced unconditionally here, unlike the
   other methods which guard against it being NULL — assumes this vfunc
   is only reached via the graph-based ctor; confirm callers.  */
void
impl_region_model_context::on_escaped_function (tree fndecl)
{
  m_eg->on_escaped_function (fndecl);
}
/* Accessor for the uncertainty_t associated with this context
   (may be NULL).  */
uncertainty_t *
impl_region_model_context::get_uncertainty ()
{
  return m_uncertainty;
}
/* Purge state involving SVAL. The region_model has already been purged,
so we only need to purge other state in the program_state:
the sm-state. */
void
impl_region_model_context::purge_state_involving (const svalue *sval)
{
int i;
sm_state_map *smap;
FOR_EACH_VEC_ELT (m_new_state->m_checker_states, i, smap)
smap->purge_state_involving (sval, m_ext_state);
}
/* Forward INFO (transferring ownership) to the path_context, to record
   a split in the analysis; delete it if there's no path_context.  */

void
impl_region_model_context::bifurcate (custom_edge_info *info)
{
  if (!m_path_ctxt)
    {
      delete info;
      return;
    }
  m_path_ctxt->bifurcate (info);
}
/* Request that the current analysis path be terminated, if we have a
   path_context to record that on; otherwise a no-op.  */

void
impl_region_model_context::terminate_path ()
{
  if (m_path_ctxt)
    m_path_ctxt->terminate_path ();
}
/* Look up the "malloc" state machine, if we have one; on success write
   its state map within the new state, the sm itself, and its index to
   *OUT_SMAP, *OUT_SM and *OUT_SM_IDX, and return true.
   Return false if it's not found, or if there is no new state.  */

bool
impl_region_model_context::get_malloc_map (sm_state_map **out_smap,
					   const state_machine **out_sm,
					   unsigned *out_sm_idx)
{
  /* Guard against a NULL new state, as get_taint_map does, rather
     than dereferencing m_new_state unconditionally below.  */
  if (!m_new_state)
    return false;

  unsigned malloc_sm_idx;
  if (!m_ext_state.get_sm_idx_by_name ("malloc", &malloc_sm_idx))
    return false;

  *out_smap = m_new_state->m_checker_states[malloc_sm_idx];
  *out_sm = &m_ext_state.get_sm (malloc_sm_idx);
  *out_sm_idx = malloc_sm_idx;
  return true;
}
/* Look up the "taint" state machine, if we have one; on success write
   its state map within the new state, the sm itself, and its index to
   *OUT_SMAP, *OUT_SM and *OUT_SM_IDX, and return true.
   Return false if it's not found, or if there is no new state.  */

bool
impl_region_model_context::get_taint_map (sm_state_map **out_smap,
					  const state_machine **out_sm,
					  unsigned *out_sm_idx)
{
  if (m_new_state == NULL)
    return false;

  unsigned idx;
  if (!m_ext_state.get_sm_idx_by_name ("taint", &idx))
    return false;

  *out_sm_idx = idx;
  *out_smap = m_new_state->m_checker_states[idx];
  *out_sm = &m_ext_state.get_sm (idx);
  return true;
}
/* struct setjmp_record.  */

/* Comparator for setjmp_records, ordering by the index of the
   exploded node at which setjmp was called.  */

int
setjmp_record::cmp (const setjmp_record &rec1, const setjmp_record &rec2)
{
  const int idx1 = rec1.m_enode->m_index;
  const int idx2 = rec2.m_enode->m_index;
  if (idx1 != idx2)
    return idx1 - idx2;
  /* Equal enode indices should only occur when comparing a record
     with itself.  */
  gcc_assert (&rec1 == &rec2);
  return 0;
}
/* class setjmp_svalue : public svalue.  */

/* Implementation of svalue::accept vfunc for setjmp_svalue:
   double-dispatch to the visitor.  */
void
setjmp_svalue::accept (visitor *v) const
{
  v->visit_setjmp_svalue (this);
}
/* Implementation of svalue::dump_to_pp vfunc for setjmp_svalue.
   Emit either a short or verbose rendering of the stored enode,
   depending on SIMPLE.  */

void
setjmp_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
{
  const int enode_idx = get_enode_index ();
  if (simple)
    pp_printf (pp, "SETJMP(EN: %i)", enode_idx);
  else
    pp_printf (pp, "setjmp_svalue(EN%i)", enode_idx);
}
/* Get the index of the stored exploded_node (i.e. the enode at which
   the setjmp/sigsetjmp call occurred).  */
int
setjmp_svalue::get_enode_index () const
{
  return m_setjmp_record.m_enode->m_index;
}
/* Concrete implementation of sm_context, wiring it up to the rest of this
   file.  */

class impl_sm_context : public sm_context
{
public:
  /* Ctor.  EG is the graph being explored; SM_IDX/SM identify which
     state machine this context is for; OLD_SMAP/NEW_SMAP are the
     sm-state maps for that sm within OLD_STATE/NEW_STATE.
     STMT_FINDER may be NULL; UNKNOWN_SIDE_EFFECTS is true when
     handling a call to a function with unknown side effects.  */
  impl_sm_context (exploded_graph &eg,
		   int sm_idx,
		   const state_machine &sm,
		   exploded_node *enode_for_diag,
		   const program_state *old_state,
		   program_state *new_state,
		   const sm_state_map *old_smap,
		   sm_state_map *new_smap,
		   path_context *path_ctxt,
		   stmt_finder *stmt_finder = NULL,
		   bool unknown_side_effects = false)
  : sm_context (sm_idx, sm),
    m_logger (eg.get_logger ()),
    m_eg (eg), m_enode_for_diag (enode_for_diag),
    m_old_state (old_state), m_new_state (new_state),
    m_old_smap (old_smap), m_new_smap (new_smap),
    m_path_ctxt (path_ctxt),
    m_stmt_finder (stmt_finder),
    m_unknown_side_effects (unknown_side_effects)
  {
  }

  logger *get_logger () const { return m_logger.get_logger (); }

  /* Attempt to get the fndecl called at CALL, using the new state's
     region model.  */
  tree get_fndecl_for_call (const gcall *call) FINAL OVERRIDE
  {
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
       NULL, call);
    region_model *model = m_new_state->m_region_model;
    return model->get_fndecl_for_call (call, &old_ctxt);
  }

  /* Get the old sm-state of VAR (looked up in the old region model).  */
  state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
				    tree var)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    /* Use NULL ctxt on this get_rvalue call to avoid triggering
       uninitialized value warnings.  */
    const svalue *var_old_sval
      = m_old_state->m_region_model->get_rvalue (var, NULL);

    state_machine::state_t current
      = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
    return current;
  }

  /* Get the old sm-state of SVAL.  */
  state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
				    const svalue *sval)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    state_machine::state_t current
      = m_old_smap->get_state (sval, m_eg.get_ext_state ());
    return current;
  }

  /* Set the sm-state of VAR to TO within the new state map, recording
     ORIGIN's svalue (if any) as where the state came from.  */
  void set_next_state (const gimple *stmt,
		       tree var,
		       state_machine::state_t to,
		       tree origin)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
					m_old_state, m_new_state,
					NULL, NULL,
					stmt);
    const svalue *var_new_sval
      = m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
    const svalue *origin_new_sval
      = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);

    /* We use the new sval here to avoid issues with uninitialized values.  */
    state_machine::state_t current
      = m_old_smap->get_state (var_new_sval, m_eg.get_ext_state ());
    if (logger)
      logger->log ("%s: state transition of %qE: %s -> %s",
		   m_sm.get_name (),
		   var,
		   current->get_name (),
		   to->get_name ());
    m_new_smap->set_state (m_new_state->m_region_model, var_new_sval,
			   to, origin_new_sval, m_eg.get_ext_state ());
  }

  /* As above, but with the value given directly as SVAL rather than as
     a tree to be looked up.  */
  void set_next_state (const gimple *stmt,
		       const svalue *sval,
		       state_machine::state_t to,
		       tree origin)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    /* NOTE(review): old_ctxt appears to be unused in this overload
       (its ctor only stores fields) — candidate for removal; confirm.  */
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
       NULL, stmt);

    impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
					m_old_state, m_new_state,
					NULL, NULL,
					stmt);
    const svalue *origin_new_sval
      = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);

    state_machine::state_t current
      = m_old_smap->get_state (sval, m_eg.get_ext_state ());
    if (logger)
      {
	logger->start_log_line ();
	logger->log_partial ("%s: state transition of ",
			     m_sm.get_name ());
	sval->dump_to_pp (logger->get_printer (), true);
	logger->log_partial (": %s -> %s",
			     current->get_name (),
			     to->get_name ());
	logger->end_log_line ();
      }
    m_new_smap->set_state (m_new_state->m_region_model, sval,
			   to, origin_new_sval, m_eg.get_ext_state ());
  }

  /* Queue up diagnostic D (transferring ownership) against VAR at STMT
     within SNODE, recording VAR's old sm-state (or the global sm-state
     if VAR is NULL_TREE).  */
  void warn (const supernode *snode, const gimple *stmt,
	     tree var, pending_diagnostic *d) FINAL OVERRIDE
  {
    LOG_FUNC (get_logger ());
    gcc_assert (d); // take ownership
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, NULL);

    const svalue *var_old_sval
      = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
    state_machine::state_t current
      = (var
	 ? m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ())
	 : m_old_smap->get_global_state ());
    m_eg.get_diagnostic_manager ().add_diagnostic
      (&m_sm, m_enode_for_diag, snode, stmt, m_stmt_finder,
       var, var_old_sval, current, d);
  }

  /* Hook for picking more readable trees for SSA names of temporaries,
     so that rather than e.g.
       "double-free of '<unknown>'"
     we can print:
       "double-free of 'inbuf.data'".  */
  tree get_diagnostic_tree (tree expr) FINAL OVERRIDE
  {
    /* Only for SSA_NAMEs of temporaries; otherwise, return EXPR, as it's
       likely to be the least surprising tree to report.  */
    if (TREE_CODE (expr) != SSA_NAME)
      return expr;
    if (SSA_NAME_VAR (expr) != NULL)
      return expr;

    gcc_assert (m_new_state);
    const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, NULL);
    /* Find trees for all regions storing the value.  */
    if (tree t = m_new_state->m_region_model->get_representative_tree (sval))
      return t;
    else
      return expr;
  }

  /* Get a representative tree for SVAL from the new region model, for
     use in diagnostics (may be NULL_TREE).  */
  tree get_diagnostic_tree (const svalue *sval) FINAL OVERRIDE
  {
    return m_new_state->m_region_model->get_representative_tree (sval);
  }

  /* Get this sm's global state within the old program state.  */
  state_machine::state_t get_global_state () const FINAL OVERRIDE
  {
    return m_old_state->m_checker_states[m_sm_idx]->get_global_state ();
  }

  /* Set this sm's global state within the new program state.  */
  void set_global_state (state_machine::state_t state) FINAL OVERRIDE
  {
    m_new_state->m_checker_states[m_sm_idx]->set_global_state (state);
  }

  /* Let TRANSITION implement a state-machine-specific change on the
     exploded graph.  */
  void on_custom_transition (custom_transition *transition) FINAL OVERRIDE
  {
    transition->impl_transition (&m_eg,
				 const_cast<exploded_node *> (m_enode_for_diag),
				 m_sm_idx);
  }

  /* If STMT is an assignment whose result is constant zero, return its
     LHS; otherwise return NULL_TREE.  */
  tree is_zero_assignment (const gimple *stmt) FINAL OVERRIDE
  {
    const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
    if (!assign_stmt)
      return NULL_TREE;
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, stmt);
    if (const svalue *sval
	= m_new_state->m_region_model->get_gassign_result (assign_stmt,
							   &old_ctxt))
      if (tree cst = sval->maybe_get_constant ())
	if (::zerop(cst))
	  return gimple_assign_lhs (assign_stmt);
    return NULL_TREE;
  }

  /* Accessor for the path_context (may be NULL).  */
  path_context *get_path_context () const FINAL OVERRIDE
  {
    return m_path_ctxt;
  }

  /* Are we handling a call to an external function with unknown side
     effects?  */
  bool unknown_side_effects_p () const FINAL OVERRIDE
  {
    return m_unknown_side_effects;
  }

  /* Accessor for the old program state.  */
  const program_state *get_old_program_state () const FINAL OVERRIDE
  {
    return m_old_state;
  }

  log_user m_logger;
  exploded_graph &m_eg;
  exploded_node *m_enode_for_diag;
  const program_state *m_old_state;
  program_state *m_new_state;
  const sm_state_map *m_old_smap;
  sm_state_map *m_new_smap;
  path_context *m_path_ctxt;
  stmt_finder *m_stmt_finder;

  /* Are we handling an external function with unknown side effects?  */
  bool m_unknown_side_effects;
};
/* Subclass of stmt_finder for finding the best stmt to report the leak at,
   given the emission path.  */

class leak_stmt_finder : public stmt_finder
{
public:
  /* EG is the graph being explored; VAR is the tree for the leaked
     value (may be NULL_TREE).  */
  leak_stmt_finder (const exploded_graph &eg, tree var)
  : m_eg (eg), m_var (var) {}

  /* Duplicate this stmt_finder; caller takes ownership.  */
  stmt_finder *clone () const FINAL OVERRIDE
  {
    return new leak_stmt_finder (m_eg, m_var);
  }

  /* Find the best stmt within EPATH at which to locate the leak
     report.  For an SSA name, prefer the next write to the underlying
     var after the name's def-stmt; otherwise fall back to the last
     stmt in the path that has a usable location.  */
  const gimple *find_stmt (const exploded_path &epath)
    FINAL OVERRIDE
  {
    logger * const logger = m_eg.get_logger ();
    LOG_FUNC (logger);

    if (m_var && TREE_CODE (m_var) == SSA_NAME)
      {
	/* Locate the final write to this SSA name in the path.  */
	const gimple *def_stmt = SSA_NAME_DEF_STMT (m_var);

	int idx_of_def_stmt;
	bool found = epath.find_stmt_backwards (def_stmt, &idx_of_def_stmt);
	if (!found)
	  goto not_found;

	/* What was the next write to the underlying var
	   after the SSA name was set? (if any).  */
	for (unsigned idx = idx_of_def_stmt + 1;
	     idx < epath.m_edges.length ();
	     ++idx)
	  {
	    const exploded_edge *eedge = epath.m_edges[idx];
	    if (logger)
	      logger->log ("eedge[%i]: EN %i -> EN %i",
			   idx,
			   eedge->m_src->m_index,
			   eedge->m_dest->m_index);
	    const exploded_node *dst_node = eedge->m_dest;
	    const program_point &dst_point = dst_node->get_point ();
	    const gimple *stmt = dst_point.get_stmt ();
	    if (!stmt)
	      continue;
	    /* Look for a write to the underlying var of M_VAR.  */
	    if (const gassign *assign = dyn_cast <const gassign *> (stmt))
	      {
		tree lhs = gimple_assign_lhs (assign);
		if (TREE_CODE (lhs) == SSA_NAME
		    && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (m_var))
		  return assign;
	      }
	  }
      }

  not_found:

    /* Look backwards for the first statement with a location.  */
    int i;
    const exploded_edge *eedge;
    FOR_EACH_VEC_ELT_REVERSE (epath.m_edges, i, eedge)
      {
	if (logger)
	  logger->log ("eedge[%i]: EN %i -> EN %i",
		       i,
		       eedge->m_src->m_index,
		       eedge->m_dest->m_index);
	const exploded_node *dst_node = eedge->m_dest;
	const program_point &dst_point = dst_node->get_point ();
	const gimple *stmt = dst_point.get_stmt ();
	if (stmt)
	  if (get_pure_location (stmt->location) != UNKNOWN_LOCATION)
	    return stmt;
      }

    gcc_unreachable ();
    return NULL;
  }

private:
  const exploded_graph &m_eg;
  tree m_var;
};
/* A measurement of how good EXPR is for presenting to the user, so
   that e.g. we can say prefer printing
     "leak of 'tmp.m_ptr'"
   over:
     "leak of '<unknown>'".
   Higher values are more readable; negative values mean "don't print
   this".  Recurses through component/deref/cast expressions, applying
   small penalties at each level.  */

static int
readability (const_tree expr)
{
  /* Arbitrarily-chosen "high readability" value.  */
  const int HIGH_READABILITY = 65536;

  gcc_assert (expr);
  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
    case MEM_REF:
      /* Impose a slight readability penalty relative to that of
	 operand 0.  */
      return readability (TREE_OPERAND (expr, 0)) - 16;

    case SSA_NAME:
      {
	if (tree var = SSA_NAME_VAR (expr))
	  {
	    if (DECL_ARTIFICIAL (var))
	      {
		/* If we have an SSA name for an artificial var,
		   only use it if it has a debug expr associated with
		   it that fixup_tree_for_diagnostic can use.  */
		if (VAR_P (var) && DECL_HAS_DEBUG_EXPR_P (var))
		  return readability (DECL_DEBUG_EXPR (var)) - 1;
	      }
	    else
	      {
		/* Slightly favor the underlying var over the SSA name to
		   avoid having them compare equal.  */
		return readability (var) - 1;
	      }
	  }
	/* Avoid printing '<unknown>' for SSA names for temporaries.  */
	return -1;
      }
      break;

    case PARM_DECL:
    case VAR_DECL:
      if (DECL_NAME (expr))
	return HIGH_READABILITY;
      else
	/* We don't want to print temporaries.  For example, the C FE
	   prints them as e.g. "<Uxxxx>" where "xxxx" is the low 16 bits
	   of the tree pointer (see pp_c_tree_decl_identifier).  */
	return -1;

    case RESULT_DECL:
      /* Printing "<return-value>" isn't ideal, but is less awful than
	 trying to print a temporary.  */
      return HIGH_READABILITY / 2;

    case NOP_EXPR:
      {
	/* Impose a moderate readability penalty for casts.  */
	const int CAST_PENALTY = 32;
	return readability (TREE_OPERAND (expr, 0)) - CAST_PENALTY;
      }

    case INTEGER_CST:
      return HIGH_READABILITY;

    default:
      return 0;
    }

  return 0;
}
/* A qsort comparator for trees to sort them into most user-readable to
   least user-readable.
   P1 and P2 point at path_var instances; compares first by a combined
   score of tree readability and stack depth, then by readability alone,
   then by deterministic tie-breakers on the trees themselves.  */

int
readability_comparator (const void *p1, const void *p2)
{
  path_var pv1 = *(path_var const *)p1;
  path_var pv2 = *(path_var const *)p2;

  const int tree_r1 = readability (pv1.m_tree);
  const int tree_r2 = readability (pv2.m_tree);

  /* Favor items that are deeper on the stack and hence more recent;
     this also favors locals over globals.  */
  const int COST_PER_FRAME = 64;
  const int depth_r1 = pv1.m_stack_depth * COST_PER_FRAME;
  const int depth_r2 = pv2.m_stack_depth * COST_PER_FRAME;

  /* Combine the scores from the tree and from the stack depth.
     This e.g. lets us have a slightly penalized cast in the most
     recent stack frame "beat" an uncast value in an older stack frame.  */
  const int sum_r1 = tree_r1 + depth_r1;
  const int sum_r2 = tree_r2 + depth_r2;
  if (int cmp = sum_r2 - sum_r1)
    return cmp;

  /* Otherwise, more readable trees win.  */
  if (int cmp = tree_r2 - tree_r1)
    return cmp;

  /* Otherwise, if they have the same readability, then impose an
     arbitrary deterministic ordering on them.  */

  if (int cmp = TREE_CODE (pv1.m_tree) - TREE_CODE (pv2.m_tree))
    return cmp;

  switch (TREE_CODE (pv1.m_tree))
    {
    default:
      break;
    case SSA_NAME:
      if (int cmp = (SSA_NAME_VERSION (pv1.m_tree)
		     - SSA_NAME_VERSION (pv2.m_tree)))
	return cmp;
      break;
    case PARM_DECL:
    case VAR_DECL:
    case RESULT_DECL:
      if (int cmp = DECL_UID (pv1.m_tree) - DECL_UID (pv2.m_tree))
	return cmp;
      break;
    }

  /* TODO: We ought to find ways of sorting such cases.  */
  return 0;
}
/* Return true if SNODE is the EXIT node of a function, or is one
   of the final snodes within its function.
   Specifically, handle the final supernodes before the EXIT node,
   for the case of clobbers that happen immediately before exiting.
   We need a run of snodes leading to the return_p snode, where all edges
   are intraprocedural, and every snode has just one successor.
   We use this when suppressing leak reports at the end of "main".  */

static bool
returning_from_function_p (const supernode *snode)
{
  if (!snode)
    return false;

  /* Walk forwards through single-successor intraprocedural CFG edges,
     looking for a returning snode.  Only look at the first few snodes
     (e.g. clobber BB, label/return BB, EXIT BB) so that pathological
     CFGs still terminate quickly.  */
  const supernode *iter = snode;
  for (unsigned visited = 0; visited <= 3; ++visited)
    {
      if (iter->return_p ())
	return true;
      if (iter->m_succs.length () != 1)
	return false;
      const superedge *sedge = iter->m_succs[0];
      if (sedge->get_kind () != SUPEREDGE_CFG_EDGE)
	return false;
      iter = sedge->m_dest;
    }
  return false;
}
/* Find the best tree for SVAL and call SM's on_leak vfunc with it.
   If on_leak returns a pending_diagnostic, queue it up to be reported,
   so that we potentially complain about a leak of SVAL in the given STATE.  */

void
impl_region_model_context::on_state_leak (const state_machine &sm,
					  const svalue *sval,
					  state_machine::state_t state)
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);
  if (logger)
    {
      logger->start_log_line ();
      logger->log_partial ("considering leak of ");
      sval->dump_to_pp (logger->get_printer (), true);
      logger->end_log_line ();
    }

  /* Can't report a diagnostic without an exploded graph.  */
  if (!m_eg)
    return;

  /* m_old_state also needs to be non-NULL so that the sm_ctxt can look
     up the old state of SVAL.  */
  gcc_assert (m_old_state);

  /* SVAL has leaked within the new state: it is not used by any reachable
     regions.
     We need to convert it back to a tree, but since it's likely no regions
     use it, we have to find the "best" tree for it in the old_state.  */
  svalue_set visited;
  path_var leaked_pv
    = m_old_state->m_region_model->get_representative_path_var (sval,
								&visited);

  /* Strip off top-level casts  */
  if (leaked_pv.m_tree && TREE_CODE (leaked_pv.m_tree) == NOP_EXPR)
    leaked_pv.m_tree = TREE_OPERAND (leaked_pv.m_tree, 0);

  /* This might be NULL; the pending_diagnostic subclasses need to cope
     with this.  */
  tree leaked_tree = leaked_pv.m_tree;
  if (logger)
    {
      if (leaked_tree)
	logger->log ("best leaked_tree: %qE", leaked_tree);
      else
	logger->log ("best leaked_tree: NULL");
    }

  leak_stmt_finder stmt_finder (*m_eg, leaked_tree);
  gcc_assert (m_enode_for_diag);

  /* Don't complain about leaks when returning from "main".  */
  if (returning_from_function_p (m_enode_for_diag->get_supernode ()))
    {
      tree fndecl = m_enode_for_diag->get_function ()->decl;
      if (id_equal (DECL_NAME (fndecl), "main"))
	{
	  if (logger)
	    logger->log ("not reporting leak from main");
	  return;
	}
    }

  tree leaked_tree_for_diag = fixup_tree_for_diagnostic (leaked_tree);
  /* Let the state machine decide whether this is worth reporting;
     it takes ownership of the resulting pending_diagnostic (if any)
     via add_diagnostic.  */
  pending_diagnostic *pd = sm.on_leak (leaked_tree_for_diag);
  if (pd)
    m_eg->get_diagnostic_manager ().add_diagnostic
      (&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
       m_stmt, &stmt_finder,
       leaked_tree_for_diag, sval, state, pd);
}
/* Implementation of region_model_context::on_condition vfunc.
Notify all state machines about the condition, which could lead to
state transitions. */
void
impl_region_model_context::on_condition (const svalue *lhs,
enum tree_code op,
const svalue *rhs)
{
int sm_idx;
sm_state_map *smap;
FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
{
const state_machine &sm = m_ext_state.get_sm (sm_idx);
impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
m_old_state, m_new_state,
m_old_state->m_checker_states[sm_idx],
m_new_state->m_checker_states[sm_idx],
m_path_ctxt);
sm.on_condition (&sm_ctxt,
(m_enode_for_diag
? m_enode_for_diag->get_supernode ()
: NULL),
m_stmt,
lhs, op, rhs);
}
}
/* Implementation of region_model_context::on_phi vfunc.
Notify all state machines about the phi, which could lead to
state transitions. */
void
impl_region_model_context::on_phi (const gphi *phi, tree rhs)
{
int sm_idx;
sm_state_map *smap;
FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
{
const state_machine &sm = m_ext_state.get_sm (sm_idx);
impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
m_old_state, m_new_state,
m_old_state->m_checker_states[sm_idx],
m_new_state->m_checker_states[sm_idx],
m_path_ctxt);
sm.on_phi (&sm_ctxt, m_enode_for_diag->get_supernode (), phi, rhs);
}
}
/* Implementation of region_model_context::on_unexpected_tree_code vfunc.
   Log the unhandled tree code T (seen at LOC) and mark the new state as
   being invalid for further exploration.
   TODO(stage1): introduce a warning for when this occurs.  */

void
impl_region_model_context::on_unexpected_tree_code (tree t,
						    const dump_location_t &loc)
{
  if (logger *logger = get_logger ())
    {
      const auto impl_loc = loc.get_impl_location ();
      logger->log ("unhandled tree code: %qs in %qs at %s:%i",
		   get_tree_code_name (TREE_CODE (t)),
		   impl_loc.m_function,
		   impl_loc.m_file,
		   impl_loc.m_line);
    }
  if (m_new_state)
    m_new_state->m_valid = false;
}
/* struct point_and_state.  */

/* Assert that this object is sane.  */

void
point_and_state::validate (const extrinsic_state &ext_state) const
{
  /* Skip this in a release build.  Note that this early return is
     inside the function body (rather than compiling the body out) so
     the signature stays the same in both configurations.  */
#if !CHECKING_P
  return;
#endif

  m_point.validate ();
  m_state.validate (ext_state);

  /* Verify that the callstring's model of the stack corresponds to that
     of the region_model.  */
  /* They should have the same depth.  */
  gcc_assert (m_point.get_stack_depth ()
	      == m_state.m_region_model->get_stack_depth ());

  /* Check the functions in the callstring vs those in the frames
     at each depth.  */
  for (const frame_region *iter_frame
	 = m_state.m_region_model->get_current_frame ();
       iter_frame; iter_frame = iter_frame->get_calling_frame ())
    {
      int index = iter_frame->get_index ();
      gcc_assert (m_point.get_function_at_depth (index)
		  == iter_frame->get_function ());
    }
}
/* Subroutine of print_enode_indices: print a run of indices from START_IDX
   to END_IDX to PP, using and updating *FIRST_RUN (so that runs after the
   first are separated by ", ").  */

static void
print_run (pretty_printer *pp, int start_idx, int end_idx,
	   bool *first_run)
{
  if (*first_run)
    *first_run = false;
  else
    pp_string (pp, ", ");
  if (start_idx != end_idx)
    pp_printf (pp, "EN: %i-%i", start_idx, end_idx);
  else
    pp_printf (pp, "EN: %i", start_idx);
}
/* Print the indices within ENODES to PP, collecting them as
   runs/singletons e.g. "EN: 4-7, EN: 20-23, EN: 42".
   Assumes ENODES is in increasing index order (consecutive indices are
   merged into a run).  */

static void
print_enode_indices (pretty_printer *pp,
		     const auto_vec<exploded_node *> &enodes)
{
  /* -1 means "no run in progress".  */
  int cur_start_idx = -1;
  int cur_finish_idx = -1;
  bool first_run = true;
  unsigned i;
  exploded_node *enode;
  FOR_EACH_VEC_ELT (enodes, i, enode)
    {
      if (cur_start_idx == -1)
	{
	  /* First enode seen: start the first run.  */
	  gcc_assert (cur_finish_idx == -1);
	  cur_start_idx = cur_finish_idx = enode->m_index;
	}
      else
	{
	  if (enode->m_index == cur_finish_idx + 1)
	    /* Continuation of a run.  */
	    cur_finish_idx = enode->m_index;
	  else
	    {
	      /* Finish existing run, start a new one.  */
	      gcc_assert (cur_start_idx >= 0);
	      gcc_assert (cur_finish_idx >= 0);
	      print_run (pp, cur_start_idx, cur_finish_idx,
			 &first_run);
	      cur_start_idx = cur_finish_idx = enode->m_index;
	    }
	}
    }
  /* Finish any existing run.  */
  if (cur_start_idx >= 0)
    {
      gcc_assert (cur_finish_idx >= 0);
      print_run (pp, cur_start_idx, cur_finish_idx,
		 &first_run);
    }
}
/* struct eg_traits::dump_args_t.  */

/* Heuristic for .eg.dot output: rendering full detail for every enode
   quickly becomes unwieldy (both for the .dot viewer and for a human
   reader), so full detail is only shown when the graph is small, or at
   the specific enodes where the per-program-point limit was exceeded
   (i.e. where a state explosion happened and is worth investigating).
   Return true if ENODE should be shown in detail in .dot output;
   return false if only its ID should be shown.  */

bool
eg_traits::dump_args_t::show_enode_details_p (const exploded_node &enode) const
{
  /* Small graph: show everything in full.  */
  const unsigned num_enodes = m_eg.m_nodes.length ();
  if (num_enodes <= (unsigned) param_analyzer_max_enodes_for_full_dump)
    return true;

  /* Large graph: only expand enodes at program points where the
     per-enode limit was hit.  */
  const per_program_point_data *per_point_data
    = m_eg.get_per_program_point_data (enode.get_point ());
  return per_point_data->m_excess_enodes > 0;
}
/* class exploded_node : public dnode<eg_traits>.  */

/* Get a string for status S, for use in dumps.  */

const char *
exploded_node::status_to_str (enum status s)
{
  switch (s)
    {
    case STATUS_WORKLIST:
      return "WORKLIST";
    case STATUS_PROCESSED:
      return "PROCESSED";
    case STATUS_MERGER:
      return "MERGER";
    case STATUS_BULK_MERGED:
      return "BULK_MERGED";
    default:
      gcc_unreachable ();
    }
}
/* exploded_node's ctor.  PS is the point/state pair; INDEX is this
   node's unique index within the exploded graph.  New nodes start out
   in the worklist.  */

exploded_node::exploded_node (const point_and_state &ps,
			      int index)
: m_ps (ps), m_status (STATUS_WORKLIST), m_index (index),
  m_num_processed_stmts (0)
{
  /* The state is shared/hashed, so it must already be in canonical
     form.  */
  gcc_checking_assert (ps.get_state ().m_region_model->canonicalized_p ());
}
/* Get the stmt that was processed in this enode at index IDX.
   IDX is an index within the stmts processed at this enode, rather
   than within those of the supernode.  */

const gimple *
exploded_node::get_processed_stmt (unsigned idx) const
{
  gcc_assert (idx < m_num_processed_stmts);
  const program_point &point = get_point ();
  gcc_assert (point.get_kind () == PK_BEFORE_STMT);
  /* Offset IDX by the point's position within the supernode's stmts.  */
  const unsigned int idx_within_snode = point.get_stmt_idx () + idx;
  return get_supernode ()->m_stmts[idx_within_snode];
}
/* For use by dump_dot, get a value for the .dot "fillcolor" attribute.
   Colorize by sm-state, to make it easier to see how sm-state propagates
   through the exploded_graph.  */

const char *
exploded_node::get_dot_fillcolor () const
{
  const program_state &state = get_state ();

  /* We want to be able to easily distinguish the no-sm-state case,
     and to be able to distinguish cases where there's a single state
     from each other.

     Sum the sm_states, and use the result to choose from a table,
     modulo table-size, special-casing the "no sm-state" case.  */
  int total_sm_state = 0;
  int i;
  sm_state_map *smap;
  FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
    {
      /* Sum the ids of every per-svalue state, plus each map's global
	 state.  */
      for (sm_state_map::iterator_t iter = smap->begin ();
	   iter != smap->end ();
	   ++iter)
	total_sm_state += (*iter).second.m_state->get_id ();
      total_sm_state += smap->get_global_state ()->get_id ();
    }

  if (total_sm_state > 0)
    {
      /* An arbitrarily-picked collection of light colors.  */
      const char * const colors[]
	= {"azure", "coral", "cornsilk", "lightblue", "yellow",
	   "honeydew", "lightpink", "lightsalmon", "palegreen1",
	   "wheat", "seashell"};
      const int num_colors = sizeof (colors) / sizeof (colors[0]);
      return colors[total_sm_state % num_colors];
    }
  else
    /* No sm-state.  */
    return "lightgrey";
}
/* Implementation of dnode::dump_dot vfunc for exploded_node.
   Emit a .dot node for this enode: the ID and status always, plus
   (when show_enode_details_p says so) the point, state and processed
   stmts, along with any saved diagnostics and extra info from ARGS.  */

void
exploded_node::dump_dot (graphviz_out *gv, const dump_args_t &args) const
{
  pretty_printer *pp = gv->get_pp ();

  dump_dot_id (pp);
  pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
	     get_dot_fillcolor ());
  pp_write_text_to_stream (pp);

  pp_printf (pp, "EN: %i", m_index);
  if (m_status == STATUS_MERGER)
    pp_string (pp, " (merger)");
  else if (m_status == STATUS_BULK_MERGED)
    pp_string (pp, " (bulk merged)");
  pp_newline (pp);

  if (args.show_enode_details_p (*this))
    {
      format f (true);
      m_ps.get_point ().print (pp, f);
      pp_newline (pp);

      const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
      const program_state &state = m_ps.get_state ();
      state.dump_to_pp (ext_state, false, true, pp);
      pp_newline (pp);

      dump_processed_stmts (pp);
    }

  dump_saved_diagnostics (pp);

  args.dump_extra_info (this, pp);

  /* The label text must be escaped for .dot.  */
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);

  pp_string (pp, "\"];\n\n");
  pp_flush (pp);
}
/* Show any stmts that were processed within this enode,
   and their index within the supernode.  */

void
exploded_node::dump_processed_stmts (pretty_printer *pp) const
{
  if (m_num_processed_stmts == 0)
    return;

  const program_point &point = get_point ();
  gcc_assert (point.get_kind () == PK_BEFORE_STMT);
  const supernode *snode = get_supernode ();
  const unsigned int first_stmt_idx = point.get_stmt_idx ();

  pp_printf (pp, "stmts: %i", m_num_processed_stmts);
  pp_newline (pp);
  for (unsigned i = 0; i < m_num_processed_stmts; i++)
    {
      const unsigned int idx_within_snode = first_stmt_idx + i;
      pp_printf (pp, " %i: ", idx_within_snode);
      pp_gimple_stmt_1 (pp, snode->m_stmts[idx_within_snode],
			0, (dump_flags_t)0);
      pp_newline (pp);
    }
}
/* Dump any saved_diagnostics at this enode to PP, one per line,
   giving each diagnostic's kind and index.  */

void
exploded_node::dump_saved_diagnostics (pretty_printer *pp) const
{
  for (unsigned idx = 0; idx < m_saved_diagnostics.length (); idx++)
    {
      const saved_diagnostic *sd = m_saved_diagnostics[idx];
      pp_printf (pp, "DIAGNOSTIC: %s (sd: %i)",
		 sd->m_d->get_kind (), sd->get_index ());
      pp_newline (pp);
    }
}
/* Dump this to PP in a form suitable for use as an id in .dot output
   (e.g. "exploded_node_42").  */

void
exploded_node::dump_dot_id (pretty_printer *pp) const
{
  pp_printf (pp, "exploded_node_%i", m_index);
}
/* Dump a multiline representation of this node to PP: the index,
   the program point, and the program state (using EXT_STATE to
   interpret the per-sm state).  */

void
exploded_node::dump_to_pp (pretty_printer *pp,
			   const extrinsic_state &ext_state) const
{
  pp_printf (pp, "EN: %i", m_index);
  pp_newline (pp);

  format f (true);
  m_ps.get_point ().print (pp, f);
  pp_newline (pp);

  m_ps.get_state ().dump_to_pp (ext_state, false, true, pp);
  pp_newline (pp);
}
/* Dump a multiline representation of this node to FILE.  */

void
exploded_node::dump (FILE *fp,
		     const extrinsic_state &ext_state) const
{
  pretty_printer printer;
  /* Use the tree-aware format decoder so %qE etc. work, and mirror the
     colorization choice of the global diagnostic context.  */
  pp_format_decoder (&printer) = default_tree_printer;
  pp_show_color (&printer) = pp_show_color (global_dc->printer);
  printer.buffer->stream = fp;
  dump_to_pp (&printer, ext_state);
  pp_flush (&printer);
}
/* Dump a multiline representation of this node to stderr.
   Convenience overload, intended for use from the debugger.  */

DEBUG_FUNCTION void
exploded_node::dump (const extrinsic_state &ext_state) const
{
  dump (stderr, ext_state);
}
/* Return a new json::object of the form
   {"point"  : object for program_point,
    "state"  : object for program_state,
    "status" : str,
    "idx"    : int,
    "processed_stmts" : int}.
   Caller takes ownership of the result.  */

json::object *
exploded_node::to_json (const extrinsic_state &ext_state) const
{
  json::object *obj = new json::object ();
  obj->set ("point", get_point ().to_json ());
  obj->set ("state", get_state ().to_json (ext_state));
  obj->set ("status", new json::string (status_to_str (m_status)));
  obj->set ("idx", new json::integer_number (m_index));
  obj->set ("processed_stmts",
	    new json::integer_number (m_num_processed_stmts));
  return obj;
}
} // namespace ana
/* Return true if FNDECL has a gimple body.  */
// TODO: is there a pre-canned way to do this?

bool
fndecl_has_gimple_body_p (tree fndecl)
{
  if (fndecl == NULL_TREE)
    return false;

  /* A decl with no cgraph node certainly has no body.  */
  cgraph_node *node = cgraph_node::get (fndecl);
  return node != NULL && node->has_gimple_body_p ();
}
namespace ana {
/* Modify STATE in place, applying the effects of the stmt at this node's
   point.  Returns flags indicating whether the path should continue or
   be terminated.  */

exploded_node::on_stmt_flags
exploded_node::on_stmt (exploded_graph &eg,
			const supernode *snode,
			const gimple *stmt,
			program_state *state,
			uncertainty_t *uncertainty,
			path_context *path_ctxt)
{
  logger *logger = eg.get_logger ();
  LOG_SCOPE (logger);
  if (logger)
    {
      logger->start_log_line ();
      pp_gimple_stmt_1 (logger->get_printer (), stmt, 0, (dump_flags_t)0);
      logger->end_log_line ();
    }

  /* Update input_location in case of ICE: make it easier to track down which
     source construct we're failing to handle.  */
  input_location = stmt->location;

  gcc_assert (state->m_region_model);

  /* Preserve the old state.  It is used here for looking
     up old checker states, for determining state transitions, and
     also within impl_region_model_context and impl_sm_context for
     going from tree to svalue_id.  */
  const program_state old_state (*state);

  impl_region_model_context ctxt (eg, this,
				  &old_state, state, uncertainty,
				  path_ctxt, stmt);

  bool unknown_side_effects = false;
  bool terminate_path = false;

  /* First apply the region-model (and special-case call) effects;
     this may request early termination of the path.  */
  on_stmt_pre (eg, stmt, state, &terminate_path,
	       &unknown_side_effects, &ctxt);

  if (terminate_path)
    return on_stmt_flags::terminate_path ();

  int sm_idx;
  sm_state_map *smap;
  /* Then give each state machine a chance to respond to the stmt,
     computing transitions against the pre-stmt OLD_STATE.  */
  FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
    {
      const state_machine &sm = eg.get_ext_state ().get_sm (sm_idx);
      const sm_state_map *old_smap
	= old_state.m_checker_states[sm_idx];
      sm_state_map *new_smap = state->m_checker_states[sm_idx];
      impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
			       old_smap, new_smap, path_ctxt, NULL,
			       unknown_side_effects);

      /* Allow the state_machine to handle the stmt.  If it does, the
	 stmt's side effects are considered known from then on.  */
      if (sm.on_stmt (&sm_ctxt, snode, stmt))
	unknown_side_effects = false;
    }

  if (path_ctxt->terminate_path_p ())
    return on_stmt_flags::terminate_path ();

  on_stmt_post (stmt, state, unknown_side_effects, &ctxt);

  return on_stmt_flags ();
}
/* Handle the pre-sm-state part of STMT, modifying STATE in-place.
   Write true to *OUT_TERMINATE_PATH if the path should be terminated.
   Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
   side effects.  */

void
exploded_node::on_stmt_pre (exploded_graph &eg,
			    const gimple *stmt,
			    program_state *state,
			    bool *out_terminate_path,
			    bool *out_unknown_side_effects,
			    region_model_context *ctxt)
{
  /* Handle special-case calls that require the full program_state.  */
  if (const gcall *call = dyn_cast <const gcall *> (stmt))
    {
      /* "__analyzer_dump": dump the full state to stderr.  */
      if (is_special_named_call_p (call, "__analyzer_dump", 0))
	{
	  state->dump (eg.get_ext_state (), true);
	  return;
	}

      /* "__analyzer_dump_state": dump a specific checker's state.  */
      if (is_special_named_call_p (call, "__analyzer_dump_state", 2))
	{
	  state->impl_call_analyzer_dump_state (call, eg.get_ext_state (),
						ctxt);
	  return;
	}

      /* setjmp/sigsetjmp: record the current point in the jmp_buf.  */
      if (is_setjmp_call_p (call))
	{
	  state->m_region_model->on_setjmp (call, this, ctxt);
	  return;
	}

      /* longjmp/siglongjmp: rewind, and terminate this path.  */
      if (is_longjmp_call_p (call))
	{
	  on_longjmp (eg, call, state, ctxt);
	  *out_terminate_path = true;
	  return;
	}
    }

  /* Otherwise, defer to m_region_model.  */
  state->m_region_model->on_stmt_pre (stmt,
				      out_terminate_path,
				      out_unknown_side_effects,
				      ctxt);
}
/* Handle the post-sm-state part of STMT, modifying STATE in-place. */
void
exploded_node::on_stmt_post (const gimple *stmt,
program_state *state,
bool unknown_side_effects,
region_model_context *ctxt)
{
if (const gcall *call = dyn_cast <const gcall *> (stmt))
state->m_region_model->on_call_post (call, unknown_side_effects, ctxt);
}
/* Consider the effect of following superedge SUCC from this node.

   Return true if it's feasible to follow the edge, or false
   if it's infeasible.

   Examples: if it's the "true" branch within
   a CFG and we know the conditional is false, we know it's infeasible.
   If it's one of multiple interprocedual "return" edges, then only
   the edge back to the most recent callsite is feasible.

   Update NEXT_STATE accordingly (e.g. to record that a condition was
   true or false, or that the NULL-ness of a pointer has been checked,
   pushing/popping stack frames, etc).

   Update NEXT_POINT accordingly (updating the call string).  */

bool
exploded_node::on_edge (exploded_graph &eg,
			const superedge *succ,
			program_point *next_point,
			program_state *next_state,
			uncertainty_t *uncertainty)
{
  LOG_FUNC (eg.get_logger ());

  /* The point is updated first; the state update is only attempted
     if the point accepts the edge.  */
  return (next_point->on_edge (eg, succ)
	  && next_state->on_edge (eg, this, succ, uncertainty));
}
/* Verify that the stack at LONGJMP_POINT is still valid, given a call
to "setjmp" at SETJMP_POINT - the stack frame that "setjmp" was
called in must still be valid.
Caveat: this merely checks the call_strings in the points; it doesn't
detect the case where a frame returns and is then called again. */
static bool
valid_longjmp_stack_p (const program_point &longjmp_point,
const program_point &setjmp_point)
{
const call_string &cs_at_longjmp = longjmp_point.get_call_string ();
const call_string &cs_at_setjmp = setjmp_point.get_call_string ();
if (cs_at_longjmp.length () < cs_at_setjmp.length ())
return false;
/* Check that the call strings match, up to the depth of the
setjmp point. */
for (unsigned depth = 0; depth < cs_at_setjmp.length (); depth++)
if (cs_at_longjmp[depth] != cs_at_setjmp[depth])
return false;
return true;
}
/* A pending_diagnostic subclass for complaining about bad longjmps,
   where the enclosing function of the "setjmp" has returned (and thus
   the stack frame no longer exists).  */

class stale_jmp_buf : public pending_diagnostic_subclass<stale_jmp_buf>
{
public:
  stale_jmp_buf (const gcall *setjmp_call, const gcall *longjmp_call,
		 const program_point &setjmp_point)
  : m_setjmp_call (setjmp_call), m_longjmp_call (longjmp_call),
    m_setjmp_point (setjmp_point), m_stack_pop_event (NULL)
  {}

  int get_controlling_option () const FINAL OVERRIDE
  {
    return OPT_Wanalyzer_stale_setjmp_buffer;
  }

  bool emit (rich_location *richloc) FINAL OVERRIDE
  {
    return warning_at
      (richloc, get_controlling_option (),
       "%qs called after enclosing function of %qs has returned",
       get_user_facing_name (m_longjmp_call),
       get_user_facing_name (m_setjmp_call));
  }

  const char *get_kind () const FINAL OVERRIDE
  { return "stale_jmp_buf"; }

  bool operator== (const stale_jmp_buf &other) const
  {
    return (m_setjmp_call == other.m_setjmp_call
	    && m_longjmp_call == other.m_longjmp_call);
  }

  bool
  maybe_add_custom_events_for_superedge (const exploded_edge &eedge,
					 checker_path *emission_path)
    FINAL OVERRIDE
  {
    /* Detect exactly when the stack first becomes invalid,
       and issue an event then.  */
    if (m_stack_pop_event)
      return false;
    const exploded_node *src_node = eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const exploded_node *dst_node = eedge.m_dest;
    const program_point &dst_point = dst_node->get_point ();
    if (valid_longjmp_stack_p (src_point, m_setjmp_point)
	&& !valid_longjmp_stack_p (dst_point, m_setjmp_point))
      {
	/* Compare with diagnostic_manager::add_events_for_superedge.  */
	const int src_stack_depth = src_point.get_stack_depth ();
	m_stack_pop_event = new precanned_custom_event
	  (src_point.get_location (),
	   src_point.get_fndecl (),
	   src_stack_depth,
	   "stack frame is popped here, invalidating saved environment");
	emission_path->add_event (m_stack_pop_event);
	return false;
      }
    return false;
  }

  /* Describe the final event, referencing the stack-pop event (if we
     emitted one) so the user can see where the frame went away.
     Fixed: removed a stray trailing ";;" and added the missing
     FINAL OVERRIDE markers, for consistency with the other vfuncs.  */
  label_text describe_final_event (const evdesc::final_event &ev)
    FINAL OVERRIDE
  {
    if (m_stack_pop_event)
      return ev.formatted_print
	("%qs called after enclosing function of %qs returned at %@",
	 get_user_facing_name (m_longjmp_call),
	 get_user_facing_name (m_setjmp_call),
	 m_stack_pop_event->get_id_ptr ());
    else
      return ev.formatted_print
	("%qs called after enclosing function of %qs has returned",
	 get_user_facing_name (m_longjmp_call),
	 get_user_facing_name (m_setjmp_call));
  }

private:
  const gcall *m_setjmp_call;
  const gcall *m_longjmp_call;
  program_point m_setjmp_point;
  custom_event *m_stack_pop_event;
};
/* Handle LONGJMP_CALL, a call to longjmp or siglongjmp.

   Attempt to locate where setjmp/sigsetjmp was called on the jmp_buf and build
   an exploded_node and exploded_edge to it representing a rewind to that frame,
   handling the various kinds of failure that can occur.  */

void
exploded_node::on_longjmp (exploded_graph &eg,
			   const gcall *longjmp_call,
			   program_state *new_state,
			   region_model_context *ctxt)
{
  /* Locate the jmp_buf region via the first argument.  */
  tree buf_ptr = gimple_call_arg (longjmp_call, 0);
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (buf_ptr)));

  region_model *new_region_model = new_state->m_region_model;
  const svalue *buf_ptr_sval = new_region_model->get_rvalue (buf_ptr, ctxt);
  const region *buf = new_region_model->deref_rvalue (buf_ptr_sval, buf_ptr,
						      ctxt);

  /* If the buffer doesn't hold a value saved by a setjmp we've seen,
     we can't rewind; give up silently.  */
  const svalue *buf_content_sval
    = new_region_model->get_store_value (buf, ctxt);
  const setjmp_svalue *setjmp_sval
    = buf_content_sval->dyn_cast_setjmp_svalue ();
  if (!setjmp_sval)
    return;

  const setjmp_record tmp_setjmp_record = setjmp_sval->get_setjmp_record ();

  /* Build a custom enode and eedge for rewinding from the longjmp/siglongjmp
     call back to the setjmp/sigsetjmp.  */
  rewind_info_t rewind_info (tmp_setjmp_record, longjmp_call);

  const gcall *setjmp_call = rewind_info.get_setjmp_call ();
  const program_point &setjmp_point = rewind_info.get_setjmp_point ();

  const program_point &longjmp_point = get_point ();

  /* Verify that the setjmp's call_stack hasn't been popped.  */
  if (!valid_longjmp_stack_p (longjmp_point, setjmp_point))
    {
      ctxt->warn (new stale_jmp_buf (setjmp_call, longjmp_call, setjmp_point));
      return;
    }

  gcc_assert (longjmp_point.get_stack_depth ()
	      >= setjmp_point.get_stack_depth ());

  /* Update the state for use by the destination node.  */

  /* Stash the current number of diagnostics so that we can update
     any that this adds to show where the longjmp is rewinding to.  */

  diagnostic_manager *dm = &eg.get_diagnostic_manager ();
  unsigned prev_num_diagnostics = dm->get_num_diagnostics ();

  /* Pop frames back to the setjmp's frame and bind the fake return
     value within the region model.  */
  new_region_model->on_longjmp (longjmp_call, setjmp_call,
				setjmp_point.get_stack_depth (), ctxt);

  /* Detect leaks in the new state relative to the old state.  */
  program_state::detect_leaks (get_state (), *new_state, NULL,
			       eg.get_ext_state (), ctxt);

  program_point next_point
    = program_point::after_supernode (setjmp_point.get_supernode (),
				      setjmp_point.get_call_string ());

  exploded_node *next
    = eg.get_or_create_node (next_point, *new_state, this);

  /* Create custom exploded_edge for a longjmp.  */
  if (next)
    {
      exploded_edge *eedge
	= eg.add_edge (const_cast<exploded_node *> (this), next, NULL,
		       new rewind_info_t (tmp_setjmp_record, longjmp_call));

      /* For any diagnostics that were queued here (such as leaks) we want
	 the checker_path to show the rewinding events after the "final event"
	 so that the user sees where the longjmp is rewinding to (otherwise the
	 path is meaningless).

	 For example, we want to emit something like:
                        |   NN | {
                        |   NN |   longjmp (env, 1);
                        |      |   ~~~~~~~~~~~~~~~~
                        |      |   |
                        |      |   (10) 'ptr' leaks here; was allocated at (7)
                        |      |   (11) rewinding from 'longjmp' in 'inner'...
                        |
          <-------------+
          |
        'outer': event 12
          |
          |   NN | i = setjmp(env);
          |      |     ^~~~~~
          |      |     |
          |      |     (12) ...to 'setjmp' in 'outer' (saved at (2))

	 where the "final" event above is event (10), but we want to append
	 events (11) and (12) afterwards.

	 Do this by setting m_trailing_eedge on any diagnostics that were
	 just saved.  */
      unsigned num_diagnostics = dm->get_num_diagnostics ();
      for (unsigned i = prev_num_diagnostics; i < num_diagnostics; i++)
	{
	  saved_diagnostic *sd = dm->get_saved_diagnostic (i);
	  sd->m_trailing_eedge = eedge;
	}
    }
}
/* Subroutine of exploded_graph::process_node for finding the successors
   of the supernode for a function exit basic block.

   Ensure that pop_frame is called, potentially queuing diagnostics about
   leaks.  */

void
exploded_node::detect_leaks (exploded_graph &eg)
{
  LOG_FUNC_1 (eg.get_logger (), "EN: %i", m_index);

  gcc_assert (get_point ().get_supernode ()->return_p ());

  /* If we're not a "top-level" function, do nothing; pop_frame
     will be called when handling the return superedge.  */
  if (get_point ().get_stack_depth () > 1)
    return;

  /* We have a "top-level" function.  */
  gcc_assert (get_point ().get_stack_depth () == 1);

  const program_state &cur_state = get_state ();

  /* Work with a temporary copy of the state: pop the frame, and see
     what leaks (via purge_unused_svalues).  */
  program_state popped_state (cur_state);
  gcc_assert (popped_state.m_region_model);

  uncertainty_t uncertainty;
  impl_region_model_context ctxt (eg, this,
				  &cur_state, &popped_state, &uncertainty,
				  NULL, get_stmt ());
  const svalue *retval = NULL;
  popped_state.m_region_model->pop_frame (NULL, &retval, &ctxt);
  program_state::detect_leaks (cur_state, popped_state, retval,
			       eg.get_ext_state (), &ctxt);
}
/* Dump the indices of the predecessor and successor enodes of this
   enode to OUTF.  */

void
exploded_node::dump_succs_and_preds (FILE *outf) const
{
  unsigned idx;
  exploded_edge *eedge;
  {
    /* Collect the source enodes of the incoming edges.  */
    auto_vec<exploded_node *> pred_enodes (m_preds.length ());
    FOR_EACH_VEC_ELT (m_preds, idx, eedge)
      pred_enodes.quick_push (eedge->m_src);
    pretty_printer printer;
    print_enode_indices (&printer, pred_enodes);
    fprintf (outf, "preds: %s\n",
	     pp_formatted_text (&printer));
  }
  {
    /* Collect the destination enodes of the outgoing edges.  */
    auto_vec<exploded_node *> succ_enodes (m_succs.length ());
    FOR_EACH_VEC_ELT (m_succs, idx, eedge)
      succ_enodes.quick_push (eedge->m_dest);
    pretty_printer printer;
    print_enode_indices (&printer, succ_enodes);
    fprintf (outf, "succs: %s\n",
	     pp_formatted_text (&printer));
  }
}
/* class dynamic_call_info_t : public custom_edge_info.  */

/* Implementation of custom_edge_info::update_model vfunc
   for dynamic_call_info_t.

   Update state for the dynamically discovered calls: copy the
   destination enode's region model wholesale into MODEL.  */

bool
dynamic_call_info_t::update_model (region_model *model,
				   const exploded_edge *eedge,
				   region_model_context *) const
{
  gcc_assert (eedge);
  const program_state &dest_state = eedge->m_dest->get_state ();
  *model = *dest_state.m_region_model;
  return true;
}
/* Implementation of custom_edge_info::add_events_to_path vfunc
   for dynamic_call_info_t: add a call_event (or, for the return edge,
   a return_event) for the dynamically discovered call.  */

void
dynamic_call_info_t::add_events_to_path (checker_path *emission_path,
					 const exploded_edge &eedge) const
{
  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const exploded_node *dest_node = eedge.m_dest;
  const program_point &dest_point = dest_node->get_point ();
  const int dest_stack_depth = dest_point.get_stack_depth ();

  /* Both event kinds use the location of the call stmt, if known.  */
  const location_t loc = (m_dynamic_call
			  ? m_dynamic_call->location
			  : UNKNOWN_LOCATION);

  if (m_is_returning_call)
    emission_path->add_event (new return_event (eedge, loc,
						dest_point.get_fndecl (),
						dest_stack_depth));
  else
    emission_path->add_event (new call_event (eedge, loc,
					      src_point.get_fndecl (),
					      src_stack_depth));
}
/* class rewind_info_t : public custom_edge_info.  */

/* Implementation of custom_edge_info::update_model vfunc
   for rewind_info_t.

   Update state for the special-case of a rewind of a longjmp
   to a setjmp (which doesn't have a superedge, but does affect
   state).  Pops frames back to the setjmp's frame.  */

bool
rewind_info_t::update_model (region_model *model,
			     const exploded_edge *eedge,
			     region_model_context *) const
{
  gcc_assert (eedge);

  const program_point &longjmp_point = eedge->m_src->get_point ();
  const program_point &setjmp_point = eedge->m_dest->get_point ();

  /* The rewind can only pop frames, never push them.  */
  gcc_assert (longjmp_point.get_stack_depth ()
	      >= setjmp_point.get_stack_depth ());

  model->on_longjmp (get_longjmp_call (),
		     get_setjmp_call (),
		     setjmp_point.get_stack_depth (), NULL);
  return true;
}
/* Implementation of custom_edge_info::add_events_to_path vfunc
   for rewind_info_t: add a pair of events showing the rewind from
   the longjmp call back to the setjmp call.  */

void
rewind_info_t::add_events_to_path (checker_path *emission_path,
				   const exploded_edge &eedge) const
{
  /* Event at the longjmp end of the rewind...  */
  const exploded_node *longjmp_node = eedge.m_src;
  const program_point &longjmp_point = longjmp_node->get_point ();
  emission_path->add_event
    (new rewind_from_longjmp_event
     (&eedge, get_longjmp_call ()->location,
      longjmp_point.get_fndecl (),
      longjmp_point.get_stack_depth (), this));

  /* ...and at the setjmp end.  */
  const exploded_node *setjmp_node = eedge.m_dest;
  const program_point &setjmp_point = setjmp_node->get_point ();
  emission_path->add_event
    (new rewind_to_setjmp_event
     (&eedge, get_setjmp_call ()->location,
      setjmp_point.get_fndecl (),
      setjmp_point.get_stack_depth (), this));
}
/* class exploded_edge : public dedge<eg_traits>.  */

/* exploded_edge's ctor.  SEDGE may be NULL (e.g. for custom edges);
   CUSTOM_INFO may be NULL, and ownership of it is taken (see dtor).  */

exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
			      const superedge *sedge,
			      custom_edge_info *custom_info)
: dedge<eg_traits> (src, dest), m_sedge (sedge),
  m_custom_info (custom_info)
{
}
/* exploded_edge's dtor.  Frees the custom info, which this edge owns.  */

exploded_edge::~exploded_edge ()
{
  delete m_custom_info;
}
/* Implementation of dedge::dump_dot vfunc for exploded_edge.
   Use the label of the underlying superedge, if any.
   Emits "src -> dest [attrs]" to GV's pretty_printer.  */

void
exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &) const
{
  pretty_printer *pp = gv->get_pp ();

  m_src->dump_dot_id (pp);
  pp_string (pp, " -> ");
  m_dest->dump_dot_id (pp);
  dump_dot_label (pp);
}
/* Second half of exploded_edge::dump_dot.  This is split out
   for use by trimmed_graph::dump_dot and base_feasible_edge::dump_dot.
   Emits the .dot attribute list (style/color/weight plus a headlabel
   describing the underlying superedge or custom info).  */

void
exploded_edge::dump_dot_label (pretty_printer *pp) const
{
  /* Defaults, used for plain CFG edges.  */
  const char *style = "\"solid,bold\"";
  const char *color = "black";
  int weight = 10;
  const char *constraint = "true";

  if (m_sedge)
    switch (m_sedge->m_kind)
      {
      default:
	gcc_unreachable ();
      case SUPEREDGE_CFG_EDGE:
	break;
      case SUPEREDGE_CALL:
	color = "red";
	//constraint = "false";
	break;
      case SUPEREDGE_RETURN:
	color = "green";
	//constraint = "false";
	break;
      case SUPEREDGE_INTRAPROCEDURAL_CALL:
	style = "\"dotted\"";
	break;
      }
  /* Custom edges (e.g. longjmp rewinds, dynamic calls) override the
     superedge styling.  */
  if (m_custom_info)
    {
      color = "red";
      style = "\"dotted\"";
    }

  pp_printf (pp,
	     (" [style=%s, color=%s, weight=%d, constraint=%s,"
	      " headlabel=\""),
	     style, color, weight, constraint);

  if (m_sedge)
    m_sedge->dump_label_to_pp (pp, false);
  else if (m_custom_info)
    m_custom_info->print (pp);

  //pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);

  pp_printf (pp, "\"];\n");
}
/* Return a new json::object of the form
   {"src_idx": int, the index of the source exploded edge,
    "dst_idx": int, the index of the destination exploded edge,
    "sedge": (optional) object for the superedge, if any,
    "custom": (optional) str, a description, if this is a custom edge}.
   Caller takes ownership of the result.  */

json::object *
exploded_edge::to_json () const
{
  json::object *obj = new json::object ();
  obj->set ("src_idx", new json::integer_number (m_src->m_index));
  obj->set ("dst_idx", new json::integer_number (m_dest->m_index));
  if (m_sedge)
    obj->set ("sedge", m_sedge->to_json ());
  if (m_custom_info)
    {
      /* Render the custom info's description to a string.  */
      pretty_printer printer;
      pp_format_decoder (&printer) = default_tree_printer;
      m_custom_info->print (&printer);
      obj->set ("custom", new json::string (pp_formatted_text (&printer)));
    }
  return obj;
}
/* struct stats.  */

/* stats' ctor.  Zero-initializes the per-point-kind enode counts.  */

stats::stats (int num_supernodes)
: m_node_reuse_count (0),
  m_node_reuse_after_merge_count (0),
  m_num_supernodes (num_supernodes)
{
  for (auto &count : m_num_nodes)
    count = 0;
}
/* Log these stats in multiline form to LOGGER, skipping point kinds
   with zero enodes.  LOGGER must be non-NULL.  */

void
stats::log (logger *logger) const
{
  gcc_assert (logger);
  for (int kind_idx = 0; kind_idx < NUM_POINT_KINDS; kind_idx++)
    if (m_num_nodes[kind_idx] > 0)
      logger->log ("m_num_nodes[%s]: %i",
		   point_kind_to_string
		     (static_cast <enum point_kind> (kind_idx)),
		   m_num_nodes[kind_idx]);
  logger->log ("m_node_reuse_count: %i", m_node_reuse_count);
  logger->log ("m_node_reuse_after_merge_count: %i",
	       m_node_reuse_after_merge_count);
}
/* Dump these stats in multiline form to OUT, skipping point kinds
   with zero enodes.  */

void
stats::dump (FILE *out) const
{
  for (int kind_idx = 0; kind_idx < NUM_POINT_KINDS; kind_idx++)
    if (m_num_nodes[kind_idx] > 0)
      fprintf (out, "m_num_nodes[%s]: %i\n",
	       point_kind_to_string
		 (static_cast <enum point_kind> (kind_idx)),
	       m_num_nodes[kind_idx]);
  fprintf (out, "m_node_reuse_count: %i\n", m_node_reuse_count);
  fprintf (out, "m_node_reuse_after_merge_count: %i\n",
	   m_node_reuse_after_merge_count);

  /* Report the average density of "after supernode" enodes, a rough
     measure of how much the state space blew up per CFG node.  */
  if (m_num_supernodes > 0)
    fprintf (out, "PK_AFTER_SUPERNODE nodes per supernode: %.2f\n",
	     (float)m_num_nodes[PK_AFTER_SUPERNODE] / (float)m_num_supernodes);
}
/* Return the total number of enodes recorded within this object,
   summed over all point kinds.  */

int
stats::get_total_enodes () const
{
  int total = 0;
  for (int kind_idx = 0; kind_idx < NUM_POINT_KINDS; kind_idx++)
    total += m_num_nodes[kind_idx];
  return total;
}
/* strongly_connected_components's ctor.  Tarjan's SCC algorithm.
   Computes an SCC id for every supernode in SG; intraprocedural
   (CFG and intraprocedural-call) edges only are considered (see
   strong_connect).  */

strongly_connected_components::
strongly_connected_components (const supergraph &sg, logger *logger)
: m_sg (sg), m_per_node (m_sg.num_nodes ())
{
  LOG_SCOPE (logger);
  auto_timevar tv (TV_ANALYZER_SCC);

  /* First initialize all per-node data (m_index == -1 means
     "not yet visited"), then run the DFS from each unvisited node.  */
  for (int i = 0; i < m_sg.num_nodes (); i++)
    m_per_node.quick_push (per_node_data ());

  for (int i = 0; i < m_sg.num_nodes (); i++)
    if (m_per_node[i].m_index == -1)
      strong_connect (i);

  /* Debug dump, normally disabled.  */
  if (0)
    dump ();
}
/* Dump the per-supernode SCC data within this object to stderr.  */

DEBUG_FUNCTION void
strongly_connected_components::dump () const
{
  for (int snode_idx = 0; snode_idx < m_sg.num_nodes (); snode_idx++)
    {
      const per_node_data &data = m_per_node[snode_idx];
      fprintf (stderr, "SN %i: index: %i lowlink: %i on_stack: %i\n",
	       snode_idx, data.m_index, data.m_lowlink, data.m_on_stack);
    }
}
/* Return a new json::array of per-snode SCC ids.
   Caller takes ownership of the result.  */

json::array *
strongly_connected_components::to_json () const
{
  json::array *arr = new json::array ();
  for (int snode_idx = 0; snode_idx < m_sg.num_nodes (); snode_idx++)
    arr->append (new json::integer_number (get_scc_id (snode_idx)));
  return arr;
}
/* Subroutine of strongly_connected_components's ctor, part of Tarjan's
   SCC algorithm.  Recursive DFS from the supernode with index INDEX.
   Only intraprocedural edges (CFG and intraprocedural-call) are
   followed, so SCCs never span functions.  */

void
strongly_connected_components::strong_connect (unsigned index)
{
  supernode *v_snode = m_sg.get_node_by_index (index);

  /* Set the depth index for v to the smallest unused index.
     (Here the supernode's own index is used as the depth index,
     which is a deterministic substitute for a global counter.)  */
  per_node_data *v = &m_per_node[index];
  v->m_index = index;
  v->m_lowlink = index;
  m_stack.safe_push (index);
  v->m_on_stack = true;
  /* NOTE(review): this increments the local parameter, which is not
     read again — appears vestigial from the textbook algorithm's
     global counter; confirm against upstream before removing.  */
  index++;

  /* Consider successors of v.  */
  unsigned i;
  superedge *sedge;
  FOR_EACH_VEC_ELT (v_snode->m_succs, i, sedge)
    {
      if (sedge->get_kind () != SUPEREDGE_CFG_EDGE
	  && sedge->get_kind () != SUPEREDGE_INTRAPROCEDURAL_CALL)
	continue;
      supernode *w_snode = sedge->m_dest;
      per_node_data *w = &m_per_node[w_snode->m_index];
      if (w->m_index == -1)
	{
	  /* Successor w has not yet been visited; recurse on it.  */
	  strong_connect (w_snode->m_index);
	  v->m_lowlink = MIN (v->m_lowlink, w->m_lowlink);
	}
      else if (w->m_on_stack)
	{
	  /* Successor w is in stack S and hence in the current SCC
	     If w is not on stack, then (v, w) is a cross-edge in the DFS
	     tree and must be ignored.  */
	  v->m_lowlink = MIN (v->m_lowlink, w->m_index);
	}
    }

  /* If v is a root node, pop the stack and generate an SCC.  */
  if (v->m_lowlink == v->m_index)
    {
      per_node_data *w;
      do {
	int idx = m_stack.pop ();
	w = &m_per_node[idx];
	w->m_on_stack = false;
      } while (w != v);
    }
}
/* worklist's ctor.  Computes the SCCs of EG's supergraph (used for
   ordering), and creates an empty priority queue whose keys compare
   via worklist::key_t::cmp.  */

worklist::worklist (const exploded_graph &eg, const analysis_plan &plan)
: m_scc (eg.get_supergraph (), eg.get_logger ()),
  m_plan (plan),
  m_queue (key_t (*this, NULL))
{
}
/* Return the number of nodes in the worklist.  */

unsigned
worklist::length () const
{
  return m_queue.nodes ();
}
/* Return the next node in the worklist, removing it.  */

exploded_node *
worklist::take_next ()
{
  return m_queue.extract_min ();
}
/* Return the next node in the worklist without removing it.  */

exploded_node *
worklist::peek_next ()
{
  return m_queue.min ();
}
/* Add ENODE to the worklist.  ENODE must currently have
   STATUS_WORKLIST (i.e. not yet processed/merged).  */

void
worklist::add_node (exploded_node *enode)
{
  gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
  m_queue.insert (key_t (*this, enode), enode);
}
/* Comparator for implementing worklist::key_t comparison operators.
   Return negative if KA is before KB
   Return positive if KA is after KB
   Return 0 if they are equal.

   The ordering of the worklist is critical for performance and for
   avoiding node explosions.  Ideally we want all enodes at a CFG join-point
   with the same callstring to be sorted next to each other in the worklist
   so that a run of consecutive enodes can be merged and processed "in bulk"
   rather than individually or pairwise, minimizing the number of new enodes
   created.

   The comparison criteria, applied in order, are:
   analysis-plan function order (for summaries), call string, SCC id,
   supernode index, point within supernode, per-checker sm-state,
   and finally enode index as a stable tiebreak.  */

int
worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
{
  const program_point &point_a = ka.m_enode->get_point ();
  const program_point &point_b = kb.m_enode->get_point ();
  const call_string &call_string_a = point_a.get_call_string ();
  const call_string &call_string_b = point_b.get_call_string ();

  /* Order empty-callstring points with different functions based on the
     analysis_plan, so that we generate summaries before they are used.  */
  if (flag_analyzer_call_summaries
      && call_string_a.empty_p ()
      && call_string_b.empty_p ()
      && point_a.get_function () != NULL
      && point_b.get_function () != NULL
      && point_a.get_function () != point_b.get_function ())
    {
      if (int cmp = ka.m_worklist.m_plan.cmp_function (point_a.get_function (),
						       point_b.get_function ()))
	return cmp;
    }

  /* Sort by callstring, so that nodes with deeper call strings are processed
     before those with shallower call strings.
     If we have
         splitting BB
             /  \
            /    \
       fn call   no fn call
            \    /
             \  /
            join BB
     then we want the path inside the function call to be fully explored up
     to the return to the join BB before we explore on the "no fn call" path,
     so that both enodes at the join BB reach the front of the worklist at
     the same time and thus have a chance of being merged.  */
  int cs_cmp = call_string::cmp (call_string_a, call_string_b);
  if (cs_cmp)
    return cs_cmp;

  /* Order by SCC.  */
  int scc_id_a = ka.get_scc_id (ka.m_enode);
  int scc_id_b = kb.get_scc_id (kb.m_enode);
  if (scc_id_a != scc_id_b)
    return scc_id_a - scc_id_b;

  /* If in same SCC, order by supernode index (an arbitrary but stable
     ordering).  */
  const supernode *snode_a = ka.m_enode->get_supernode ();
  const supernode *snode_b = kb.m_enode->get_supernode ();
  if (snode_a == NULL)
    {
      if (snode_b != NULL)
	/* One is NULL.  */
	return -1;
      else
	/* Both are NULL.  */
	return 0;
    }
  if (snode_b == NULL)
    /* One is NULL.  */
    return 1;

  /* Neither are NULL.  */
  gcc_assert (snode_a && snode_b);
  if (snode_a->m_index != snode_b->m_index)
    return snode_a->m_index - snode_b->m_index;

  gcc_assert (snode_a == snode_b);

  /* Order within supernode via program point.  */
  int within_snode_cmp
    = function_point::cmp_within_supernode (point_a.get_function_point (),
					    point_b.get_function_point ());
  if (within_snode_cmp)
    return within_snode_cmp;

  /* Otherwise, we ought to have the same program_point.  */
  gcc_assert (point_a == point_b);

  const program_state &state_a = ka.m_enode->get_state ();
  const program_state &state_b = kb.m_enode->get_state ();

  /* Sort by sm-state, so that identical sm-states are grouped
     together in the worklist.  */
  for (unsigned sm_idx = 0; sm_idx < state_a.m_checker_states.length ();
       ++sm_idx)
    {
      sm_state_map *smap_a = state_a.m_checker_states[sm_idx];
      sm_state_map *smap_b = state_b.m_checker_states[sm_idx];

      if (int smap_cmp = sm_state_map::cmp (*smap_a, *smap_b))
	return smap_cmp;
    }

  /* Otherwise, we have two enodes at the same program point but with
     different states.  We don't have a good total ordering on states,
     so order them by enode index, so that we have at least have a
     stable sort.  */
  return ka.m_enode->m_index - kb.m_enode->m_index;
}
/* Return a new json::object of the form
   {"scc" : [per-snode-IDs]},
   Caller takes ownership of the result.  */

json::object *
worklist::to_json () const
{
  json::object *obj = new json::object ();

  obj->set ("scc", m_scc.to_json ());

  /* The following field isn't yet being JSONified:
     queue_t m_queue;  */

  return obj;
}
/* exploded_graph's ctor.  Creates the origin enode, and initializes
   the per-supernode count of PK_AFTER_SUPERNODE enodes.  */

exploded_graph::exploded_graph (const supergraph &sg, logger *logger,
				const extrinsic_state &ext_state,
				const state_purge_map *purge_map,
				const analysis_plan &plan,
				int verbosity)
: m_sg (sg), m_logger (logger),
  m_worklist (*this, plan),
  m_ext_state (ext_state),
  m_purge_map (purge_map),
  m_plan (plan),
  m_diagnostic_manager (logger, ext_state.get_engine (), verbosity),
  m_global_stats (m_sg.num_nodes ()),
  m_functionless_stats (m_sg.num_nodes ()),
  m_PK_AFTER_SUPERNODE_per_snode (m_sg.num_nodes ())
{
  m_origin = get_or_create_node (program_point::origin (),
				 program_state (ext_state), NULL);
  /* Each per-supernode enode count starts at zero.
     (Fixed: this previously pushed the loop index I, seeding every
     counter with its supernode's index rather than 0.)  */
  for (int i = 0; i < m_sg.num_nodes (); i++)
    m_PK_AFTER_SUPERNODE_per_snode.quick_push (0);
}
/* exploded_graph's dtor.  Frees the owned per-function stats and
   per-point data objects (the maps themselves clean up their own
   storage).  */

exploded_graph::~exploded_graph ()
{
  for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
       iter != m_per_function_stats.end ();
       ++iter)
    delete (*iter).second;
  for (point_map_t::iterator iter = m_per_point_data.begin ();
       iter != m_per_point_data.end ();
       ++iter)
    delete (*iter).second;
}
/* Subroutine for use when implementing __attribute__((tainted_args))
   on functions and on function pointer fields in structs.

   Called on STATE representing a call to FNDECL.
   Mark all params of FNDECL in STATE as "tainted".  Mark the value of all
   regions pointed to by params of FNDECL as "tainted".

   Return true if successful; return false if the "taint" state machine
   was not found.  */

static bool
mark_params_as_tainted (program_state *state, tree fndecl,
			const extrinsic_state &ext_state)
{
  unsigned taint_sm_idx;
  if (!ext_state.get_sm_idx_by_name ("taint", &taint_sm_idx))
    return false;
  sm_state_map *smap = state->m_checker_states[taint_sm_idx];

  const state_machine &sm = ext_state.get_sm (taint_sm_idx);
  state_machine::state_t tainted = sm.get_state_by_name ("tainted");

  region_model_manager *mgr = ext_state.get_model_manager ();

  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  gcc_assert (fun);

  for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
       iter_parm = DECL_CHAIN (iter_parm))
    {
      tree param = iter_parm;
      /* Prefer the default-def SSA name for the parm, if one exists,
	 since that is what the function body reads.  */
      if (tree parm_default_ssa = ssa_default_def (fun, iter_parm))
	param = parm_default_ssa;
      const region *param_reg = state->m_region_model->get_lvalue (param, NULL);
      const svalue *init_sval = mgr->get_or_create_initial_value (param_reg);
      smap->set_state (state->m_region_model, init_sval,
		       tainted, NULL /*origin_new_sval*/, ext_state);
      if (POINTER_TYPE_P (TREE_TYPE (param)))
	{
	  const region *pointee_reg = mgr->get_symbolic_region (init_sval);
	  /* Mark "*param" as tainted.  */
	  const svalue *init_pointee_sval
	    = mgr->get_or_create_initial_value (pointee_reg);
	  smap->set_state (state->m_region_model, init_pointee_sval,
			   tainted, NULL /*origin_new_sval*/, ext_state);
	}
    }

  return true;
}
/* Custom event for use by tainted_args_function_info when a function
   has been marked with __attribute__((tainted_args)): reports to the
   user that the attribute is why the params are treated as tainted.  */

class tainted_args_function_custom_event : public custom_event
{
public:
  tainted_args_function_custom_event (location_t loc, tree fndecl, int depth)
  : custom_event (loc, fndecl, depth),
    m_fndecl (fndecl)
  {
  }

  label_text get_desc (bool can_colorize) const FINAL OVERRIDE
  {
    return make_label_text
      (can_colorize,
       "function %qE marked with %<__attribute__((tainted_args))%>",
       m_fndecl);
  }

private:
  /* The function decl bearing the attribute.  */
  tree m_fndecl;
};
/* Custom exploded_edge info for top-level calls to a function
marked with __attribute__((tainted_args)). */
class tainted_args_function_info : public custom_edge_info
{
public:
  tainted_args_function_info (tree fndecl)
  : m_fndecl (fndecl)
  {}

  /* Label for this edge in dumps.
     Note: the original code had a stray ';' after this function body,
     triggering "extra ';'" under -Wpedantic; removed.  */
  void print (pretty_printer *pp) const FINAL OVERRIDE
  {
    pp_string (pp, "call to tainted_args function");
  }

  /* No model updates are needed for this edge; the tainting itself was
     done when the entrypoint's state was built
     (see mark_params_as_tainted).  */
  bool update_model (region_model *,
		     const exploded_edge *,
		     region_model_context *) const FINAL OVERRIDE
  {
    /* No-op.  */
    return true;
  }

  /* Prepend an event describing the tainted_args attribute to any
     diagnostic path going through this edge.  */
  void add_events_to_path (checker_path *emission_path,
			   const exploded_edge &) const FINAL OVERRIDE
  {
    emission_path->add_event
      (new tainted_args_function_custom_event
       (DECL_SOURCE_LOCATION (m_fndecl), m_fndecl, 0));
  }

private:
  /* The function marked with __attribute__((tainted_args)).  */
  tree m_fndecl;
};
/* Ensure that there is an exploded_node representing an external call to
   FUN, adding it to the worklist if creating it.
   Add an edge from the origin exploded_node to the function entrypoint
   exploded_node.
   Return the exploded_node for the entrypoint to the function, or NULL
   if one could not be created (e.g. if an entrypoint for FUN already
   exists, or the state is invalid).  */
exploded_node *
exploded_graph::add_function_entry (function *fun)
{
  gcc_assert (gimple_has_body_p (fun->decl));

  /* Be idempotent.  */
  if (m_functions_with_enodes.contains (fun))
    {
      logger * const logger = get_logger ();
      if (logger)
	logger->log ("entrypoint for %qE already exists", fun->decl);
      return NULL;
    }

  program_point point = program_point::from_function_entry (m_sg, fun);
  program_state state (m_ext_state);
  state.push_frame (m_ext_state, fun);

  custom_edge_info *edge_info = NULL;

  if (lookup_attribute ("tainted_args", DECL_ATTRIBUTES (fun->decl)))
    {
      if (mark_params_as_tainted (&state, fun->decl, m_ext_state))
	edge_info = new tainted_args_function_info (fun->decl);
    }

  if (!state.m_valid)
    {
      /* Fix: EDGE_INFO was previously leaked on this early-exit path.  */
      delete edge_info;
      return NULL;
    }

  exploded_node *enode = get_or_create_node (point, state, NULL);
  if (!enode)
    {
      delete edge_info;
      return NULL;
    }

  /* On success, ownership of EDGE_INFO passes to the new edge.  */
  add_edge (m_origin, enode, NULL, edge_info);

  m_functions_with_enodes.add (fun);

  return enode;
}
/* Get or create an exploded_node for (POINT, STATE).
If a new node is created, it is added to the worklist.
Use ENODE_FOR_DIAG, a pre-existing enode, for any diagnostics
that need to be emitted (e.g. when purging state *before* we have
a new enode). */
exploded_node *
exploded_graph::get_or_create_node (const program_point &point,
				    const program_state &state,
				    exploded_node *enode_for_diag)
{
  logger * const logger = get_logger ();
  LOG_FUNC (logger);

  /* Dump the incoming (POINT, STATE) pair when logging is enabled.  */
  if (logger)
    {
      format f (false);
      pretty_printer *pp = logger->get_printer ();
      logger->start_log_line ();
      pp_string (pp, "point: ");
      point.print (pp, f);
      logger->end_log_line ();
      logger->start_log_line ();
      pp_string (pp, "state: ");
      state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
    }

  /* Stop exploring paths for which we don't know how to effectively
     model the state.  */
  if (!state.m_valid)
    {
      if (logger)
	logger->log ("invalid state; not creating node");
      return NULL;
    }

  /* Temporarily switch the current function whilst validating/pruning.  */
  auto_cfun sentinel (point.get_function ());

  state.validate (get_ext_state ());

  //state.dump (get_ext_state ());

  /* Prune state to try to improve the chances of a cache hit,
     avoiding generating redundant nodes.  */
  uncertainty_t uncertainty;
  program_state pruned_state
    = state.prune_for_point (*this, point, enode_for_diag, &uncertainty);

  pruned_state.validate (get_ext_state ());

  //pruned_state.dump (get_ext_state ());

  if (logger)
    {
      pretty_printer *pp = logger->get_printer ();
      logger->start_log_line ();
      pp_string (pp, "pruned_state: ");
      pruned_state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
      pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true,
					       false);
    }

  /* Node-count stats are tracked per-function and per-call-string as
     well as globally.  */
  stats *per_fn_stats = get_or_create_function_stats (point.get_function ());

  stats *per_cs_stats
    = &get_or_create_per_call_string_data (point.get_call_string ())->m_stats;

  point_and_state ps (point, pruned_state);
  ps.validate (m_ext_state);
  /* First, try a cache lookup using the pruned state as-is.  */
  if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
    {
      /* An exploded_node for PS already exists.  */
      if (logger)
	logger->log ("reused EN: %i", (*slot)->m_index);
      m_global_stats.m_node_reuse_count++;
      per_fn_stats->m_node_reuse_count++;
      per_cs_stats->m_node_reuse_count++;
      return *slot;
    }

  per_program_point_data *per_point_data
    = get_or_create_per_program_point_data (point);

  /* Consider merging state with another enode at this program_point.  */
  if (flag_analyzer_state_merge)
    {
      exploded_node *existing_enode;
      unsigned i;
      FOR_EACH_VEC_ELT (per_point_data->m_enodes, i, existing_enode)
	{
	  if (logger)
	    logger->log ("considering merging with existing EN: %i for point",
			 existing_enode->m_index);
	  gcc_assert (existing_enode->get_point () == point);
	  const program_state &existing_state = existing_enode->get_state ();

	  /* Attempt the merge; on success, MERGED_STATE is written to.  */
	  program_state merged_state (m_ext_state);
	  if (pruned_state.can_merge_with_p (existing_state, m_ext_state, point,
					     &merged_state))
	    {
	      merged_state.validate (m_ext_state);
	      if (logger)
		logger->log ("merging new state with that of EN: %i",
			     existing_enode->m_index);

	      /* Try again for a cache hit, this time with the merged
		 state.  */
	      ps.set_state (merged_state);
	      if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
		{
		  /* An exploded_node for PS already exists.  */
		  if (logger)
		    logger->log ("reused EN: %i", (*slot)->m_index);
		  m_global_stats.m_node_reuse_after_merge_count++;
		  per_fn_stats->m_node_reuse_after_merge_count++;
		  per_cs_stats->m_node_reuse_after_merge_count++;
		  return *slot;
		}
	    }
	  else
	    if (logger)
	      logger->log ("not merging new state with that of EN: %i",
			   existing_enode->m_index);
	}
    }

  /* Impose a limit on the number of enodes per program point, and
     simply stop if we exceed it.  */
  if ((int)per_point_data->m_enodes.length ()
      >= param_analyzer_max_enodes_per_program_point)
    {
      pretty_printer pp;
      point.print (&pp, format (false));
      print_enode_indices (&pp, per_point_data->m_enodes);
      if (logger)
	logger->log ("not creating enode; too many at program point: %s",
		     pp_formatted_text (&pp));
      warning_at (point.get_location (), OPT_Wanalyzer_too_complex,
		  "terminating analysis for this program point: %s",
		  pp_formatted_text (&pp));
      per_point_data->m_excess_enodes++;
      return NULL;
    }

  ps.validate (m_ext_state);

  /* An exploded_node for "ps" doesn't already exist; create one.  */
  exploded_node *node = new exploded_node (ps, m_nodes.length ());
  add_node (node);
  m_point_and_state_to_node.put (node->get_ps_key (), node);

  /* Update per-program_point data.  */
  per_point_data->m_enodes.safe_push (node);

  /* Update node-count stats, bucketed by the kind of program point.  */
  const enum point_kind node_pk = node->get_point ().get_kind ();
  m_global_stats.m_num_nodes[node_pk]++;
  per_fn_stats->m_num_nodes[node_pk]++;
  per_cs_stats->m_num_nodes[node_pk]++;
  if (node_pk == PK_AFTER_SUPERNODE)
    m_PK_AFTER_SUPERNODE_per_snode[point.get_supernode ()->m_index]++;

  if (logger)
    {
      format f (false);
      pretty_printer *pp = logger->get_printer ();
      logger->log ("created EN: %i", node->m_index);
      logger->start_log_line ();
      pp_string (pp, "point: ");
      point.print (pp, f);
      logger->end_log_line ();
      logger->start_log_line ();
      pp_string (pp, "pruned_state: ");
      pruned_state.dump_to_pp (m_ext_state, true, false, pp);
      logger->end_log_line ();
    }

  /* Add the new node to the worklist.  */
  m_worklist.add_node (node);
  return node;
}
/* Add an exploded_edge from SRC to DEST, recording its association
   with SEDGE (which may be NULL), and, if non-NULL, taking ownership
   of CUSTOM_INFO.
   Return the newly-created eedge.  */
exploded_edge *
exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
			  const superedge *sedge,
			  custom_edge_info *custom_info)
{
  /* Hoist the logger lookup, rather than calling get_logger twice.  */
  if (logger *logger = get_logger ())
    logger->log ("creating edge EN: %i -> EN: %i",
		 src->m_index, dest->m_index);
  exploded_edge *eedge = new exploded_edge (src, dest, sedge, custom_info);
  digraph<eg_traits>::add_edge (eedge);
  return eedge;
}
/* Ensure that this graph has per-program_point-data for POINT;
borrow a pointer to it. */
per_program_point_data *
exploded_graph::
get_or_create_per_program_point_data (const program_point &point)
{
  /* Reuse any existing entry for POINT.  */
  per_program_point_data **slot = m_per_point_data.get (&point);
  if (slot)
    return *slot;

  /* None yet: create one, keyed by the copy of POINT inside it.  */
  per_program_point_data *data = new per_program_point_data (point);
  m_per_point_data.put (&data->m_key, data);
  return data;
}
/* Get this graph's per-program-point-data for POINT if there is any,
otherwise NULL. */
per_program_point_data *
exploded_graph::get_per_program_point_data (const program_point &point) const
{
  /* The map's get () isn't const, hence the const_cast.  */
  point_map_t &map = const_cast <point_map_t &> (m_per_point_data);
  per_program_point_data **slot = map.get (&point);
  return slot ? *slot : NULL;
}
/* Ensure that this graph has per-call_string-data for CS;
borrow a pointer to it. */
per_call_string_data *
exploded_graph::get_or_create_per_call_string_data (const call_string &cs)
{
  /* Reuse any existing entry for CS.  */
  per_call_string_data **slot = m_per_call_string_data.get (&cs);
  if (slot)
    return *slot;

  /* None yet: create one, keyed by the copy of CS inside it.  */
  per_call_string_data *data
    = new per_call_string_data (cs, m_sg.num_nodes ());
  m_per_call_string_data.put (&data->m_key, data);
  return data;
}
/* Ensure that this graph has per-function-data for FUN;
borrow a pointer to it. */
per_function_data *
exploded_graph::get_or_create_per_function_data (function *fun)
{
  /* Reuse any existing entry for FUN.  */
  per_function_data **slot = m_per_function_data.get (fun);
  if (slot)
    return *slot;

  /* None yet: create and register one.  */
  per_function_data *data = new per_function_data ();
  m_per_function_data.put (fun, data);
  return data;
}
/* Get this graph's per-function-data for FUN if there is any,
otherwise NULL. */
per_function_data *
exploded_graph::get_per_function_data (function *fun) const
{
  /* The map's get () isn't const, hence the const_cast.  */
  per_function_data_t &map
    = const_cast <per_function_data_t &> (m_per_function_data);
  per_function_data **slot = map.get (fun);
  return slot ? *slot : NULL;
}
/* Return true if FUN should be traversed directly, rather than only as
called via other functions. */
static bool
toplevel_function_p (function *fun, logger *logger)
{
  /* Don't directly traverse into functions that have an "__analyzer_"
     prefix.  Doing so is useful for the analyzer test suite, allowing
     us to have functions that are called in traversals, but not directly
     explored, thus testing how the analyzer handles calls and returns.
     With this, we can have DejaGnu directives that cover just the case
     of where a function is called by another function, without generating
     excess messages from the case of the first function being traversed
     directly.  */
#define ANALYZER_PREFIX "__analyzer_"
  const char *name = IDENTIFIER_POINTER (DECL_NAME (fun->decl));
  if (strncmp (name, ANALYZER_PREFIX, strlen (ANALYZER_PREFIX)) == 0)
    {
      if (logger)
	logger->log ("not traversing %qE (starts with %qs)",
		     fun->decl, ANALYZER_PREFIX);
      return false;
    }

  if (logger)
    logger->log ("traversing %qE (all checks passed)", fun->decl);

  return true;
}
/* Custom event for use by tainted_call_info when a callback field has been
marked with __attribute__((tainted_args)), for labelling the field. */
/* A checker_event subclass labelling the declaration of a callback field
   that was marked with __attribute__((tainted_args)).  */

class tainted_args_field_custom_event : public custom_event
{
public:
  tainted_args_field_custom_event (tree field)
  : custom_event (DECL_SOURCE_LOCATION (field), NULL_TREE, 0),
    m_field (field)
  {
  }

  /* Describe this event for the diagnostic path.  */
  label_text get_desc (bool can_colorize) const FINAL OVERRIDE
  {
    tree container = DECL_CONTEXT (m_field);
    return make_label_text (can_colorize,
			    "field %qE of %qT"
			    " is marked with %<__attribute__((tainted_args))%>",
			    m_field, container);
  }

private:
  /* The FIELD_DECL bearing the attribute.  */
  tree m_field;
};
/* Custom event for use by tainted_call_info when a callback field has been
marked with __attribute__((tainted_args)), for labelling the function used
in that callback. */
/* A checker_event subclass labelling the function used as the initializer
   of a callback field marked with __attribute__((tainted_args)).  */

class tainted_args_callback_custom_event : public custom_event
{
public:
  tainted_args_callback_custom_event (location_t loc, tree fndecl, int depth,
				      tree field)
  : custom_event (loc, fndecl, depth),
    m_field (field)
  {
  }

  /* Describe this event for the diagnostic path.
     m_fndecl comes from the custom_event base class.  */
  label_text get_desc (bool can_colorize) const FINAL OVERRIDE
  {
    return make_label_text (can_colorize,
			    "function %qE used as initializer for field %qE"
			    " marked with %<__attribute__((tainted_args))%>",
			    m_fndecl, m_field);
  }

private:
  /* The FIELD_DECL bearing the attribute.  */
  tree m_field;
};
/* Custom edge info for use when adding a function used by a callback field
marked with '__attribute__((tainted_args))'. */
class tainted_args_call_info : public custom_edge_info
{
public:
  tainted_args_call_info (tree field, tree fndecl, location_t loc)
  : m_field (field), m_fndecl (fndecl), m_loc (loc)
  {}

  /* Label for this edge in dumps.
     Note: the original code had a stray ';' after this function body,
     triggering "extra ';'" under -Wpedantic; removed.  */
  void print (pretty_printer *pp) const FINAL OVERRIDE
  {
    pp_string (pp, "call to tainted field");
  }

  /* No model updates are needed for this edge; the tainting itself was
     done when the entrypoint's state was built
     (see mark_params_as_tainted).  */
  bool update_model (region_model *,
		     const exploded_edge *,
		     region_model_context *) const FINAL OVERRIDE
  {
    /* No-op.  */
    return true;
  }

  void add_events_to_path (checker_path *emission_path,
			   const exploded_edge &) const FINAL OVERRIDE
  {
    /* Show the field in the struct declaration, e.g.
       "(1) field 'store' is marked with '__attribute__((tainted_args))'" */
    emission_path->add_event
      (new tainted_args_field_custom_event (m_field));

    /* Show the callback in the initializer
       e.g.
       "(2) function 'gadget_dev_desc_UDC_store' used as initializer
       for field 'store' marked with '__attribute__((tainted_args))'".  */
    emission_path->add_event
      (new tainted_args_callback_custom_event (m_loc, m_fndecl, 0, m_field));
  }

private:
  /* The FIELD_DECL bearing the attribute.  */
  tree m_field;
  /* The function used as the field's initializer.  */
  tree m_fndecl;
  /* The location of the initializer.  */
  location_t m_loc;
};
/* Given an initializer at LOC for FIELD marked with
'__attribute__((tainted_args))' initialized with FNDECL, add an
entrypoint to FNDECL to EG (and to its worklist) where the params to
FNDECL are marked as tainted. */
static void
add_tainted_args_callback (exploded_graph *eg, tree field, tree fndecl,
			   location_t loc)
{
  logger *logger = eg->get_logger ();

  LOG_SCOPE (logger);

  if (!gimple_has_body_p (fndecl))
    return;

  const extrinsic_state &ext_state = eg->get_ext_state ();

  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  gcc_assert (fun);

  program_point point
    = program_point::from_function_entry (eg->get_supergraph (), fun);
  program_state state (ext_state);
  state.push_frame (ext_state, fun);

  if (!mark_params_as_tainted (&state, fndecl, ext_state))
    return;

  if (!state.m_valid)
    return;

  exploded_node *enode = eg->get_or_create_node (point, state, NULL);

  /* Fix: previously the bail-out for a NULL ENODE was nested inside
     "if (logger)", so with logging disabled we'd fall through and call
     add_edge with a NULL destination.  Check ENODE unconditionally.  */
  if (!enode)
    {
      if (logger)
	logger->log ("did not create enode for tainted_args %qE entrypoint",
		     fndecl);
      return;
    }

  if (logger)
    logger->log ("created EN %i for tainted_args %qE entrypoint",
		 enode->m_index, fndecl);

  /* Ownership of INFO passes to the new edge.  */
  tainted_args_call_info *info
    = new tainted_args_call_info (field, fndecl, loc);
  eg->add_edge (eg->get_origin (), enode, NULL, info);
}
/* Callback for walk_tree for finding callbacks within initializers;
ensure that any callback initializer where the corresponding field is
marked with '__attribute__((tainted_args))' is treated as an entrypoint
to the analysis, special-casing that the inputs to the callback are
untrustworthy. */
static tree
add_any_callbacks (tree *tp, int *, void *data)
{
  exploded_graph *eg = (exploded_graph *)data;

  /* Only CONSTRUCTOR nodes are of interest.  */
  if (TREE_CODE (*tp) != CONSTRUCTOR)
    return NULL_TREE;

  /* Find fields with the "tainted_args" attribute.
     walk_tree only walks the values, not the index values;
     look at the index values.  */
  unsigned HOST_WIDE_INT i;
  constructor_elt *ce;
  for (i = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (*tp), i, &ce); i++)
    {
      if (!ce->index || TREE_CODE (ce->index) != FIELD_DECL)
	continue;
      if (!lookup_attribute ("tainted_args", DECL_ATTRIBUTES (ce->index)))
	continue;
      /* Only handle initializers of the form "&fn".  */
      tree value = ce->value;
      if (TREE_CODE (value) == ADDR_EXPR
	  && TREE_CODE (TREE_OPERAND (value, 0)) == FUNCTION_DECL)
	add_tainted_args_callback (eg, ce->index,
				   TREE_OPERAND (value, 0),
				   EXPR_LOCATION (value));
    }

  return NULL_TREE;
}
/* Add initial nodes to EG, with entrypoints for externally-callable
functions. */
void
exploded_graph::build_initial_worklist ()
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);

  /* Create an entrypoint enode for each directly-traversable function.  */
  cgraph_node *node;
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    {
      function *fun = node->get_fun ();
      if (!toplevel_function_p (fun, logger))
	continue;
      exploded_node *enode = add_function_entry (fun);
      if (logger)
	{
	  if (enode)
	    logger->log ("created EN %i for %qE entrypoint",
			 enode->m_index, fun->decl);
	  else
	    logger->log ("did not create enode for %qE entrypoint",
			 fun->decl);
	}
    }

  /* Find callbacks that are reachable from global initializers.  */
  varpool_node *vpnode;
  FOR_EACH_VARIABLE (vpnode)
    {
      tree init = DECL_INITIAL (vpnode->decl);
      if (init)
	walk_tree (&init, add_any_callbacks, this, NULL);
    }
}
/* The main loop of the analysis.
Take freshly-created exploded_nodes from the worklist, calling
process_node on them to explore the <point, state> graph.
Add edges to their successors, potentially creating new successors
(which are also added to the worklist). */
void
exploded_graph::process_worklist ()
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);
  auto_timevar tv (TV_ANALYZER_WORKLIST);

  while (m_worklist.length () > 0)
    {
      exploded_node *node = m_worklist.take_next ();
      /* Worklist entries must be unprocessed; only the origin node may
	 already have successors.  */
      gcc_assert (node->get_status () == exploded_node::STATUS_WORKLIST);
      gcc_assert (node->m_succs.length () == 0
		  || node == m_origin);

      if (logger)
	logger->log ("next to process: EN: %i", node->m_index);

      /* If we have a run of nodes that are before-supernode, try merging and
	 processing them together, rather than pairwise or individually.  */
      if (flag_analyzer_state_merge && node != m_origin)
	if (maybe_process_run_of_before_supernode_enodes (node))
	  goto handle_limit;

      /* Avoid exponential explosions of nodes by attempting to merge
	 nodes that are at the same program point and which have
	 sufficiently similar state.  */
      if (flag_analyzer_state_merge && node != m_origin)
	if (exploded_node *node_2 = m_worklist.peek_next ())
	  {
	    gcc_assert (node_2->get_status ()
			== exploded_node::STATUS_WORKLIST);
	    gcc_assert (node->m_succs.length () == 0);
	    gcc_assert (node_2->m_succs.length () == 0);

	    gcc_assert (node != node_2);

	    if (logger)
	      logger->log ("peek worklist: EN: %i", node_2->m_index);

	    /* Only consider merging when the next node is at the same
	       program point.  */
	    if (node->get_point () == node_2->get_point ())
	      {
		const program_point &point = node->get_point ();
		if (logger)
		  {
		    format f (false);
		    pretty_printer *pp = logger->get_printer ();
		    logger->start_log_line ();
		    logger->log_partial
		      ("got potential merge EN: %i and EN: %i at ",
		       node->m_index, node_2->m_index);
		    point.print (pp, f);
		    logger->end_log_line ();
		  }
		const program_state &state = node->get_state ();
		const program_state &state_2 = node_2->get_state ();

		/* They shouldn't be equal, or we wouldn't have two
		   separate nodes.  */
		gcc_assert (state != state_2);

		program_state merged_state (m_ext_state);
		if (state.can_merge_with_p (state_2, m_ext_state,
					    point, &merged_state))
		  {
		    if (logger)
		      logger->log ("merging EN: %i and EN: %i",
				   node->m_index, node_2->m_index);

		    if (merged_state == state)
		      {
			/* Then merge node_2 into node by adding an edge.  */
			add_edge (node_2, node, NULL);

			/* Remove node_2 from the worklist.  */
			m_worklist.take_next ();
			node_2->set_status (exploded_node::STATUS_MERGER);

			/* Continue processing "node" below.  */
		      }
		    else if (merged_state == state_2)
		      {
			/* Then merge node into node_2, and leave node_2
			   in the worklist, to be processed on the next
			   iteration.  */
			add_edge (node, node_2, NULL);
			node->set_status (exploded_node::STATUS_MERGER);
			continue;
		      }
		    else
		      {
			/* We have a merged state that differs from
			   both state and state_2.  */

			/* Remove node_2 from the worklist.  */
			m_worklist.take_next ();

			/* Create (or get) an exploded node for the merged
			   states, adding to the worklist.  */
			exploded_node *merged_enode
			  = get_or_create_node (node->get_point (),
						merged_state, node);
			if (merged_enode == NULL)
			  continue;

			if (logger)
			  logger->log ("merged EN: %i and EN: %i into EN: %i",
				       node->m_index, node_2->m_index,
				       merged_enode->m_index);

			/* "node" and "node_2" have both now been removed
			   from the worklist; we should not process them.

			   "merged_enode" may be a new node; if so it will be
			   processed in a subsequent iteration.
			   Alternatively, "merged_enode" could be an existing
			   node; one way the latter can
			   happen is if we end up merging a succession of
			   similar nodes into one.  */

			/* If merged_enode is one of the two we were merging,
			   add it back to the worklist to ensure it gets
			   processed.

			   Add edges from the merged nodes to it (but not a
			   self-edge).  */
			if (merged_enode == node)
			  m_worklist.add_node (merged_enode);
			else
			  {
			    add_edge (node, merged_enode, NULL);
			    node->set_status (exploded_node::STATUS_MERGER);
			  }

			if (merged_enode == node_2)
			  m_worklist.add_node (merged_enode);
			else
			  {
			    add_edge (node_2, merged_enode, NULL);
			    node_2->set_status (exploded_node::STATUS_MERGER);
			  }

			continue;
		      }
		  }

		/* TODO: should we attempt more than two nodes,
		   or just do pairs of nodes?  (and hope that we get
		   a cascade of mergers).  */
	      }
	  }
      process_node (node);

    handle_limit:
      /* Impose a hard limit on the number of exploded nodes, to ensure
	 that the analysis terminates in the face of pathological state
	 explosion (or bugs).

	 Specifically, the limit is on the number of PK_AFTER_SUPERNODE
	 exploded nodes, looking at supernode exit events.

	 We use exit rather than entry since there can be multiple
	 entry ENs, one per phi; the number of PK_AFTER_SUPERNODE ought
	 to be equivalent to the number of supernodes multiplied by the
	 number of states.  */
      const int limit = m_sg.num_nodes () * param_analyzer_bb_explosion_factor;
      if (m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE] > limit)
	{
	  if (logger)
	    logger->log ("bailing out; too many nodes");
	  warning_at (node->get_point ().get_location (),
		      OPT_Wanalyzer_too_complex,
		      "analysis bailed out early"
		      " (%i 'after-snode' enodes; %i enodes)",
		      m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE],
		      m_nodes.length ());
	  return;
	}
    }
}
/* Attempt to process a consecutive run of sufficiently-similar nodes in
the worklist at a CFG join-point (having already popped ENODE from the
head of the worklist).
If ENODE's point is of the form (before-supernode, SNODE) and the next
nodes in the worklist are a consecutive run of enodes of the same form,
for the same supernode as ENODE (but potentially from different in-edges),
process them all together, setting their status to STATUS_BULK_MERGED,
and return true.
Otherwise, return false, in which case ENODE must be processed in the
normal way.
When processing them all together, generate successor states based
on phi nodes for the appropriate CFG edges, and then attempt to merge
these states into a minimal set of merged successor states, partitioning
the inputs by merged successor state.
Create new exploded nodes for all of the merged states, and add edges
connecting the input enodes to the corresponding merger exploded nodes.
We hope we have a much smaller number of merged successor states
compared to the number of input enodes - ideally just one,
if all successor states can be merged.
Processing and merging many together as one operation rather than as
pairs avoids scaling issues where per-pair mergers could bloat the
graph with merger nodes (especially so after switch statements). */
bool
exploded_graph::
maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
{
/* A struct for tracking per-input state. */
struct item
{
item (exploded_node *input_enode)
: m_input_enode (input_enode),
m_processed_state (input_enode->get_state ()),
m_merger_idx (-1)
{}