blob: a14d107709cd923b2c3b28419689ec2432878d0b [file] [log] [blame]
/* Classes for modeling the state of memory.
Copyright (C) 2019-2021 Free Software Foundation, Inc.
Contributed by David Malcolm <dmalcolm@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "diagnostic-core.h"
#include "graphviz.h"
#include "options.h"
#include "cgraph.h"
#include "tree-dfa.h"
#include "stringpool.h"
#include "convert.h"
#include "target.h"
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
#include "diagnostic-metadata.h"
#include "tristate.h"
#include "bitmap.h"
#include "selftest.h"
#include "function.h"
#include "json.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "ordered-hash-map.h"
#include "options.h"
#include "cgraph.h"
#include "cfg.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
#include "sbitmap.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "diagnostic-event-id.h"
#include "analyzer/sm.h"
#include "diagnostic-event-id.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
#include "analyzer/region-model-reachability.h"
#include "analyzer/analyzer-selftests.h"
#include "stor-layout.h"
#include "attribs.h"
#include "tree-object-size.h"
#if ENABLE_ANALYZER
namespace ana {
/* Dump T to PP in language-independent form, for debugging/logging/dumping
   purposes.
   Uses TDF_SLIM so that e.g. types are printed tersely.  */

void
dump_tree (pretty_printer *pp, tree t)
{
  dump_generic_node (pp, t, 0, TDF_SLIM, 0);
}
/* Dump T to PP in language-independent form in quotes, for
   debugging/logging/dumping purposes.  */

void
dump_quoted_tree (pretty_printer *pp, tree t)
{
  const bool colorize = pp_show_color (pp);
  pp_begin_quote (pp, colorize);
  dump_tree (pp, t);
  pp_end_quote (pp, colorize);
}
/* Equivalent to pp_printf (pp, "%qT", t), to avoid nesting pp_printf
   calls within other pp_printf calls.

   default_tree_printer handles 'T' and some other codes by calling
     dump_generic_node (pp, t, 0, TDF_SLIM, 0);
   dump_generic_node calls pp_printf in various places, leading to
   garbled output.

   Ideally pp_printf could be made to be reentrant, but in the meantime
   this function provides a workaround.  */

void
print_quoted_type (pretty_printer *pp, tree t)
{
  const bool colorize = pp_show_color (pp);
  pp_begin_quote (pp, colorize);
  dump_generic_node (pp, t, 0, TDF_SLIM, 0);
  pp_end_quote (pp, colorize);
}
/* class region_to_value_map.  */

/* Assignment operator for region_to_value_map.
   Replace this map's contents with a copy of OTHER's
   (region, svalue) mappings.  */

region_to_value_map &
region_to_value_map::operator= (const region_to_value_map &other)
{
  /* Guard against self-assignment: the empty () call below would
     otherwise discard the entries before we could copy them.  */
  if (this == &other)
    return *this;
  m_hash_map.empty ();
  for (auto iter : other.m_hash_map)
    {
      const region *reg = iter.first;
      const svalue *sval = iter.second;
      m_hash_map.put (reg, sval);
    }
  return *this;
}
/* Equality operator for region_to_value_map.
   Two maps are equal iff they hold exactly the same
   (region, svalue) pairs.  */

bool
region_to_value_map::operator== (const region_to_value_map &other) const
{
  /* Fast reject on differing sizes.  */
  if (m_hash_map.elements () != other.m_hash_map.elements ())
    return false;
  /* Sizes match, so it suffices to check that each of our mappings
     is present in OTHER with the same value.  */
  for (auto kv : *this)
    {
      const svalue * const *other_sval = other.get (kv.first);
      if (other_sval == NULL || *other_sval != kv.second)
	return false;
    }
  return true;
}
/* Dump this object to PP.
   SIMPLE controls how regions and svalues are printed; MULTILINE
   selects between one-entry-per-line output and a compact
   single-line "{...}" form.  */

void
region_to_value_map::dump_to_pp (pretty_printer *pp, bool simple,
				 bool multiline) const
{
  /* Gather and sort the keys, so that dump output is deterministic.  */
  auto_vec<const region *> sorted_regs;
  for (iterator it = begin (); it != end (); ++it)
    sorted_regs.safe_push ((*it).first);
  sorted_regs.qsort (region::cmp_ptr_ptr);

  if (multiline)
    pp_newline (pp);
  else
    pp_string (pp, " {");
  for (unsigned idx = 0; idx < sorted_regs.length (); idx++)
    {
      const region *reg = sorted_regs[idx];
      if (multiline)
	pp_string (pp, " ");
      else if (idx > 0)
	pp_string (pp, ", ");
      reg->dump_to_pp (pp, simple);
      pp_string (pp, ": ");
      const svalue *sval = *get (reg);
      sval->dump_to_pp (pp, true);
      if (multiline)
	pp_newline (pp);
    }
  if (!multiline)
    pp_string (pp, "}");
}
/* Dump this object to stderr.
   SIMPLE controls how regions and svalues are printed.  */

DEBUG_FUNCTION void
region_to_value_map::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  /* Inherit colorization setting from the global diagnostic context.  */
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple, true);
  pp_newline (&pp);
  pp_flush (&pp);
}
/* Attempt to merge THIS with OTHER, writing the result
to OUT.
For now, write (region, value) mappings that are in common between THIS
and OTHER to OUT, effectively taking the intersection, rather than
rejecting differences. */
bool
region_to_value_map::can_merge_with_p (const region_to_value_map &other,
region_to_value_map *out) const
{
for (auto iter : *this)
{
const region *iter_reg = iter.first;
const svalue *iter_sval = iter.second;
const svalue * const * other_slot = other.get (iter_reg);
if (other_slot)
if (iter_sval == *other_slot)
out->put (iter_reg, iter_sval);
}
return true;
}
/* Purge any state involving SVAL. */
void
region_to_value_map::purge_state_involving (const svalue *sval)
{
auto_vec<const region *> to_purge;
for (auto iter : *this)
{
const region *iter_reg = iter.first;
const svalue *iter_sval = iter.second;
if (iter_reg->involves_p (sval) || iter_sval->involves_p (sval))
to_purge.safe_push (iter_reg);
}
for (auto iter : to_purge)
m_hash_map.remove (iter);
}
/* class region_model.  */

/* Ctor for region_model: construct an "empty" model, with no frames,
   an empty store, and a freshly-allocated (empty) constraint_manager.  */

region_model::region_model (region_model_manager *mgr)
: m_mgr (mgr), m_store (), m_current_frame (NULL),
  m_dynamic_extents ()
{
  /* m_constraints is heap-allocated (and owned); see the dtor.  */
  m_constraints = new constraint_manager (mgr);
}
/* region_model's copy ctor.
   Deep-copies the constraint_manager (which this model owns);
   the other members are copied directly.  */

region_model::region_model (const region_model &other)
: m_mgr (other.m_mgr), m_store (other.m_store),
  m_constraints (new constraint_manager (*other.m_constraints)),
  m_current_frame (other.m_current_frame),
  m_dynamic_extents (other.m_dynamic_extents)
{
}
/* region_model's dtor.
   Releases the owned constraint_manager.  */

region_model::~region_model ()
{
  delete m_constraints;
}
/* region_model's assignment operator.
   Both models must use the same manager.  */

region_model &
region_model::operator= (const region_model &other)
{
  /* Guard against self-assignment: otherwise the "delete" below would
     leave us copy-constructing from a freed constraint_manager.  */
  if (this == &other)
    return *this;

  /* m_mgr is const.  */
  gcc_assert (m_mgr == other.m_mgr);

  m_store = other.m_store;

  /* Replace the owned constraint_manager with a deep copy of OTHER's.  */
  delete m_constraints;
  m_constraints = new constraint_manager (*other.m_constraints);

  m_current_frame = other.m_current_frame;
  m_dynamic_extents = other.m_dynamic_extents;
  return *this;
}
/* Equality operator for region_model.

   Amongst other things this directly compares the stores and the constraint
   managers, so for this to be meaningful both this and OTHER should
   have been canonicalized.  */

bool
region_model::operator== (const region_model &other) const
{
  /* We can only compare instances that use the same manager.  */
  gcc_assert (m_mgr == other.m_mgr);

  if (m_store != other.m_store
      || *m_constraints != *other.m_constraints
      || m_current_frame != other.m_current_frame
      || m_dynamic_extents != other.m_dynamic_extents)
    return false;

  /* Sanity check: equal models must hash equally.  */
  gcc_checking_assert (hash () == other.hash ());

  return true;
}
/* Generate a hash value for this region_model.
   Combines the store and constraint-manager hashes; the current frame
   and dynamic extents are not hashed.  */

hashval_t
region_model::hash () const
{
  return m_store.hash () ^ m_constraints->hash ();
}
/* Dump a representation of this model to PP, showing the
   stack, the store, and any constraints.
   Use SIMPLE to control how svalues and regions are printed.
   MULTILINE selects between one-section-per-line output and a compact
   single-line form with "{...}" delimiters.  */

void
region_model::dump_to_pp (pretty_printer *pp, bool simple,
			  bool multiline) const
{
  /* Dump stack, walking from the innermost frame outwards.  */
  pp_printf (pp, "stack depth: %i", get_stack_depth ());
  if (multiline)
    pp_newline (pp);
  else
    pp_string (pp, " {");
  for (const frame_region *iter_frame = m_current_frame; iter_frame;
       iter_frame = iter_frame->get_calling_frame ())
    {
      if (multiline)
	pp_string (pp, " ");
      else if (iter_frame != m_current_frame)
	pp_string (pp, ", ");
      pp_printf (pp, "frame (index %i): ", iter_frame->get_index ());
      iter_frame->dump_to_pp (pp, simple);
      if (multiline)
	pp_newline (pp);
    }
  if (!multiline)
    pp_string (pp, "}");

  /* Dump store.  */
  if (!multiline)
    pp_string (pp, ", {");
  m_store.dump_to_pp (pp, simple, multiline,
		      m_mgr->get_store_manager ());
  if (!multiline)
    pp_string (pp, "}");

  /* Dump constraints.  */
  pp_string (pp, "constraint_manager:");
  if (multiline)
    pp_newline (pp);
  else
    pp_string (pp, " {");
  m_constraints->dump_to_pp (pp, multiline);
  if (!multiline)
    pp_string (pp, "}");

  /* Dump sizes of dynamic regions, if any are known.  */
  if (!m_dynamic_extents.is_empty ())
    {
      pp_string (pp, "dynamic_extents:");
      m_dynamic_extents.dump_to_pp (pp, simple, multiline);
    }
}
/* Dump a representation of this model to FP.
   SIMPLE and MULTILINE are passed through to dump_to_pp.  */

void
region_model::dump (FILE *fp, bool simple, bool multiline) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  /* Inherit colorization setting from the global diagnostic context.  */
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = fp;
  dump_to_pp (&pp, simple, multiline);
  pp_newline (&pp);
  pp_flush (&pp);
}
/* Dump a multiline representation of this model to stderr.
   SIMPLE controls how svalues and regions are printed.  */

DEBUG_FUNCTION void
region_model::dump (bool simple) const
{
  dump (stderr, simple, true);
}
/* Dump a multiline representation of this model to stderr, using the
   "simple" form; convenient to call from within a debugger.  */

DEBUG_FUNCTION void
region_model::debug () const
{
  dump (true);
}
/* Assert that this object is valid (currently: that the store's
   internal invariants hold).  */

void
region_model::validate () const
{
  m_store.validate ();
}
/* Canonicalize the store and constraints, to maximize the chance of
   equality between region_model instances.  */

void
region_model::canonicalize ()
{
  m_store.canonicalize (m_mgr->get_store_manager ());
  m_constraints->canonicalize ();
}
/* Return true if this region_model is in canonical form. */
bool
region_model::canonicalized_p () const
{
region_model copy (*this);
copy.canonicalize ();
return *this == copy;
}
/* See the comment for store::loop_replay_fixup.
   DST_STATE is the state being replayed towards.  */

void
region_model::loop_replay_fixup (const region_model *dst_state)
{
  m_store.loop_replay_fixup (dst_state->get_store (), m_mgr);
}
/* A subclass of pending_diagnostic for complaining about uses of
   poisoned values (uninitialized, freed, or within a popped stack
   frame).  */

class poisoned_value_diagnostic
: public pending_diagnostic_subclass<poisoned_value_diagnostic>
{
public:
  /* EXPR is a tree for the poisoned value, for use in messages;
     PKIND records how the value became poisoned.  */
  poisoned_value_diagnostic (tree expr, enum poison_kind pkind)
  : m_expr (expr), m_pkind (pkind)
  {}

  const char *get_kind () const FINAL OVERRIDE { return "poisoned_value_diagnostic"; }

  /* Used by the diagnostic machinery to identify
     uninitialized-value warnings.  */
  bool use_of_uninit_p () const FINAL OVERRIDE
  {
    return m_pkind == POISON_KIND_UNINIT;
  }

  /* Deduplication: two instances are "the same" if they complain about
     the same expression (the poison kind is deliberately not compared).  */
  bool operator== (const poisoned_value_diagnostic &other) const
  {
    return m_expr == other.m_expr;
  }

  /* Emit the warning appropriate to the poison kind, attaching the
     matching CWE identifier where one applies.  */
  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    switch (m_pkind)
      {
      default:
	gcc_unreachable ();
      case POISON_KIND_UNINIT:
	{
	  diagnostic_metadata m;
	  m.add_cwe (457); /* "CWE-457: Use of Uninitialized Variable".  */
	  return warning_meta (rich_loc, m,
			       OPT_Wanalyzer_use_of_uninitialized_value,
			       "use of uninitialized value %qE",
			       m_expr);
	}
	break;
      case POISON_KIND_FREED:
	{
	  diagnostic_metadata m;
	  m.add_cwe (416); /* "CWE-416: Use After Free".  */
	  return warning_meta (rich_loc, m,
			       OPT_Wanalyzer_use_after_free,
			       "use after %<free%> of %qE",
			       m_expr);
	}
	break;
      case POISON_KIND_POPPED_STACK:
	{
	  /* TODO: which CWE?  */
	  return warning_at
	    (rich_loc,
	     OPT_Wanalyzer_use_of_pointer_in_stale_stack_frame,
	     "dereferencing pointer %qE to within stale stack frame",
	     m_expr);
	}
	break;
      }
  }

  /* Describe the final event in the diagnostic path, matching the
     wording of the warning emitted above.  */
  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    switch (m_pkind)
      {
      default:
	gcc_unreachable ();
      case POISON_KIND_UNINIT:
	return ev.formatted_print ("use of uninitialized value %qE here",
				   m_expr);
      case POISON_KIND_FREED:
	return ev.formatted_print ("use after %<free%> of %qE here",
				   m_expr);
      case POISON_KIND_POPPED_STACK:
	return ev.formatted_print
	  ("dereferencing pointer %qE to within stale stack frame",
	   m_expr);
      }
  }

private:
  tree m_expr;		/* The expression whose value is poisoned.  */
  enum poison_kind m_pkind;  /* How the value became poisoned.  */
};
/* A subclass of pending_diagnostic for complaining about shifts
   by negative counts.  */

class shift_count_negative_diagnostic
: public pending_diagnostic_subclass<shift_count_negative_diagnostic>
{
public:
  /* ASSIGN is the shift statement; COUNT_CST is the (negative)
     constant shift count.  */
  shift_count_negative_diagnostic (const gassign *assign, tree count_cst)
  : m_assign (assign), m_count_cst (count_cst)
  {}

  const char *get_kind () const FINAL OVERRIDE
  {
    return "shift_count_negative_diagnostic";
  }

  /* Deduplicate by statement and count value.  */
  bool operator== (const shift_count_negative_diagnostic &other) const
  {
    return (m_assign == other.m_assign
	    && same_tree_p (m_count_cst, other.m_count_cst));
  }

  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    return warning_at (rich_loc, OPT_Wanalyzer_shift_count_negative,
		       "shift by negative count (%qE)", m_count_cst);
  }

  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    return ev.formatted_print ("shift by negative amount here (%qE)", m_count_cst);
  }

private:
  const gassign *m_assign;  /* The offending shift statement.  */
  tree m_count_cst;	    /* The negative shift count.  */
};
/* A subclass of pending_diagnostic for complaining about shifts
   by counts >= the width of the operand type.  */

class shift_count_overflow_diagnostic
: public pending_diagnostic_subclass<shift_count_overflow_diagnostic>
{
public:
  /* ASSIGN is the shift statement; OPERAND_PRECISION is the bit width
     of the shifted operand's type; COUNT_CST is the constant shift
     count (>= OPERAND_PRECISION).  */
  shift_count_overflow_diagnostic (const gassign *assign,
				   int operand_precision,
				   tree count_cst)
  : m_assign (assign), m_operand_precision (operand_precision),
    m_count_cst (count_cst)
  {}

  const char *get_kind () const FINAL OVERRIDE
  {
    return "shift_count_overflow_diagnostic";
  }

  /* Deduplicate by statement, precision, and count value.  */
  bool operator== (const shift_count_overflow_diagnostic &other) const
  {
    return (m_assign == other.m_assign
	    && m_operand_precision == other.m_operand_precision
	    && same_tree_p (m_count_cst, other.m_count_cst));
  }

  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    return warning_at (rich_loc, OPT_Wanalyzer_shift_count_overflow,
		       "shift by count (%qE) >= precision of type (%qi)",
		       m_count_cst, m_operand_precision);
  }

  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    return ev.formatted_print ("shift by count %qE here", m_count_cst);
  }

private:
  const gassign *m_assign;   /* The offending shift statement.  */
  int m_operand_precision;   /* Bit width of the shifted operand's type.  */
  tree m_count_cst;	     /* The out-of-range shift count.  */
};
/* If ASSIGN is a stmt that can be modelled via
     set_value (lhs_reg, SVALUE, CTXT)
   for some SVALUE, get the SVALUE.
   Otherwise return NULL.

   CTXT (which can be NULL) is used when evaluating the rvalues of the
   operands, and for reporting shift-count diagnostics.  */

const svalue *
region_model::get_gassign_result (const gassign *assign,
				  region_model_context *ctxt)
{
  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  enum tree_code op = gimple_assign_rhs_code (assign);
  switch (op)
    {
    default:
      /* Not expressible as a single svalue; the caller must handle it
	 (e.g. via an unknown value).  */
      return NULL;

    case POINTER_PLUS_EXPR:
      {
	/* e.g. "_1 = a_10(D) + 12;" */
	tree ptr = rhs1;
	tree offset = gimple_assign_rhs2 (assign);

	const svalue *ptr_sval = get_rvalue (ptr, ctxt);
	const svalue *offset_sval = get_rvalue (offset, ctxt);
	/* Quoting tree.def, "the second operand [of a POINTER_PLUS_EXPR]
	   is an integer of type sizetype".  */
	offset_sval = m_mgr->get_or_create_cast (size_type_node, offset_sval);

	const svalue *sval_binop
	  = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
					ptr_sval, offset_sval);
	return sval_binop;
      }
      break;

    case POINTER_DIFF_EXPR:
      {
	/* e.g. "_1 = p_2(D) - q_3(D);".  */
	tree rhs2 = gimple_assign_rhs2 (assign);
	const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
	const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);

	// TODO: perhaps fold to zero if they're known to be equal?

	const svalue *sval_binop
	  = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
					rhs1_sval, rhs2_sval);
	return sval_binop;
      }
      break;

    /* Assignments of the form
	set_value (lvalue (LHS), rvalue (EXPR))
       for various EXPR.
       We already have the lvalue for the LHS above, as "lhs_reg".  */
    case ADDR_EXPR: /* LHS = &RHS;  */
    case BIT_FIELD_REF:
    case COMPONENT_REF: /* LHS = op0.op1;  */
    case MEM_REF:
    case REAL_CST:
    case COMPLEX_CST:
    case VECTOR_CST:
    case INTEGER_CST:
    case ARRAY_REF:
    case SSA_NAME: /* LHS = VAR; */
    case VAR_DECL: /* LHS = VAR; */
    case PARM_DECL:/* LHS = VAR; */
    case REALPART_EXPR:
    case IMAGPART_EXPR:
      return get_rvalue (rhs1, ctxt);

    case ABS_EXPR:
    case ABSU_EXPR:
    case CONJ_EXPR:
    case BIT_NOT_EXPR:
    case FIX_TRUNC_EXPR:
    case FLOAT_EXPR:
    case NEGATE_EXPR:
    case NOP_EXPR:
    case VIEW_CONVERT_EXPR:
      {
	/* Unary ops.  */
	const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
	const svalue *sval_unaryop
	  = m_mgr->get_or_create_unaryop (TREE_TYPE (lhs), op, rhs_sval);
	return sval_unaryop;
      }

    case EQ_EXPR:
    case GE_EXPR:
    case LE_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case LT_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
      {
	/* Comparisons.  */
	tree rhs2 = gimple_assign_rhs2 (assign);

	const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
	const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);

	if (TREE_TYPE (lhs) == boolean_type_node)
	  {
	    /* Consider constraints between svalues; if the condition's
	       truth value is known, fold to a boolean constant.  */
	    tristate t = eval_condition (rhs1_sval, op, rhs2_sval);
	    if (t.is_known ())
	      return m_mgr->get_or_create_constant_svalue
		(t.is_true () ? boolean_true_node : boolean_false_node);
	  }

	/* Otherwise, generate a symbolic binary op.  */
	const svalue *sval_binop
	  = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
					rhs1_sval, rhs2_sval);
	return sval_binop;
      }
      break;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case MULT_HIGHPART_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
    case MIN_EXPR:
    case MAX_EXPR:
    case COMPLEX_EXPR:
      {
	/* Binary ops.  */
	tree rhs2 = gimple_assign_rhs2 (assign);

	const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
	const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);

	if (ctxt && (op == LSHIFT_EXPR || op == RSHIFT_EXPR))
	  {
	    /* "INT34-C. Do not shift an expression by a negative number of bits
	       or by greater than or equal to the number of bits that exist in
	       the operand."  */
	    if (const tree rhs2_cst = rhs2_sval->maybe_get_constant ())
	      if (TREE_CODE (rhs2_cst) == INTEGER_CST)
		{
		  if (tree_int_cst_sgn (rhs2_cst) < 0)
		    ctxt->warn (new shift_count_negative_diagnostic
				  (assign, rhs2_cst));
		  else if (compare_tree_int (rhs2_cst,
					     TYPE_PRECISION (TREE_TYPE (rhs1)))
			   >= 0)
		    ctxt->warn (new shift_count_overflow_diagnostic
				  (assign, TYPE_PRECISION (TREE_TYPE (rhs1)),
				   rhs2_cst));
		}
	  }

	const svalue *sval_binop
	  = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
					rhs1_sval, rhs2_sval);
	return sval_binop;
      }

    /* Vector expressions.  In theory we could implement these elementwise,
       but for now, simply return unknown values.  */
    case VEC_DUPLICATE_EXPR:
    case VEC_SERIES_EXPR:
    case VEC_COND_EXPR:
    case VEC_PERM_EXPR:
    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case VEC_WIDEN_MULT_EVEN_EXPR:
    case VEC_WIDEN_MULT_ODD_EXPR:
    case VEC_UNPACK_HI_EXPR:
    case VEC_UNPACK_LO_EXPR:
    case VEC_UNPACK_FLOAT_HI_EXPR:
    case VEC_UNPACK_FLOAT_LO_EXPR:
    case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
    case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
    case VEC_PACK_FIX_TRUNC_EXPR:
    case VEC_PACK_FLOAT_EXPR:
    case VEC_WIDEN_LSHIFT_HI_EXPR:
    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (lhs));
    }
}
/* Check for SVAL being poisoned, adding a warning to CTXT.
   Return SVAL, or, if a warning is added, another value, to avoid
   repeatedly complaining about the same poisoned value in followup code.  */

const svalue *
region_model::check_for_poison (const svalue *sval,
				tree expr,
				region_model_context *ctxt) const
{
  if (!ctxt)
    return sval;

  const poisoned_svalue *poisoned_sval = sval->dyn_cast_poisoned_svalue ();
  if (!poisoned_sval)
    return sval;

  /* If we have an SSA name for a temporary, we don't want to print
     '<unknown>'.
     Poisoned values are shared by type, and so we can't reconstruct
     the tree other than via the def stmts, using
     fixup_tree_for_diagnostic.  */
  tree diag_arg = fixup_tree_for_diagnostic (expr);
  enum poison_kind pkind = poisoned_sval->get_poison_kind ();
  if (ctxt->warn (new poisoned_value_diagnostic (diag_arg, pkind)))
    /* We only want to report use of a poisoned value at the first
       place it gets used; return an unknown value to avoid generating
       a chain of followup warnings.  */
    return m_mgr->get_or_create_unknown_svalue (sval->get_type ());

  return sval;
}
/* Update this model for the ASSIGN stmt, using CTXT to report any
   diagnostics.  */

void
region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
{
  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);

  const region *lhs_reg = get_lvalue (lhs, ctxt);

  /* Most assignments are handled by:
       set_value (lhs_reg, SVALUE, CTXT)
     for some SVALUE.  */
  if (const svalue *sval = get_gassign_result (assign, ctxt))
    {
      tree expr = get_diagnostic_tree_for_gassign (assign);
      /* Warn (once) if the result involves a poisoned value; the value
	 is stored regardless.  */
      check_for_poison (sval, expr, ctxt);
      set_value (lhs_reg, sval, ctxt);
      return;
    }

  /* Fall back to op-specific handling for the remaining rhs codes.  */
  enum tree_code op = gimple_assign_rhs_code (assign);
  switch (op)
    {
    default:
      {
	/* Change "if (0)" to "if (1)" to get a sorry diagnostic for
	   each unhandled op, when debugging coverage.  */
	if (0)
	  sorry_at (assign->location, "unhandled assignment op: %qs",
		    get_tree_code_name (op));
	/* Conservatively treat the lhs as unknown.  */
	const svalue *unknown_sval
	  = m_mgr->get_or_create_unknown_svalue (TREE_TYPE (lhs));
	set_value (lhs_reg, unknown_sval, ctxt);
      }
      break;

    case CONSTRUCTOR:
      {
	if (TREE_CLOBBER_P (rhs1))
	  {
	    /* e.g. "x ={v} {CLOBBER};"  */
	    clobber_region (lhs_reg);
	  }
	else
	  {
	    /* Any CONSTRUCTOR that survives to this point is either
	       just a zero-init of everything, or a vector.  */
	    if (!CONSTRUCTOR_NO_CLEARING (rhs1))
	      zero_fill_region (lhs_reg);
	    unsigned ix;
	    tree index;
	    tree val;
	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), ix, index, val)
	      {
		gcc_assert (TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE);
		/* An absent index means "the next element", i.e. IX.  */
		if (!index)
		  index = build_int_cst (integer_type_node, ix);
		gcc_assert (TREE_CODE (index) == INTEGER_CST);
		const svalue *index_sval
		  = m_mgr->get_or_create_constant_svalue (index);
		gcc_assert (index_sval);
		const region *sub_reg
		  = m_mgr->get_element_region (lhs_reg,
					       TREE_TYPE (val),
					       index_sval);
		const svalue *val_sval = get_rvalue (val, ctxt);
		set_value (sub_reg, val_sval, ctxt);
	      }
	  }
      }
      break;

    case STRING_CST:
      {
	/* e.g. "struct s2 x = {{'A', 'B', 'C', 'D'}};".  */
	const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
	m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
			   ctxt ? ctxt->get_uncertainty () : NULL);
      }
      break;
    }
}
/* A pending_diagnostic subclass for implementing "__analyzer_dump_path":
   emits an informational "path" note so the path to this point is
   shown in test output.  */

class dump_path_diagnostic
  : public pending_diagnostic_subclass<dump_path_diagnostic>
{
public:
  /* Emit a "path" note (always succeeds).  */
  bool emit (rich_location *richloc) FINAL OVERRIDE
  {
    inform (richloc, "path");
    return true;
  }

  const char *get_kind () const FINAL OVERRIDE { return "dump_path_diagnostic"; }

  /* All instances are interchangeable for deduplication purposes.  */
  bool operator== (const dump_path_diagnostic &) const
  {
    return true;
  }
};
/* Handle the pre-sm-state part of STMT, modifying this object in-place.
   Write true to *OUT_TERMINATE_PATH if the path should be terminated.
   Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
   side effects.
   CTXT is used for reporting any diagnostics.  */

void
region_model::on_stmt_pre (const gimple *stmt,
			   bool *out_terminate_path,
			   bool *out_unknown_side_effects,
			   region_model_context *ctxt)
{
  switch (gimple_code (stmt))
    {
    default:
      /* No-op for now.  */
      break;

    case GIMPLE_ASSIGN:
      {
	const gassign *assign = as_a <const gassign *> (stmt);
	on_assignment (assign, ctxt);
      }
      break;

    case GIMPLE_ASM:
      {
	const gasm *asm_stmt = as_a <const gasm *> (stmt);
	on_asm_stmt (asm_stmt, ctxt);
      }
      break;

    case GIMPLE_CALL:
      {
	/* Track whether we have a gcall to a function that's not recognized by
	   anything, for which we don't have a function body, or for which we
	   don't know the fndecl.  */
	const gcall *call = as_a <const gcall *> (stmt);

	/* Debugging/test support: handle the various __analyzer_*
	   "builtins" used by the testsuite.  */
	if (is_special_named_call_p (call, "__analyzer_describe", 2))
	  impl_call_analyzer_describe (call, ctxt);
	else if (is_special_named_call_p (call, "__analyzer_dump_capacity", 1))
	  impl_call_analyzer_dump_capacity (call, ctxt);
	else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
	  {
	    /* Handle the builtin "__analyzer_dump_path" by queuing a
	       diagnostic at this exploded_node.  */
	    ctxt->warn (new dump_path_diagnostic ());
	  }
	else if (is_special_named_call_p (call, "__analyzer_dump_region_model",
					  0))
	  {
	    /* Handle the builtin "__analyzer_dump_region_model" by dumping
	       the region model's state to stderr.  */
	    dump (false);
	  }
	else if (is_special_named_call_p (call, "__analyzer_eval", 1))
	  impl_call_analyzer_eval (call, ctxt);
	else if (is_special_named_call_p (call, "__analyzer_break", 0))
	  {
	    /* Handle the builtin "__analyzer_break" by triggering a
	       breakpoint.  */
	    /* TODO: is there a good cross-platform way to do this?  */
	    raise (SIGINT);
	  }
	else if (is_special_named_call_p (call,
					  "__analyzer_dump_exploded_nodes",
					  1))
	  {
	    /* This is handled elsewhere.  */
	  }
	else
	  /* A regular call; delegate to on_call_pre, which reports
	     whether the call has unknown side effects.  */
	  *out_unknown_side_effects = on_call_pre (call, ctxt,
						   out_terminate_path);
      }
      break;

    case GIMPLE_RETURN:
      {
	const greturn *return_ = as_a <const greturn *> (stmt);
	on_return (return_, ctxt);
      }
      break;
    }
}
/* Update this model for the CALL stmt, using CTXT to report any
diagnostics - the first half.
Updates to the region_model that should be made *before* sm-states
are updated are done here; other updates to the region_model are done
in region_model::on_call_post.
Return true if the function call has unknown side effects (it wasn't
recognized and we don't have a body for it, or are unable to tell which
fndecl it is).
Write true to *OUT_TERMINATE_PATH if this execution path should be
terminated (e.g. the function call terminates the process). */
bool
region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
bool *out_terminate_path)
{
call_details cd (call, this, ctxt);
bool unknown_side_effects = false;
/* Some of the cases below update the lhs of the call based on the
return value, but not all. Provide a default value, which may
get overwritten below. */
if (tree lhs = gimple_call_lhs (call))
{
const region *lhs_region = get_lvalue (lhs, ctxt);
const svalue *sval
= m_mgr->get_or_create_conjured_svalue (TREE_TYPE (lhs), call,
lhs_region);
purge_state_involving (sval, ctxt);
set_value (lhs_region, sval, ctxt);
}
if (gimple_call_internal_p (call))
{
switch (gimple_call_internal_fn (call))
{
default:
break;
case IFN_BUILTIN_EXPECT:
impl_call_builtin_expect (cd);
return false;
case IFN_UBSAN_BOUNDS:
return false;
}
}
if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
{
/* The various impl_call_* member functions are implemented
in region-model-impl-calls.cc.
Having them split out into separate functions makes it easier
to put breakpoints on the handling of specific functions. */
if (fndecl_built_in_p (callee_fndecl, BUILT_IN_NORMAL)
&& gimple_builtin_call_types_compatible_p (call, callee_fndecl))
switch (DECL_UNCHECKED_FUNCTION_CODE (callee_fndecl))
{
default:
if (!DECL_PURE_P (callee_fndecl))
unknown_side_effects = true;
break;
case BUILT_IN_ALLOCA:
case BUILT_IN_ALLOCA_WITH_ALIGN:
impl_call_alloca (cd);
return false;
case BUILT_IN_CALLOC:
impl_call_calloc (cd);
return false;
case BUILT_IN_EXPECT:
case BUILT_IN_EXPECT_WITH_PROBABILITY:
impl_call_builtin_expect (cd);
return false;
case BUILT_IN_FREE:
/* Handle in "on_call_post". */
break;
case BUILT_IN_MALLOC:
impl_call_malloc (cd);
return false;
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMCPY_CHK:
impl_call_memcpy (cd);
return false;
case BUILT_IN_MEMSET:
case BUILT_IN_MEMSET_CHK:
impl_call_memset (cd);
return false;
break;
case BUILT_IN_REALLOC:
return false;
case BUILT_IN_STRCPY:
case BUILT_IN_STRCPY_CHK:
impl_call_strcpy (cd);
return false;
case BUILT_IN_STRLEN:
impl_call_strlen (cd);
return false;
case BUILT_IN_STACK_SAVE:
case BUILT_IN_STACK_RESTORE:
return false;
/* Stdio builtins. */
case BUILT_IN_FPRINTF:
case BUILT_IN_FPRINTF_UNLOCKED:
case BUILT_IN_PUTC:
case BUILT_IN_PUTC_UNLOCKED:
case BUILT_IN_FPUTC:
case BUILT_IN_FPUTC_UNLOCKED:
case BUILT_IN_FPUTS:
case BUILT_IN_FPUTS_UNLOCKED:
case BUILT_IN_FWRITE:
case BUILT_IN_FWRITE_UNLOCKED:
case BUILT_IN_PRINTF:
case BUILT_IN_PRINTF_UNLOCKED:
case BUILT_IN_PUTCHAR:
case BUILT_IN_PUTCHAR_UNLOCKED:
case BUILT_IN_PUTS:
case BUILT_IN_PUTS_UNLOCKED:
case BUILT_IN_VFPRINTF:
case BUILT_IN_VPRINTF:
/* These stdio builtins have external effects that are out
of scope for the analyzer: we only want to model the effects
on the return value. */
break;
}
else if (is_named_call_p (callee_fndecl, "malloc", call, 1))
{
impl_call_malloc (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "calloc", call, 2))
{
impl_call_calloc (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "alloca", call, 1))
{
impl_call_alloca (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "realloc", call, 2))
{
impl_call_realloc (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "error"))
{
if (impl_call_error (cd, 3, out_terminate_path))
return false;
else
unknown_side_effects = true;
}
else if (is_named_call_p (callee_fndecl, "error_at_line"))
{
if (impl_call_error (cd, 5, out_terminate_path))
return false;
else
unknown_side_effects = true;
}
else if (is_named_call_p (callee_fndecl, "fgets", call, 3)
|| is_named_call_p (callee_fndecl, "fgets_unlocked", call, 3))
{
impl_call_fgets (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "fread", call, 4))
{
impl_call_fread (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "getchar", call, 0))
{
/* No side-effects (tracking stream state is out-of-scope
for the analyzer). */
}
else if (is_named_call_p (callee_fndecl, "memset", call, 3)
&& POINTER_TYPE_P (cd.get_arg_type (0)))
{
impl_call_memset (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "strlen", call, 1)
&& POINTER_TYPE_P (cd.get_arg_type (0)))
{
impl_call_strlen (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "operator new", call, 1))
{
impl_call_operator_new (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "operator new []", call, 1))
{
impl_call_operator_new (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "operator delete", call, 1)
|| is_named_call_p (callee_fndecl, "operator delete", call, 2)
|| is_named_call_p (callee_fndecl, "operator delete []", call, 1))
{
/* Handle in "on_call_post". */
}
else if (!fndecl_has_gimple_body_p (callee_fndecl)
&& !DECL_PURE_P (callee_fndecl)
&& !fndecl_built_in_p (callee_fndecl))
unknown_side_effects = true;
}
else
unknown_side_effects = true;
return unknown_side_effects;
}
/* Update this model for the CALL stmt, using CTXT to report any
   diagnostics - the second half.

   Updates to the region_model that should be made *after* sm-states
   are updated are done here; other updates to the region_model are done
   in region_model::on_call_pre.

   If UNKNOWN_SIDE_EFFECTS is true, also call handle_unrecognized_call
   to purge state.  */

void
region_model::on_call_post (const gcall *call,
			    bool unknown_side_effects,
			    region_model_context *ctxt)
{
  if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
    {
      call_details cd (call, this, ctxt);
      /* Deallocators are handled here (rather than in on_call_pre) so
	 that sm-state (e.g. malloc-state) is updated first.  */
      if (is_named_call_p (callee_fndecl, "free", call, 1))
	{
	  impl_call_free (cd);
	  return;
	}
      if (is_named_call_p (callee_fndecl, "operator delete", call, 1)
	  || is_named_call_p (callee_fndecl, "operator delete", call, 2)
	  || is_named_call_p (callee_fndecl, "operator delete []", call, 1))
	{
	  impl_call_operator_delete (cd);
	  return;
	}
      /* Was this fndecl referenced by
	 __attribute__((malloc(FOO)))?  */
      if (lookup_attribute ("*dealloc", DECL_ATTRIBUTES (callee_fndecl)))
	{
	  impl_deallocation_call (cd);
	  return;
	}
      if (fndecl_built_in_p (callee_fndecl, BUILT_IN_NORMAL)
	  && gimple_builtin_call_types_compatible_p (call, callee_fndecl))
	switch (DECL_UNCHECKED_FUNCTION_CODE (callee_fndecl))
	  {
	  default:
	    break;
	  case BUILT_IN_REALLOC:
	    impl_call_realloc (cd);
	    return;
	  }
    }

  if (unknown_side_effects)
    handle_unrecognized_call (call, ctxt);
}
/* Purge state involving SVAL from this region_model, using CTXT
   (if non-NULL) to purge other state in a program_state.

   For example, if we're at the def-stmt of an SSA name, then we need to
   purge any state for svalues that involve that SSA name.  This avoids
   false positives in loops, since a symbolic value referring to the
   SSA name will be referring to the previous value of that SSA name.

   For example, in:
     while ((e = hashmap_iter_next(&iter))) {
       struct oid2strbuf *e_strbuf = (struct oid2strbuf *)e;
       free (e_strbuf->value);
     }
   at the def-stmt of e_8:
     e_8 = hashmap_iter_next (&iter);
   we should purge the "freed" state of:
     INIT_VAL(CAST_REG(‘struct oid2strbuf’, (*INIT_VAL(e_8))).value)
   which is the "e_strbuf->value" value from the previous iteration,
   or we will erroneously report a double-free - the "e_8" within it
   refers to the previous value.  */

void
region_model::purge_state_involving (const svalue *sval,
				     region_model_context *ctxt)
{
  /* Only values that can have sm-state associated with them need
     purging.  */
  if (!sval->can_have_associated_state_p ())
    return;
  /* Purge from the store, constraints, and dynamic extents...  */
  m_store.purge_state_involving (sval, m_mgr);
  m_constraints->purge_state_involving (sval);
  m_dynamic_extents.purge_state_involving (sval);
  /* ...and from any wider program_state, via CTXT.  */
  if (ctxt)
    ctxt->purge_state_involving (sval);
}
/* Handle a call CALL to a function with unknown behavior.

   Traverse the regions in this model, determining what regions are
   reachable from pointer arguments to CALL and from global variables,
   recursively.

   Set all reachable regions to new unknown values and purge sm-state
   from their values, and from values that point to them.  */

void
region_model::handle_unrecognized_call (const gcall *call,
					region_model_context *ctxt)
{
  tree fndecl = get_fndecl_for_call (call, ctxt);

  reachable_regions reachable_regs (this);

  /* Determine the reachable regions and their mutability.  */
  {
    /* Add globals and regions that already escaped in previous
       unknown calls.  */
    m_store.for_each_cluster (reachable_regions::init_cluster_cb,
			      &reachable_regs);

    /* Params that are pointers.  */
    tree iter_param_types = NULL_TREE;
    if (fndecl)
      iter_param_types = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
    for (unsigned arg_idx = 0; arg_idx < gimple_call_num_args (call); arg_idx++)
      {
	/* Track expected param type, where available.  The TYPE_ARG_TYPES
	   chain can be shorter than the actual argument list (e.g. for
	   variadic calls), in which case PARAM_TYPE stays NULL_TREE.  */
	tree param_type = NULL_TREE;
	if (iter_param_types)
	  {
	    param_type = TREE_VALUE (iter_param_types);
	    gcc_assert (param_type);
	    iter_param_types = TREE_CHAIN (iter_param_types);
	  }

	tree parm = gimple_call_arg (call, arg_idx);
	const svalue *parm_sval = get_rvalue (parm, ctxt);
	reachable_regs.handle_parm (parm_sval, param_type);
      }
  }

  uncertainty_t *uncertainty = ctxt ? ctxt->get_uncertainty () : NULL;

  /* Purge sm-state for the svalues that were reachable,
     both in non-mutable and mutable form.  */
  for (svalue_set::iterator iter
	 = reachable_regs.begin_reachable_svals ();
       iter != reachable_regs.end_reachable_svals (); ++iter)
    {
      const svalue *sval = (*iter);
      if (ctxt)
	ctxt->on_unknown_change (sval, false);
    }
  for (svalue_set::iterator iter
	 = reachable_regs.begin_mutable_svals ();
       iter != reachable_regs.end_mutable_svals (); ++iter)
    {
      const svalue *sval = (*iter);
      if (ctxt)
	ctxt->on_unknown_change (sval, true);
      /* Record that SVAL might be rebound at this unknown call, so
	 that leak detection can treat it as maybe-live.  */
      if (uncertainty)
	uncertainty->on_mutable_sval_at_unknown_call (sval);
    }

  /* Mark any clusters that have escaped.  */
  reachable_regs.mark_escaped_clusters (ctxt);

  /* Update bindings for all clusters that have escaped, whether above,
     or previously.  */
  m_store.on_unknown_fncall (call, m_mgr->get_store_manager ());

  /* Purge dynamic extents from any regions that have escaped mutably:
     realloc could have been called on them.  */
  for (hash_set<const region *>::iterator
	 iter = reachable_regs.begin_mutable_base_regs ();
       iter != reachable_regs.end_mutable_base_regs ();
       ++iter)
    {
      const region *base_reg = (*iter);
      unset_dynamic_extents (base_reg);
    }
}
/* Traverse the regions in this model, determining what regions are
   reachable from the store and populating *OUT.

   If EXTRA_SVAL is non-NULL, treat it as an additional "root"
   for reachability (for handling return values from functions when
   analyzing return of the only function on the stack).

   If UNCERTAINTY is non-NULL, treat any svalues that were recorded
   within it as being maybe-bound as additional "roots" for reachability.

   Find svalues that haven't leaked.  */

void
region_model::get_reachable_svalues (svalue_set *out,
				     const svalue *extra_sval,
				     const uncertainty_t *uncertainty)
{
  reachable_regions reachable_regs (this);

  /* Add globals and regions that already escaped in previous
     unknown calls.  */
  m_store.for_each_cluster (reachable_regions::init_cluster_cb,
			    &reachable_regs);

  /* Optional extra root: e.g. a function's return value.  */
  if (extra_sval)
    reachable_regs.handle_sval (extra_sval);

  /* Optional extra roots: svalues that might have been bound into
     escaped state at unknown calls.  */
  if (uncertainty)
    for (uncertainty_t::iterator iter
	   = uncertainty->begin_maybe_bound_svals ();
	 iter != uncertainty->end_maybe_bound_svals (); ++iter)
      reachable_regs.handle_sval (*iter);

  /* Get regions for locals that have explicitly bound values.  */
  for (store::cluster_map_t::iterator iter = m_store.begin ();
       iter != m_store.end (); ++iter)
    {
      const region *base_reg = (*iter).first;
      /* Only frame-local regions; added as non-mutable roots.  */
      if (const region *parent = base_reg->get_parent_region ())
	if (parent->get_kind () == RK_FRAME)
	  reachable_regs.add (base_reg, false);
    }

  /* Populate *OUT based on the values that were reachable.  */
  for (svalue_set::iterator iter
	 = reachable_regs.begin_reachable_svals ();
       iter != reachable_regs.end_reachable_svals (); ++iter)
    out->add (*iter);
}
/* Update this model for the RETURN_STMT, using CTXT to report any
diagnostics. */
void
region_model::on_return (const greturn *return_stmt, region_model_context *ctxt)
{
tree callee = get_current_function ()->decl;
tree lhs = DECL_RESULT (callee);
tree rhs = gimple_return_retval (return_stmt);
if (lhs && rhs)
copy_region (get_lvalue (lhs, ctxt), get_lvalue (rhs, ctxt), ctxt);
}
/* Update this model for a call and return of setjmp/sigsetjmp at CALL within
ENODE, using CTXT to report any diagnostics.
This is for the initial direct invocation of setjmp/sigsetjmp (which returns
0), as opposed to any second return due to longjmp/sigsetjmp. */
void
region_model::on_setjmp (const gcall *call, const exploded_node *enode,
region_model_context *ctxt)
{
const svalue *buf_ptr = get_rvalue (gimple_call_arg (call, 0), ctxt);
const region *buf_reg = deref_rvalue (buf_ptr, gimple_call_arg (call, 0),
ctxt);
/* Create a setjmp_svalue for this call and store it in BUF_REG's
region. */
if (buf_reg)
{
setjmp_record r (enode, call);
const svalue *sval
= m_mgr->get_or_create_setjmp_svalue (r, buf_reg->get_type ());
set_value (buf_reg, sval, ctxt);
}
/* Direct calls to setjmp return 0. */
if (tree lhs = gimple_call_lhs (call))
{
const svalue *new_sval
= m_mgr->get_or_create_int_cst (TREE_TYPE (lhs), 0);
const region *lhs_reg = get_lvalue (lhs, ctxt);
set_value (lhs_reg, new_sval, ctxt);
}
}
/* Update this region_model for rewinding from a "longjmp" at LONGJMP_CALL
   to a "setjmp" at SETJMP_CALL where the final stack depth should be
   SETJMP_STACK_DEPTH.  Pop any stack frames.  Leak detection is *not*
   done, and should be done by the caller.  */

void
region_model::on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
			  int setjmp_stack_depth, region_model_context *ctxt)
{
  /* Evaluate the val, using the frame of the "longjmp".  This must
     happen *before* popping frames, since the argument may refer to
     locals of the longjmp-ing frame.  */
  tree fake_retval = gimple_call_arg (longjmp_call, 1);
  const svalue *fake_retval_sval = get_rvalue (fake_retval, ctxt);

  /* Pop any frames until we reach the stack depth of the function where
     setjmp was called.  */
  gcc_assert (get_stack_depth () >= setjmp_stack_depth);
  while (get_stack_depth () > setjmp_stack_depth)
    pop_frame (NULL, NULL, ctxt);

  gcc_assert (get_stack_depth () == setjmp_stack_depth);

  /* Assign to LHS of "setjmp" in new_state.  */
  if (tree lhs = gimple_call_lhs (setjmp_call))
    {
      /* Passing 0 as the val to longjmp leads to setjmp returning 1.  */
      const svalue *zero_sval
	= m_mgr->get_or_create_int_cst (TREE_TYPE (fake_retval), 0);
      tristate eq_zero = eval_condition (fake_retval_sval, EQ_EXPR, zero_sval);
      /* If we have 0, use 1.  */
      if (eq_zero.is_true ())
	{
	  const svalue *one_sval
	    = m_mgr->get_or_create_int_cst (TREE_TYPE (fake_retval), 1);
	  fake_retval_sval = one_sval;
	}
      else
	{
	  /* Otherwise note that the value is nonzero.  */
	  m_constraints->add_constraint (fake_retval_sval, NE_EXPR, zero_sval);
	}

      /* Decorate the return value from setjmp as being unmergeable,
	 so that we don't attempt to merge states with it as zero
	 with states in which it's nonzero, leading to a clean distinction
	 in the exploded_graph between the first return and the second
	 return.  */
      fake_retval_sval = m_mgr->get_or_create_unmergeable (fake_retval_sval);

      const region *lhs_reg = get_lvalue (lhs, ctxt);
      set_value (lhs_reg, fake_retval_sval, ctxt);
    }
}
/* Update this region_model for a phi stmt of the form
LHS = PHI <...RHS...>.
where RHS is for the appropriate edge.
Get state from OLD_STATE so that all of the phi stmts for a basic block
are effectively handled simultaneously. */
void
region_model::handle_phi (const gphi *phi,
tree lhs, tree rhs,
const region_model &old_state,
region_model_context *ctxt)
{
/* For now, don't bother tracking the .MEM SSA names. */
if (tree var = SSA_NAME_VAR (lhs))
if (TREE_CODE (var) == VAR_DECL)
if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
return;
const svalue *src_sval = old_state.get_rvalue (rhs, ctxt);
const region *dst_reg = old_state.get_lvalue (lhs, ctxt);
set_value (dst_reg, src_sval, ctxt);
if (ctxt)
ctxt->on_phi (phi, rhs);
}
/* Implementation of region_model::get_lvalue; the latter adds type-checking.

   Get the id of the region for PV within this region_model,
   emitting any diagnostics to CTXT.  */

const region *
region_model::get_lvalue_1 (path_var pv, region_model_context *ctxt) const
{
  tree expr = pv.m_tree;

  gcc_assert (expr);

  switch (TREE_CODE (expr))
    {
    default:
      /* Tree codes we don't know how to handle: give up gracefully,
	 recording the unexpected code for -fdump-analyzer purposes.  */
      return m_mgr->get_region_for_unexpected_tree_code (ctxt, expr,
							 dump_location_t ());

    case ARRAY_REF:
      {
	/* ARRAY[INDEX]: an element_region within the array's region.  */
	tree array = TREE_OPERAND (expr, 0);
	tree index = TREE_OPERAND (expr, 1);

	const region *array_reg = get_lvalue (array, ctxt);
	const svalue *index_sval = get_rvalue (index, ctxt);
	return m_mgr->get_element_region (array_reg,
					  TREE_TYPE (TREE_TYPE (array)),
					  index_sval);
      }
      break;

    case MEM_REF:
      {
	/* *(PTR + OFFSET): an offset_region within the pointee.  */
	tree ptr = TREE_OPERAND (expr, 0);
	tree offset = TREE_OPERAND (expr, 1);
	const svalue *ptr_sval = get_rvalue (ptr, ctxt);
	const svalue *offset_sval = get_rvalue (offset, ctxt);
	const region *star_ptr = deref_rvalue (ptr_sval, ptr, ctxt);
	return m_mgr->get_offset_region (star_ptr,
					 TREE_TYPE (expr),
					 offset_sval);
      }
      break;

    case FUNCTION_DECL:
      return m_mgr->get_region_for_fndecl (expr);

    case LABEL_DECL:
      return m_mgr->get_region_for_label (expr);

    case VAR_DECL:
      /* Handle globals.  */
      if (is_global_var (expr))
	return m_mgr->get_region_for_global (expr);

      /* Fall through.  */

    case SSA_NAME:
    case PARM_DECL:
    case RESULT_DECL:
      {
	gcc_assert (TREE_CODE (expr) == SSA_NAME
		    || TREE_CODE (expr) == PARM_DECL
		    || TREE_CODE (expr) == VAR_DECL
		    || TREE_CODE (expr) == RESULT_DECL);

	/* Locals live in the frame recorded in PV's stack depth
	   (which may be an older frame than the current one).  */
	int stack_index = pv.m_stack_depth;
	const frame_region *frame = get_frame_at_index (stack_index);
	gcc_assert (frame);
	return frame->get_region_for_local (m_mgr, expr);
      }

    case COMPONENT_REF:
      {
	/* obj.field  */
	tree obj = TREE_OPERAND (expr, 0);
	tree field = TREE_OPERAND (expr, 1);
	const region *obj_reg = get_lvalue (obj, ctxt);
	return m_mgr->get_field_region (obj_reg, field);
      }
      break;

    case STRING_CST:
      return m_mgr->get_region_for_string (expr);
    }
}
/* Assert that SRC_TYPE can be converted to DST_TYPE as a no-op.
   Only enforced in checking builds; a mismatch is an analyzer bug.  */

static void
assert_compat_types (tree src_type, tree dst_type)
{
  /* Nothing to check without both types, or for a void destination.  */
  if (!src_type || !dst_type || VOID_TYPE_P (dst_type))
    return;
#if CHECKING_P
  if (!useless_type_conversion_p (src_type, dst_type))
    internal_error ("incompatible types: %qT and %qT", src_type, dst_type);
#endif
}
/* Return true if SRC_TYPE can be converted to DST_TYPE as a no-op.  */

bool
compat_types_p (tree src_type, tree dst_type)
{
  /* Vacuously compatible if either type is missing, or if the
     destination is void.  */
  if (!src_type || !dst_type || VOID_TYPE_P (dst_type))
    return true;
  return useless_type_conversion_p (src_type, dst_type);
}
/* Get the region for PV within this region_model,
   emitting any diagnostics to CTXT.
   Returns NULL for a NULL_TREE path_var.  */

const region *
region_model::get_lvalue (path_var pv, region_model_context *ctxt) const
{
  if (!pv.m_tree)
    return NULL;

  const region *reg = get_lvalue_1 (pv, ctxt);
  /* Sanity-check (in checking builds) that the region's type matches
     that of the expression.  */
  assert_compat_types (reg->get_type (), TREE_TYPE (pv.m_tree));
  return reg;
}
/* Get the region for EXPR within this region_model (assuming the most
   recent stack frame if it's a local).  */

const region *
region_model::get_lvalue (tree expr, region_model_context *ctxt) const
{
  path_var pv (expr, get_stack_depth () - 1);
  return get_lvalue (pv, ctxt);
}
/* Implementation of region_model::get_rvalue; the latter adds type-checking.

   Get the value of PV within this region_model,
   emitting any diagnostics to CTXT.  */

const svalue *
region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt) const
{
  gcc_assert (pv.m_tree);

  switch (TREE_CODE (pv.m_tree))
    {
    default:
      /* Unhandled tree codes conservatively evaluate to "unknown".  */
      return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (pv.m_tree));

    case ADDR_EXPR:
      {
	/* "&EXPR".  */
	tree expr = pv.m_tree;
	tree op0 = TREE_OPERAND (expr, 0);
	const region *expr_reg = get_lvalue (op0, ctxt);
	return m_mgr->get_ptr_svalue (TREE_TYPE (expr), expr_reg);
      }
      break;

    case BIT_FIELD_REF:
      {
	/* Extract the given range of bits from the underlying region's
	   value.  Operand 1 is the number of bits, operand 2 the
	   offset of the first bit; both must be constants.  */
	tree expr = pv.m_tree;
	tree op0 = TREE_OPERAND (expr, 0);
	const region *reg = get_lvalue (op0, ctxt);
	tree num_bits = TREE_OPERAND (expr, 1);
	tree first_bit_offset = TREE_OPERAND (expr, 2);
	gcc_assert (TREE_CODE (num_bits) == INTEGER_CST);
	gcc_assert (TREE_CODE (first_bit_offset) == INTEGER_CST);
	bit_range bits (TREE_INT_CST_LOW (first_bit_offset),
			TREE_INT_CST_LOW (num_bits));
	return get_rvalue_for_bits (TREE_TYPE (expr), reg, bits, ctxt);
      }

    case SSA_NAME:
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
    case ARRAY_REF:
      {
	/* Storage locations: look up the current binding in the store.  */
	const region *reg = get_lvalue (pv, ctxt);
	return get_store_value (reg, ctxt);
      }

    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case VIEW_CONVERT_EXPR:
      {
	/* Unary operations on the operand's value.  */
	tree expr = pv.m_tree;
	tree arg = TREE_OPERAND (expr, 0);
	const svalue *arg_sval = get_rvalue (arg, ctxt);
	const svalue *sval_unaryop
	  = m_mgr->get_or_create_unaryop (TREE_TYPE (expr), TREE_CODE (expr),
					  arg_sval);
	return sval_unaryop;
      };

    case INTEGER_CST:
    case REAL_CST:
    case COMPLEX_CST:
    case VECTOR_CST:
    case STRING_CST:
      return m_mgr->get_or_create_constant_svalue (pv.m_tree);

    case POINTER_PLUS_EXPR:
      {
	tree expr = pv.m_tree;
	tree ptr = TREE_OPERAND (expr, 0);
	tree offset = TREE_OPERAND (expr, 1);
	const svalue *ptr_sval = get_rvalue (ptr, ctxt);
	const svalue *offset_sval = get_rvalue (offset, ctxt);
	const svalue *sval_binop
	  = m_mgr->get_or_create_binop (TREE_TYPE (expr), POINTER_PLUS_EXPR,
					ptr_sval, offset_sval);
	return sval_binop;
      }

    /* Binary ops.  */
    case PLUS_EXPR:
    case MULT_EXPR:
      {
	tree expr = pv.m_tree;
	tree arg0 = TREE_OPERAND (expr, 0);
	tree arg1 = TREE_OPERAND (expr, 1);
	const svalue *arg0_sval = get_rvalue (arg0, ctxt);
	const svalue *arg1_sval = get_rvalue (arg1, ctxt);
	const svalue *sval_binop
	  = m_mgr->get_or_create_binop (TREE_TYPE (expr), TREE_CODE (expr),
					arg0_sval, arg1_sval);
	return sval_binop;
      }

    case COMPONENT_REF:
    case MEM_REF:
      {
	const region *ref_reg = get_lvalue (pv, ctxt);
	return get_store_value (ref_reg, ctxt);
      }
    case OBJ_TYPE_REF:
      {
	/* Evaluate the underlying expression of the virtual-call ref.  */
	tree expr = OBJ_TYPE_REF_EXPR (pv.m_tree);
	return get_rvalue (expr, ctxt);
      }
    }
}
/* Get the value of PV within this region_model,
   emitting any diagnostics to CTXT.
   Returns NULL for a NULL_TREE path_var.  */

const svalue *
region_model::get_rvalue (path_var pv, region_model_context *ctxt) const
{
  if (!pv.m_tree)
    return NULL;

  const svalue *sval = get_rvalue_1 (pv, ctxt);
  /* Sanity-check (in checking builds) that the value's type matches
     that of the expression, then warn about uses of poisoned
     (e.g. uninitialized) values.  */
  assert_compat_types (sval->get_type (), TREE_TYPE (pv.m_tree));
  return check_for_poison (sval, pv.m_tree, ctxt);
}
/* Get the value of EXPR within this region_model (assuming the most
   recent stack frame if it's a local).  */

const svalue *
region_model::get_rvalue (tree expr, region_model_context *ctxt) const
{
  path_var pv (expr, get_stack_depth () - 1);
  return get_rvalue (pv, ctxt);
}
/* Return true if this model is on a path with "main" as the entrypoint
(as opposed to one in which we're merely analyzing a subset of the
path through the code). */
bool
region_model::called_from_main_p () const
{
if (!m_current_frame)
return false;
/* Determine if the oldest stack frame in this model is for "main". */
const frame_region *frame0 = get_frame_at_index (0);
gcc_assert (frame0);
return id_equal (DECL_NAME (frame0->get_function ()->decl), "main");
}
/* Subroutine of region_model::get_store_value for when REG is (or is within)
   a global variable that hasn't been touched since the start of this path
   (or was implicitly touched due to a call to an unknown function).  */

const svalue *
region_model::get_initial_value_for_global (const region *reg) const
{
  /* Get the decl that REG is for (or is within).  */
  const decl_region *base_reg
    = reg->get_base_region ()->dyn_cast_decl_region ();
  gcc_assert (base_reg);
  tree decl = base_reg->get_decl ();

  /* Special-case: to avoid having to explicitly update all previously
     untracked globals when calling an unknown fn, they implicitly have
     an unknown value if an unknown call has occurred, unless this is
     static to-this-TU and hasn't escaped.  Globals that have escaped
     are explicitly tracked, so we shouldn't hit this case for them.  */
  if (m_store.called_unknown_fn_p ()
      && TREE_PUBLIC (decl)
      && !TREE_READONLY (decl))
    return m_mgr->get_or_create_unknown_svalue (reg->get_type ());

  /* If we are on a path from the entrypoint from "main" and we have a
     global decl defined in this TU that hasn't been touched yet, then
     the initial value of REG can be taken from the initialization value
     of the decl.  (Const globals are safe to read this way on any path,
     since nothing can have modified them.)  */
  if (called_from_main_p () || TREE_READONLY (decl))
    {
      /* Attempt to get the initializer value for base_reg.  */
      if (const svalue *base_reg_init
	    = base_reg->get_svalue_for_initializer (m_mgr))
	{
	  if (reg == base_reg)
	    return base_reg_init;
	  else
	    {
	      /* Get the value for REG within base_reg_init.  Do this by
		 binding the whole initializer into a scratch cluster and
		 then extracting REG's portion of it.  */
	      binding_cluster c (base_reg);
	      c.bind (m_mgr->get_store_manager (), base_reg, base_reg_init);
	      const svalue *sval
		= c.get_any_binding (m_mgr->get_store_manager (), reg);
	      if (sval)
		{
		  if (reg->get_type ())
		    sval = m_mgr->get_or_create_cast (reg->get_type (),
						      sval);
		  return sval;
		}
	    }
	}
    }

  /* Otherwise, return INIT_VAL(REG).  */
  return m_mgr->get_or_create_initial_value (reg);
}
/* Get a value for REG, looking it up in the store, or otherwise falling
   back to "initial" or "unknown" values.
   Use CTXT to report any warnings associated with reading from REG.  */

const svalue *
region_model::get_store_value (const region *reg,
			       region_model_context *ctxt) const
{
  check_region_for_read (reg, ctxt);

  /* Special-case: handle var_decls in the constant pool.  */
  if (const decl_region *decl_reg = reg->dyn_cast_decl_region ())
    if (const svalue *sval = decl_reg->maybe_get_constant_value (m_mgr))
      return sval;

  const svalue *sval
    = m_store.get_any_binding (m_mgr->get_store_manager (), reg);
  if (sval)
    {
      /* Cast to REG's type, if it has one; the binding may have been
	 made via a differently-typed view of the same bits.  */
      if (reg->get_type ())
	sval = m_mgr->get_or_create_cast (reg->get_type (), sval);
      return sval;
    }

  /* Special-case: read at a constant index within a STRING_CST.  */
  if (const offset_region *offset_reg = reg->dyn_cast_offset_region ())
    if (tree byte_offset_cst
	  = offset_reg->get_byte_offset ()->maybe_get_constant ())
      if (const string_region *str_reg
	    = reg->get_parent_region ()->dyn_cast_string_region ())
	{
	  tree string_cst = str_reg->get_string_cst ();
	  if (const svalue *char_sval
		= m_mgr->maybe_get_char_from_string_cst (string_cst,
							 byte_offset_cst))
	    return m_mgr->get_or_create_cast (reg->get_type (), char_sval);
	}

  /* Special-case: read the initial char of a STRING_CST.  */
  if (const cast_region *cast_reg = reg->dyn_cast_cast_region ())
    if (const string_region *str_reg
	  = cast_reg->get_original_region ()->dyn_cast_string_region ())
      {
	tree string_cst = str_reg->get_string_cst ();
	tree byte_offset_cst = build_int_cst (integer_type_node, 0);
	if (const svalue *char_sval
	      = m_mgr->maybe_get_char_from_string_cst (string_cst,
						       byte_offset_cst))
	  return m_mgr->get_or_create_cast (reg->get_type (), char_sval);
      }

  /* Otherwise we implicitly have the initial value of the region
     (if the cluster had been touched, binding_cluster::get_any_binding,
     would have returned UNKNOWN, and we would already have returned
     that above).  */

  /* Handle globals.  */
  if (reg->get_base_region ()->get_parent_region ()->get_kind ()
      == RK_GLOBALS)
    return get_initial_value_for_global (reg);

  return m_mgr->get_or_create_initial_value (reg);
}
/* Return false if REG does not exist, true if it may do.
   This is for detecting regions within the stack that don't exist anymore
   after frames are popped.  */

bool
region_model::region_exists_p (const region *reg) const
{
  const frame_region *enclosing_frame = reg->maybe_get_frame_region ();

  /* Regions not within a stack frame always (potentially) exist.  */
  if (!enclosing_frame)
    return true;

  /* A frame-local region exists iff its frame is the current frame,
     or one of the current frame's callers.  */
  const frame_region *iter_frame = get_current_frame ();
  while (iter_frame)
    {
      if (iter_frame == enclosing_frame)
	return true;
      iter_frame = iter_frame->get_calling_frame ();
    }
  return false;
}
/* Get a region for referencing PTR_SVAL, creating a region if need be, and
   potentially generating warnings via CTXT.

   PTR_SVAL must be of pointer type.

   PTR_TREE if non-NULL can be used when emitting diagnostics.  */

const region *
region_model::deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
			    region_model_context *ctxt) const
{
  gcc_assert (ptr_sval);
  gcc_assert (POINTER_TYPE_P (ptr_sval->get_type ()));

  /* If we're dereferencing PTR_SVAL, assume that it is non-NULL; add this
     as a constraint.  This suppresses false positives from
     -Wanalyzer-null-dereference for the case where we later have an
     if (PTR_SVAL) that would occur if we considered the false branch
     and transitioned the malloc state machine from start->null.  */
  tree null_ptr_cst = build_int_cst (ptr_sval->get_type (), 0);
  const svalue *null_ptr = m_mgr->get_or_create_constant_svalue (null_ptr_cst);
  m_constraints->add_constraint (ptr_sval, NE_EXPR, null_ptr);

  switch (ptr_sval->get_kind ())
    {
    default:
      break;

    case SK_REGION:
      {
	/* A pointer to a known region: dereference to that region.  */
	const region_svalue *region_sval
	  = as_a <const region_svalue *> (ptr_sval);
	return region_sval->get_pointee ();
      }

    case SK_BINOP:
      {
	const binop_svalue *binop_sval
	  = as_a <const binop_svalue *> (ptr_sval);
	switch (binop_sval->get_op ())
	  {
	  case POINTER_PLUS_EXPR:
	    {
	      /* If we have a symbolic value expressing pointer arithmetic,
		 try to convert it to a suitable region.  */
	      const region *parent_region
		= deref_rvalue (binop_sval->get_arg0 (), NULL_TREE, ctxt);
	      const svalue *offset = binop_sval->get_arg1 ();
	      tree type = TREE_TYPE (ptr_sval->get_type ());
	      return m_mgr->get_offset_region (parent_region, type, offset);
	    }
	  default:
	    break;
	  }
      }
      break;

    case SK_POISONED:
      {
	/* Dereferencing a poisoned (e.g. uninitialized) pointer:
	   warn, if we have a context and a tree to refer to.  */
	if (ctxt)
	  {
	    tree ptr = get_representative_tree (ptr_sval);
	    /* If we can't get a representative tree for PTR_SVAL
	       (e.g. if it hasn't been bound into the store), then
	       fall back on PTR_TREE, if non-NULL.  */
	    if (!ptr)
	      ptr = ptr_tree;
	    if (ptr)
	      {
		const poisoned_svalue *poisoned_sval
		  = as_a <const poisoned_svalue *> (ptr_sval);
		enum poison_kind pkind = poisoned_sval->get_poison_kind ();
		ctxt->warn (new poisoned_value_diagnostic (ptr, pkind));
	      }
	  }
      }
      break;
    }

  /* Fallback: a symbolic region based on the pointer's value.  */
  return m_mgr->get_symbolic_region (ptr_sval);
}
/* Attempt to get BITS within any value of REG, as TYPE.
   In particular, extract values from compound_svalues for the case
   where there's a concrete binding at BITS.
   Return an unknown svalue if we can't handle the given case.
   Use CTXT to report any warnings associated with reading from REG.  */

const svalue *
region_model::get_rvalue_for_bits (tree type,
				   const region *reg,
				   const bit_range &bits,
				   region_model_context *ctxt) const
{
  /* Read the whole of REG's value, then narrow to the bit range.  */
  const svalue *whole_sval = get_store_value (reg, ctxt);
  return m_mgr->get_or_create_bits_within (type, bits, whole_sval);
}
/* A subclass of pending_diagnostic for complaining about writes to
   constant regions of memory.  */

class write_to_const_diagnostic
: public pending_diagnostic_subclass<write_to_const_diagnostic>
{
public:
  write_to_const_diagnostic (const region *reg, tree decl)
  : m_reg (reg), m_decl (decl)
  {}

  /* Identifier used for deduplication of diagnostics.  */
  const char *get_kind () const FINAL OVERRIDE
  {
    return "write_to_const_diagnostic";
  }

  /* Two instances are the same diagnostic if both the region and the
     decl match.  */
  bool operator== (const write_to_const_diagnostic &other) const
  {
    return (m_reg == other.m_reg
	    && m_decl == other.m_decl);
  }

  /* Emit the warning, plus a note at the decl's location.
     Return true if a diagnostic was actually emitted.  */
  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    bool warned = warning_at (rich_loc, OPT_Wanalyzer_write_to_const,
			      "write to %<const%> object %qE", m_decl);
    if (warned)
      inform (DECL_SOURCE_LOCATION (m_decl), "declared here");
    return warned;
  }

  /* Text for the final event in the diagnostic path.  */
  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    return ev.formatted_print ("write to %<const%> object %qE here", m_decl);
  }

private:
  const region *m_reg;	/* The region that was written to.  */
  tree m_decl;		/* The const decl underlying M_REG.  */
};
/* A subclass of pending_diagnostic for complaining about writes to
   string literals.  */

class write_to_string_literal_diagnostic
: public pending_diagnostic_subclass<write_to_string_literal_diagnostic>
{
public:
  write_to_string_literal_diagnostic (const region *reg)
  : m_reg (reg)
  {}

  /* Identifier used for deduplication of diagnostics.  */
  const char *get_kind () const FINAL OVERRIDE
  {
    return "write_to_string_literal_diagnostic";
  }

  bool operator== (const write_to_string_literal_diagnostic &other) const
  {
    return m_reg == other.m_reg;
  }

  /* Emit the warning; return true if it was actually emitted.  */
  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    return warning_at (rich_loc, OPT_Wanalyzer_write_to_string_literal,
		       "write to string literal");
    /* Ideally we would show the location of the STRING_CST as well,
       but it is not available at this point.  */
  }

  /* Text for the final event in the diagnostic path.  */
  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    return ev.formatted_print ("write to string literal here");
  }

private:
  const region *m_reg;	/* The string-literal region written to.  */
};
/* Use CTXT to warn if DEST_REG is a region that shouldn't be written to.  */

void
region_model::check_for_writable_region (const region* dest_reg,
					 region_model_context *ctxt) const
{
  /* Fail gracefully if CTXT is NULL.  */
  if (!ctxt)
    return;

  /* Classify by the kind of the base region enclosing DEST_REG.  */
  const region *base_reg = dest_reg->get_base_region ();
  switch (base_reg->get_kind ())
    {
    default:
      break;
    case RK_DECL:
      {
	const decl_region *decl_reg = as_a <const decl_region *> (base_reg);
	tree decl = decl_reg->get_decl ();
	/* Warn about writes to const globals.
	   Don't warn for writes to const locals, and params in particular,
	   since we would warn in push_frame when setting them up (e.g the
	   "this" param is "T* const").  */
	if (TREE_READONLY (decl)
	    && is_global_var (decl))
	  ctxt->warn (new write_to_const_diagnostic (dest_reg, decl));
      }
      break;
    case RK_STRING:
      ctxt->warn (new write_to_string_literal_diagnostic (dest_reg));
      break;
    }
}
/* Get the capacity of REG in bytes.  */

const svalue *
region_model::get_capacity (const region *reg) const
{
  switch (reg->get_kind ())
    {
    default:
      break;
    case RK_DECL:
      {
	const decl_region *dreg = as_a <const decl_region *> (reg);
	tree decl = dreg->get_decl ();
	if (TREE_CODE (decl) == SSA_NAME)
	  {
	    /* For an SSA name, use the size of its type.  */
	    tree size = TYPE_SIZE (TREE_TYPE (decl));
	    return get_rvalue (size, NULL);
	  }
	/* Otherwise use the decl's size (taking any initializer
	   into account); this can be NULL_TREE for e.g. incomplete
	   types, in which case fall through to the fallbacks below.  */
	tree size = decl_init_size (decl, false);
	if (size)
	  return get_rvalue (size, NULL);
      }
      break;
    case RK_SIZED:
      /* Look through sized regions to get at the capacity
	 of the underlying regions.  */
      return get_capacity (reg->get_parent_region ());
    }

  /* Fall back on any dynamically-recorded extent (e.g. from malloc),
     then on "unknown".  */
  if (const svalue *recorded = get_dynamic_extents (reg))
    return recorded;

  return m_mgr->get_or_create_unknown_svalue (sizetype);
}
/* If CTXT is non-NULL, use it to warn about any problems accessing REG,
   using DIR to determine if this access is a read or write.  */

void
region_model::check_region_access (const region *reg,
				   enum access_direction dir,
				   region_model_context *ctxt) const
{
  /* Fail gracefully if CTXT is NULL.  */
  if (!ctxt)
    return;

  switch (dir)
    {
    case DIR_READ:
      /* Currently a no-op.  */
      break;
    case DIR_WRITE:
      check_for_writable_region (reg, ctxt);
      break;
    default:
      gcc_unreachable ();
    }
}
/* If CTXT is non-NULL, use it to warn about any problems writing to REG.
   Thin wrapper around check_region_access for the DIR_WRITE case.  */

void
region_model::check_region_for_write (const region *dest_reg,
				      region_model_context *ctxt) const
{
  check_region_access (dest_reg, DIR_WRITE, ctxt);
}
/* If CTXT is non-NULL, use it to warn about any problems reading from REG.
   Thin wrapper around check_region_access for the DIR_READ case.  */

void
region_model::check_region_for_read (const region *src_reg,
				     region_model_context *ctxt) const
{
  check_region_access (src_reg, DIR_READ, ctxt);
}
/* Set the value of the region given by LHS_REG to the value given
by RHS_SVAL.
Use CTXT to report any warnings associated with writing to LHS_REG. */
void
region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
region_model_context *ctxt)
{
gcc_assert (lhs_reg);
gcc_assert (rhs_sval);
check_region_for_write (lhs_reg, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
ctxt ? ctxt->get_uncertainty () : NULL);
}
/* Set the value of the region given by LHS to the value given by RHS.  */

void
region_model::set_value (tree lhs, tree rhs, region_model_context *ctxt)
{
  const region *lhs_reg = get_lvalue (lhs, ctxt);
  const svalue *rhs_sval = get_rvalue (rhs, ctxt);
  gcc_assert (lhs_reg);
  gcc_assert (rhs_sval);
  /* Delegate to the region/svalue overload, which also checks
     writability of LHS_REG.  */
  set_value (lhs_reg, rhs_sval, ctxt);
}
/* Remove all bindings overlapping REG within the store. */
void
region_model::clobber_region (const region *reg)
{
m_store.clobber_region (m_mgr->get_store_manager(), reg);
}
/* Remove any bindings for REG within the store. */
void
region_model::purge_region (const region *reg)
{
m_store.purge_region (m_mgr->get_store_manager(), reg);
}
/* Fill REG with SVAL. */
void
region_model::fill_region (const region *reg, const svalue *sval)
{
m_store.fill_region (m_mgr->get_store_manager(), reg, sval);
}
/* Zero-fill REG. */
void
region_model::zero_fill_region (const region *reg)
{
m_store.zero_fill_region (m_mgr->get_store_manager(), reg);
}
/* Mark REG as having unknown content. */
void
region_model::mark_region_as_unknown (const region *reg,
uncertainty_t *uncertainty)
{
m_store.mark_region_as_unknown (m_mgr->get_store_manager(), reg,
uncertainty);
}
/* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
   this model.  */

tristate
region_model::eval_condition (const svalue *lhs,
			      enum tree_code op,
			      const svalue *rhs) const
{
  /* For now, make no attempt to capture constraints on floating-point
     values.  */
  if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
      || (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
    return tristate::unknown ();

  /* First see what can be determined from the values themselves,
     falling back on the constraint_manager otherwise.  */
  const tristate ts = eval_condition_without_cm (lhs, op, rhs);
  return ts.is_known () ? ts : m_constraints->eval_condition (lhs, op, rhs);
}
/* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
   this model, without resorting to the constraint_manager.

   This is exposed so that impl_region_model_context::on_state_leak can
   check for equality part-way through region_model::purge_unused_svalues
   without risking creating new ECs.  */

tristate
region_model::eval_condition_without_cm (const svalue *lhs,
					 enum tree_code op,
					 const svalue *rhs) const
{
  gcc_assert (lhs);
  gcc_assert (rhs);

  /* See what we know based on the values.  */

  /* For now, make no attempt to capture constraints on floating-point
     values.  */
  if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
      || (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
    return tristate::unknown ();

  /* Unwrap any unmergeable values.  */
  lhs = lhs->unwrap_any_unmergeable ();
  rhs = rhs->unwrap_any_unmergeable ();

  if (lhs == rhs)
    {
      /* If we have the same svalue, then we have equality
	 (apart from NaN-handling).
	 TODO: should this definitely be the case for poisoned values?  */
      /* Poisoned and unknown values are "unknowable".  */
      if (lhs->get_kind () == SK_POISONED
	  || lhs->get_kind () == SK_UNKNOWN)
	return tristate::TS_UNKNOWN;

      switch (op)
	{
	case EQ_EXPR:
	case GE_EXPR:
	case LE_EXPR:
	  return tristate::TS_TRUE;

	case NE_EXPR:
	case GT_EXPR:
	case LT_EXPR:
	  return tristate::TS_FALSE;

	default:
	  /* For other ops, use the logic below.  */
	  break;
	}
    }

  /* If we have a pair of region_svalues, compare them.  */
  if (const region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
    if (const region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
      {
	tristate res = region_svalue::eval_condition (lhs_ptr, op, rhs_ptr);
	if (res.is_known ())
	  return res;
	/* Otherwise, only known through constraints.  */
      }

  /* If we have a pair of constants, compare them.  */
  if (const constant_svalue *cst_lhs = lhs->dyn_cast_constant_svalue ())
    if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
      return constant_svalue::eval_condition (cst_lhs, op, cst_rhs);

  /* Handle comparison against zero.  */
  if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
    if (zerop (cst_rhs->get_constant ()))
      {
	if (const region_svalue *ptr = lhs->dyn_cast_region_svalue ())
	  {
	    /* A region_svalue is a non-NULL pointer, except in certain
	       special cases (see the comment for region::non_null_p).  */
	    const region *pointee = ptr->get_pointee ();
	    if (pointee->non_null_p ())
	      {
		switch (op)
		  {
		  default:
		    gcc_unreachable ();

		  case EQ_EXPR:
		  case GE_EXPR:
		  case LE_EXPR:
		    return tristate::TS_FALSE;

		  case NE_EXPR:
		  case GT_EXPR:
		  case LT_EXPR:
		    return tristate::TS_TRUE;
		  }
	      }
	  }
	else if (const binop_svalue *binop = lhs->dyn_cast_binop_svalue ())
	  {
	    /* Treat offsets from a non-NULL pointer as being non-NULL.  This
	       isn't strictly true, in that eventually ptr++ will wrap
	       around and be NULL, but it won't occur in practise and thus
	       can be used to suppress effectively false positives that we
	       shouldn't warn for.  */
	    if (binop->get_op () == POINTER_PLUS_EXPR)
	      {
		tristate lhs_ts
		  = eval_condition_without_cm (binop->get_arg0 (),
					       op, rhs);
		if (lhs_ts.is_known ())
		  return lhs_ts;
	      }
	  }
      }

  /* Handle rejection of equality for comparisons of the initial values of
     "external" values (such as params) with the address of locals.  */
  if (const initial_svalue *init_lhs = lhs->dyn_cast_initial_svalue ())
    if (const region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
      {
	tristate res = compare_initial_and_pointer (init_lhs, rhs_ptr);
	if (res.is_known ())
	  return res;
      }
  if (const initial_svalue *init_rhs = rhs->dyn_cast_initial_svalue ())
    if (const region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
      {
	tristate res = compare_initial_and_pointer (init_rhs, lhs_ptr);
	if (res.is_known ())
	  return res;
      }

  /* Handle comparison of a widening svalue (from loop analysis)
     against a constant.  */
  if (const widening_svalue *widen_lhs = lhs->dyn_cast_widening_svalue ())
    if (tree rhs_cst = rhs->maybe_get_constant ())
      {
	tristate res = widen_lhs->eval_condition_without_cm (op, rhs_cst);
	if (res.is_known ())
	  return res;
      }

  return tristate::TS_UNKNOWN;
}
/* Subroutine of region_model::eval_condition_without_cm, for rejecting
equality of INIT_VAL(PARM) with &LOCAL. */
tristate
region_model::compare_initial_and_pointer (const initial_svalue *init,
					   const region_svalue *ptr) const
{
  /* A pointer into a stack frame can't be the initial value of a
     param, so "INIT_VAL(PARM) == &FRAME_LOCAL" is known to be false.  */
  const region *target = ptr->get_pointee ();
  if (target->maybe_get_frame_region () != NULL
      && init->initial_value_of_param_p ())
    return tristate::TS_FALSE;

  /* Otherwise we can't tell anything from the values alone.  */
  return tristate::TS_UNKNOWN;
}
/* Handle various constraints of the form:
LHS: ((bool)INNER_LHS INNER_OP INNER_RHS))
OP : == or !=
RHS: zero
and (with a cast):
LHS: CAST([long]int, ((bool)INNER_LHS INNER_OP INNER_RHS))
OP : == or !=
RHS: zero
   by adding constraints for INNER_LHS INNER_OP INNER_RHS.

   Return true if this function can fully handle the constraint; if
   so, add the implied constraint(s) and write true to *OUT if they
   are consistent with existing constraints, or write false to *OUT
   if they contradict existing constraints.

   Return false for cases that this function doesn't know how to handle.
For example, if we're checking a stored conditional, we'll have
something like:
LHS: CAST(long int, (&HEAP_ALLOCATED_REGION(8)!=(int *)0B))
OP : NE_EXPR
RHS: zero
which this function can turn into an add_constraint of:
(&HEAP_ALLOCATED_REGION(8) != (int *)0B)
Similarly, optimized && and || conditionals lead to e.g.
if (p && q)
becoming gimple like this:
_1 = p_6 == 0B;
_2 = q_8 == 0B
_3 = _1 | _2
On the "_3 is false" branch we can have constraints of the form:
((&HEAP_ALLOCATED_REGION(8)!=(int *)0B)
| (&HEAP_ALLOCATED_REGION(10)!=(int *)0B))
== 0
which implies that both _1 and _2 are false,
which this function can turn into a pair of add_constraints of
(&HEAP_ALLOCATED_REGION(8)!=(int *)0B)
and:
(&HEAP_ALLOCATED_REGION(10)!=(int *)0B). */
bool
region_model::add_constraints_from_binop (const svalue *outer_lhs,
					  enum tree_code outer_op,
					  const svalue *outer_rhs,
					  bool *out,
					  region_model_context *ctxt)
{
  /* Strip any casts from OUTER_LHS to find the underlying binop.  */
  while (const svalue *cast = outer_lhs->maybe_undo_cast ())
    outer_lhs = cast;
  const binop_svalue *binop_sval = outer_lhs->dyn_cast_binop_svalue ();
  if (!binop_sval)
    return false;
  /* Only handle comparisons against zero.  */
  if (!outer_rhs->all_zeroes_p ())
    return false;

  const svalue *inner_lhs = binop_sval->get_arg0 ();
  enum tree_code inner_op = binop_sval->get_op ();
  const svalue *inner_rhs = binop_sval->get_arg1 ();

  /* Only handle equality/inequality as the outer comparison.  */
  if (outer_op != NE_EXPR && outer_op != EQ_EXPR)
    return false;

  /* We have either
     - "OUTER_LHS != false" (i.e. OUTER is true), or
     - "OUTER_LHS == false" (i.e. OUTER is false).  */
  bool is_true = outer_op == NE_EXPR;

  switch (inner_op)
    {
    default:
      /* Other inner ops aren't handled by this function.  */
      return false;

    case EQ_EXPR:
    case NE_EXPR:
      {
	/* ...and "(inner_lhs OP inner_rhs) == 0"
	   then (inner_lhs OP inner_rhs) must have the same
	   logical value as LHS.  */
	if (!is_true)
	  inner_op = invert_tree_comparison (inner_op, false /* honor_nans */);
	*out = add_constraint (inner_lhs, inner_op, inner_rhs, ctxt);
	return true;
      }
      break;

    case BIT_AND_EXPR:
      if (is_true)
	{
	  /* ...and "(inner_lhs & inner_rhs) != 0"
	     then both inner_lhs and inner_rhs must be true.  */
	  const svalue *false_sval
	    = m_mgr->get_or_create_constant_svalue (boolean_false_node);
	  bool sat1 = add_constraint (inner_lhs, NE_EXPR, false_sval, ctxt);
	  bool sat2 = add_constraint (inner_rhs, NE_EXPR, false_sval, ctxt);
	  /* The conjunction is satisfiable only if both parts are.  */
	  *out = sat1 && sat2;
	  return true;
	}
      /* "(inner_lhs & inner_rhs) == 0" implies nothing definite about
	 inner_lhs or inner_rhs individually.  */
      return false;

    case BIT_IOR_EXPR:
      if (!is_true)
	{
	  /* ...and "(inner_lhs | inner_rhs) == 0"
	     i.e. "(inner_lhs | inner_rhs)" is false
	     then both inner_lhs and inner_rhs must be false.  */
	  const svalue *false_sval
	    = m_mgr->get_or_create_constant_svalue (boolean_false_node);
	  bool sat1 = add_constraint (inner_lhs, EQ_EXPR, false_sval, ctxt);
	  bool sat2 = add_constraint (inner_rhs, EQ_EXPR, false_sval, ctxt);
	  *out = sat1 && sat2;
	  return true;
	}
      /* "(inner_lhs | inner_rhs) != 0" doesn't pin down which of the
	 two operands is true.  */
      return false;
    }
}
/* Attempt to add the constraint "LHS OP RHS" to this region_model.
If it is consistent with existing constraints, add it, and return true.
Return false if it contradicts existing constraints.
Use CTXT for reporting any diagnostics associated with the accesses. */
bool
region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
			      region_model_context *ctxt)
{
  /* Constraints on floating-point values aren't modeled yet; treat
     them as trivially satisfiable without recording anything.  */
  if (FLOAT_TYPE_P (TREE_TYPE (lhs)) || FLOAT_TYPE_P (TREE_TYPE (rhs)))
    return true;

  /* Convert both operands to svalues (LHS first, so that any
     diagnostics from the accesses come out in source order), then
     defer to the svalue-based overload.  */
  const svalue *sval_lhs = get_rvalue (lhs, ctxt);
  const svalue *sval_rhs = get_rvalue (rhs, ctxt);
  return add_constraint (sval_lhs, op, sval_rhs, ctxt);
}
/* Attempt to add the constraint "LHS OP RHS" to this region_model.
If it is consistent with existing constraints, add it, and return true.
Return false if it contradicts existing constraints.
Use CTXT for reporting any diagnostics associated with the accesses. */
bool
region_model::add_constraint (const svalue *lhs,
enum tree_code op,
const svalue *rhs,
region_model_context *ctxt)
{
tristate t_cond = eval_condition (lhs, op, rhs);
/* If we already have the condition, do nothing. */
if (t_cond.is_true ())
return true;
/* Reject a constraint that would contradict existing knowledge, as
unsatisfiable. */
if (t_cond.is_false ())
return false;
bool out;
if (add_constraints_from_binop (lhs, op, rhs, &out, ctxt))
return out;
/* Store the constraint. */
m_constraints->add_constraint (lhs, op, rhs);
/* Notify the context, if any. This exists so that the state machines
in a program_state can be notified about the condition, and so can
set sm-state for e.g. unchecked->checked, both for cfg-edges, and
when synthesizing constraints as above. */
if (ctxt)
ctxt->on_condition (lhs, op, rhs);
/* If we have &REGION == NULL, then drop dynamic extents for REGION (for
the case where REGION is heap-allocated and thus could be NULL). */
if (tree rhs_cst = rhs->maybe_get_constant ())
if (op == EQ_EXPR && zerop (rhs_cst))
if (const region_svalue *region_sval = lhs->dyn_cast_region_svalue ())
unset_dynamic_extents (region_sval->get_pointee ());
return true;
}
/* As above, but when returning false, if OUT is non-NULL, write a
new rejected_constraint to *OUT. */
bool
region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
			      region_model_context *ctxt,
			      rejected_constraint **out)
{
  if (add_constraint (lhs, op, rhs, ctxt))
    return true;

  /* The constraint was rejected; record the details for the caller
     if requested.  */
  if (out)
    *out = new rejected_op_constraint (*this, lhs, op, rhs);
  return false;
}
/* Determine what is known about the condition "LHS OP RHS" within
this model.
Use CTXT for reporting any diagnostics associated with the accesses. */
tristate
region_model::eval_condition (tree lhs,
			      enum tree_code op,
			      tree rhs,
			      region_model_context *ctxt)
{
  /* Conditions on floating-point values aren't modeled yet.  */
  if (FLOAT_TYPE_P (TREE_TYPE (lhs)) || FLOAT_TYPE_P (TREE_TYPE (rhs)))
    return tristate::unknown ();

  /* Convert to svalues (LHS first, for source-order diagnostics)
     and defer to the svalue-based overload.  */
  const svalue *sval_lhs = get_rvalue (lhs, ctxt);
  const svalue *sval_rhs = get_rvalue (rhs, ctxt);
  return eval_condition (sval_lhs, op, sval_rhs);
}
/* Implementation of region_model::get_representative_path_var.
Attempt to return a path_var that represents SVAL, or return NULL_TREE.
Use VISITED to prevent infinite mutual recursion with the overload for
regions. */
path_var
region_model::get_representative_path_var_1 (const svalue *sval,
					     svalue_set *visited) const
{
  gcc_assert (sval);

  /* Prevent infinite recursion.  */
  if (visited->contains (sval))
    return path_var (NULL_TREE, 0);
  visited->add (sval);

  /* Handle casts by recursion into get_representative_path_var.  */
  if (const svalue *cast_sval = sval->maybe_undo_cast ())
    {
      path_var result = get_representative_path_var (cast_sval, visited);
      tree orig_type = sval->get_type ();
      /* If necessary, wrap the result in a cast.  */
      if (result.m_tree && orig_type)
	result.m_tree = build1 (NOP_EXPR, orig_type, result.m_tree);
      return result;
    }

  /* Gather candidate path_vars: those found via bindings in the
     store...  */
  auto_vec<path_var> pvs;
  m_store.get_representative_path_vars (this, visited, sval, &pvs);

  /* ...plus the constant itself, if SVAL is a constant.  */
  if (tree cst = sval->maybe_get_constant ())
    pvs.safe_push (path_var (cst, 0));

  /* Handle string literals and various other pointers: if the pointee
     is representable, return "&PV" for it.  */
  if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
    {
      const region *reg = ptr_sval->get_pointee ();
      if (path_var pv = get_representative_path_var (reg, visited))
	return path_var (build1 (ADDR_EXPR,
				 sval->get_type (),
				 pv.m_tree),
			 pv.m_stack_depth);
    }

  /* If we have a sub_svalue, look for ways to represent the parent.
     Only field accesses within the parent are handled here.  */
  if (const sub_svalue *sub_sval = sval->dyn_cast_sub_svalue ())
    {
      const svalue *parent_sval = sub_sval->get_parent ();
      const region *subreg = sub_sval->get_subregion ();
      if (path_var parent_pv
	    = get_representative_path_var (parent_sval, visited))
	if (const field_region *field_reg = subreg->dyn_cast_field_region ())
	  return path_var (build3 (COMPONENT_REF,
				   sval->get_type (),
				   parent_pv.m_tree,
				   field_reg->get_field (),
				   NULL_TREE),
			   parent_pv.m_stack_depth);
    }

  if (pvs.length () < 1)
    return path_var (NULL_TREE, 0);

  /* Sort the candidates with readability_comparator and return the
     best one.  */
  pvs.qsort (readability_comparator);
  return pvs[0];
}
/* Attempt to return a path_var that represents SVAL, or return NULL_TREE.
Use VISITED to prevent infinite mutual recursion with the overload for
   regions.
This function defers to get_representative_path_var_1 to do the work;
it adds verification that get_representative_path_var_1 returned a tree
of the correct type. */
path_var
region_model::get_representative_path_var (const svalue *sval,
					   svalue_set *visited) const
{
  /* A null svalue has no representation.  */
  if (!sval)
    return path_var (NULL_TREE, 0);

  const tree expected_type = sval->get_type ();
  path_var pv = get_representative_path_var_1 (sval, visited);

  /* Sanity-check that the returned tree (if any) has SVAL's type
     (if any).  */
  if (pv.m_tree && expected_type)
    gcc_assert (TREE_TYPE (pv.m_tree) == expected_type);

  return pv;
}
/* Attempt to return a tree that represents SVAL, or return NULL_TREE.
Strip off any top-level cast, to avoid messages like
double-free of '(void *)ptr'
from analyzer diagnostics. */
tree
region_model::get_representative_tree (const svalue *sval) const
{
  svalue_set visited;
  path_var pv = get_representative_path_var (sval, &visited);
  tree expr = pv.m_tree;

  /* Remove a top-level cast, if present.  */
  if (expr != NULL_TREE && TREE_CODE (expr) == NOP_EXPR)
    expr = TREE_OPERAND (expr, 0);

  return fixup_tree_for_diagnostic (expr);
}
/* Implementation of region_model::get_representative_path_var.
Attempt to return a path_var that represents REG, or return
the NULL path_var.
For example, a region for a field of a local would be a path_var
wrapping a COMPONENT_REF.
Use VISITED to prevent infinite mutual recursion with the overload for
svalues. */
path_var
region_model::get_representative_path_var_1 (const region *reg,
					     svalue_set *visited) const
{
  switch (reg->get_kind ())
    {
    default:
      gcc_unreachable ();

    case RK_FRAME:
    case RK_GLOBALS:
    case RK_CODE:
    case RK_HEAP:
    case RK_STACK:
    case RK_ROOT:
      /* Regions that represent memory spaces are not expressible as trees.  */
      return path_var (NULL_TREE, 0);

    case RK_FUNCTION:
      {
	/* Use the function's decl.  */
	const function_region *function_reg
	  = as_a <const function_region *> (reg);
	return path_var (function_reg->get_fndecl (), 0);
      }
    case RK_LABEL:
      {
	/* Use the label's decl.  */
	const label_region *label_reg = as_a <const label_region *> (reg);
	return path_var (label_reg->get_label (), 0);
      }

    case RK_SYMBOLIC:
      {
	/* Express *PTR as a MEM_REF with a zero offset, provided the
	   pointer itself is representable.  */
	const symbolic_region *symbolic_reg
	  = as_a <const symbolic_region *> (reg);
	const svalue *pointer = symbolic_reg->get_pointer ();
	path_var pointer_pv = get_representative_path_var (pointer, visited);
	if (!pointer_pv)
	  return path_var (NULL_TREE, 0);
	tree offset = build_int_cst (pointer->get_type (), 0);
	return path_var (build2 (MEM_REF,
				 reg->get_type (),
				 pointer_pv.m_tree,
				 offset),
			 pointer_pv.m_stack_depth);
      }
    case RK_DECL:
      {
	/* Use the decl itself, at the region's stack depth.  */
	const decl_region *decl_reg = as_a <const decl_region *> (reg);
	return path_var (decl_reg->get_decl (), decl_reg->get_stack_depth ());
      }
    case RK_FIELD:
      {
	/* Express the field as a COMPONENT_REF of its parent.  */
	const field_region *field_reg = as_a <const field_region *> (reg);
	path_var parent_pv
	  = get_representative_path_var (reg->get_parent_region (), visited);
	if (!parent_pv)
	  return path_var (NULL_TREE, 0);
	return path_var (build3 (COMPONENT_REF,
				 reg->get_type (),
				 parent_pv.m_tree,
				 field_reg->get_field (),
				 NULL_TREE),
			 parent_pv.m_stack_depth);
      }
    case RK_ELEMENT:
      {
	/* Express the element as an ARRAY_REF, provided both the parent
	   and the index are representable.  */
	const element_region *element_reg
	  = as_a <const element_region *> (reg);
	path_var parent_pv
	  = get_representative_path_var (reg->get_parent_region (), visited);
	if (!parent_pv)
	  return path_var (NULL_TREE, 0);
	path_var index_pv
	  = get_representative_path_var (element_reg->get_index (), visited);
	if (!index_pv)
	  return path_var (NULL_TREE, 0);
	return path_var (build4 (ARRAY_REF,
				 reg->get_type (),
				 parent_pv.m_tree, index_pv.m_tree,
				 NULL_TREE, NULL_TREE),
			 parent_pv.m_stack_depth);
      }
    case RK_OFFSET:
      {
	/* Express the region as a MEM_REF of &PARENT plus the byte
	   offset, but only when the offset is a constant.  */
	const offset_region *offset_reg
	  = as_a <const offset_region *> (reg);
	path_var parent_pv
	  = get_representative_path_var (reg->get_parent_region (), visited);
	if (!parent_pv)
	  return path_var (NULL_TREE, 0);
	path_var offset_pv
	  = get_representative_path_var (offset_reg->get_byte_offset (),
					 visited);
	if (!offset_pv || TREE_CODE (offset_pv.m_tree) != INTEGER_CST)
	  return path_var (NULL_TREE, 0);
	tree addr_parent = build1 (ADDR_EXPR,
				   build_pointer_type (reg->get_type ()),
				   parent_pv.m_tree);
	return path_var (build2 (MEM_REF,
				 reg->get_type (),
				 addr_parent, offset_pv.m_tree),
			 parent_pv.m_stack_depth);
      }

    case RK_SIZED:
      /* Sized regions are not expressible as trees.  */
      return path_var (NULL_TREE, 0);

    case RK_CAST:
      {
	/* Wrap the parent's representation in a NOP_EXPR cast.  */
	path_var parent_pv
	  = get_representative_path_var (reg->get_parent_region (), visited);
	if (!parent_pv)
	  return path_var (NULL_TREE, 0);
	return path_var (build1 (NOP_EXPR,
				 reg->get_type (),
				 parent_pv.m_tree),
			 parent_pv.m_stack_depth);
      }

    case RK_HEAP_ALLOCATED:
    case RK_ALLOCA:
      /* No good way to express heap-allocated/alloca regions as trees.  */
      return path_var (NULL_TREE, 0);

    case RK_STRING:
      {
	/* Use the STRING_CST itself.  */
	const string_region *string_reg = as_a <const string_region *> (reg);
	return path_var (string_reg->get_string_cst (), 0);
      }

    case RK_UNKNOWN:
      return path_var (NULL_TREE, 0);
    }
}
/* Attempt to return a path_var that represents REG, or return
the NULL path_var.
For example, a region for a field of a local would be a path_var
wrapping a COMPONENT_REF.
Use VISITED to prevent infinite mutual recursion with the overload for
svalues.
This function defers to get_representative_path_var_1 to do the work;
it adds verification that get_representative_path_var_1 returned a tree
of the correct type. */
path_var
region_model::get_representative_path_var (const region *reg,
					   svalue_set *visited) const
{
  path_var pv = get_representative_path_var_1 (reg, visited);

  /* Sanity-check that the returned tree (if any) has REG's type
     (if any).  */
  tree reg_type = reg->get_type ();
  if (pv.m_tree && reg_type)
    gcc_assert (TREE_TYPE (pv.m_tree) == reg_type);

  return pv;
}
/* Update this model for any phis in SNODE, assuming we came from
LAST_CFG_SUPEREDGE. */
void
region_model::update_for_phis (const supernode *snode,
const cfg_superedge *last_cfg_superedge,
region_model_context *ctxt)
{
gcc_assert (last_cfg_superedge);
/* Copy this state and pass it to handle_phi so that all of the phi stmts
are effectively handled simultaneously. */
const region_model old_state (*this);
for (gphi_iterator gpi = const_cast<supernode *>(snode)->start_phis ();
!gsi_end_p (gpi); gsi_next (&gpi))
{
gphi *phi = gpi.phi ();
tree src = last_cfg_superedge->get_phi_arg (phi);
tree lhs = gimple_phi_result (phi);
/* Update next_state based on phi and old_state. */
handle_phi (phi, lhs, src, old_state, ctxt);
}
}
/* Attempt to update this model for taking EDGE (where the last statement
was LAST_STMT), returning true if the edge can be taken, false
otherwise.
When returning false, if OUT is non-NULL, write a new rejected_constraint
to it.
For CFG superedges where LAST_STMT is a conditional or a switch
statement, attempt to add the relevant conditions for EDGE to this
model, returning true if they are feasible, or false if they are
impossible.
For call superedges, push frame information and store arguments
into parameters.
For return superedges, pop frame information and store return
values into any lhs.
Rejection of call/return superedges happens elsewhere, in
program_point::on_edge (i.e. based on program point, rather
than program state). */
bool
region_model::maybe_update_for_edge (const superedge &edge,
				     const gimple *last_stmt,
				     region_model_context *ctxt,
				     rejected_constraint **out)
{
  /* Handle frame updates for interprocedural edges.  */
  switch (edge.m_kind)
    {
    default:
      break;

    case SUPEREDGE_CALL:
      {
	/* Push a frame for the callee and set up its params.  */
	const call_superedge *call_edge = as_a <const call_superedge *> (&edge);
	update_for_call_superedge (*call_edge, ctxt);
      }
      break;

    case SUPEREDGE_RETURN:
      {
	/* Pop the callee's frame, copying any return value to the
	   caller's lhs.  */
	const return_superedge *return_edge
	  = as_a <const return_superedge *> (&edge);
	update_for_return_superedge (*return_edge, ctxt);
      }
      break;

    case SUPEREDGE_INTRAPROCEDURAL_CALL:
      {
	/* A call handled without pushing/popping frames; delegate to
	   update_for_call_summary.  */
	const callgraph_superedge *cg_sedge
	  = as_a <const callgraph_superedge *> (&edge);
	update_for_call_summary (*cg_sedge, ctxt);
      }
      break;
    }

  if (last_stmt == NULL)
    return true;

  /* Apply any constraints for conditionals/switch statements.  */

  if (const gcond *cond_stmt = dyn_cast <const gcond *> (last_stmt))
    {
      const cfg_superedge *cfg_sedge = as_a <const cfg_superedge *> (&edge);
      return apply_constraints_for_gcond (*cfg_sedge, cond_stmt, ctxt, out);
    }

  if (const gswitch *switch_stmt = dyn_cast <const gswitch *> (last_stmt))
    {
      const switch_cfg_superedge *switch_sedge
	= as_a <const switch_cfg_superedge *> (&edge);
      return apply_constraints_for_gswitch (*switch_sedge, switch_stmt,
					    ctxt, out);
    }

  /* Apply any constraints due to an exception being thrown.  */
  if (const cfg_superedge *cfg_sedge = dyn_cast <const cfg_superedge *> (&edge))
    if (cfg_sedge->get_flags () & EDGE_EH)
      return apply_constraints_for_exception (last_stmt, ctxt, out);

  return true;
}
/* Push a new frame_region on to the stack region.
Populate the frame_region with child regions for the function call's
parameters, using values from the arguments at the callsite in the
caller's frame. */
void
region_model::update_for_gcall (const gcall *call_stmt,
				region_model_context *ctxt,
				function *callee)
{
  /* Build a vec of argument svalues, using the current top
     frame for resolving tree expressions.  */
  auto_vec<const svalue *> arg_svals (gimple_call_num_args (call_stmt));

  for (unsigned i = 0; i < gimple_call_num_args (call_stmt); i++)
    {
      tree arg = gimple_call_arg (call_stmt, i);
      arg_svals.quick_push (get_rvalue (arg, ctxt));
    }

  /* Fix GNU style: "if (!callee)" rather than "if(!callee)", and a
     space after the comma in the get_fndecl_for_call call, matching
     the rest of this file.  */
  if (!callee)
    {
      /* Get the function * from the gcall.  */
      tree fn_decl = get_fndecl_for_call (call_stmt, ctxt);
      callee = DECL_STRUCT_FUNCTION (fn_decl);
    }

  push_frame (callee, &arg_svals, ctxt);
}
/* Pop the top-most frame_region from the stack, and copy the return
region's values (if any) into the region for the lvalue of the LHS of
the call (if any). */
void
region_model::update_for_return_gcall (const gcall *call_stmt,
region_model_context *ctxt)
{
/* Get the region for the result of the call, within the caller frame. */
const region *result_dst_reg = NULL;
tree lhs = gimple_call_lhs (call_stmt);
if (lhs)
{
/* Normally we access the top-level frame, which is:
path_var (expr, get_stack_depth () - 1)
whereas here we need the caller frame, hence "- 2" here. */
gcc_assert (get_stack_depth () >= 2);
result_dst_reg = get_lvalue (path_var (lhs, get_stack_depth () - 2),
ctxt);
}
pop_frame (result_dst_reg, NULL, ctxt);
}
/* Extract calling information from the superedge and update the model for the
call */
void
region_model::update_for_call_superedge (const call_superedge &call_edge,
region_model_context *ctxt)
{
const gcall *call_stmt = call_edge.get_call_stmt ();
update_for_gcall (call_stmt, ctxt, call_edge.get_callee_function ());
}
/* Extract calling information from the return superedge and update the model
for the returning call */
void
region_model::update_for_return_superedge (const return_superedge &return_edge,
region_model_context *ctxt)
{
const gcall *call_stmt = return_edge.get_call_stmt ();
update_for_return_gcall (call_stmt,