blob: 4ca15450afe0344f9acf97ca73dcf837879650e3 [file] [log] [blame]
/* Target-struct-independent code to start (run) and stop an inferior
process.
Copyright (C) 1986-2024 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "cli/cli-cmds.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "exceptions.h"
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "ui.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include <optional>
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"
#include "gdbsupport/buildargv.h"
#include "extension.h"
#include "disasm.h"
#include "interps.h"
/* Prototypes for local functions */
static void sig_print_info (enum gdb_signal);
static void sig_print_header (void);
static void follow_inferior_reset_breakpoints (void);
static bool currently_stepping (struct thread_info *tp);
static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);
static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);
static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
static bool maybe_software_singlestep (struct gdbarch *gdbarch);
static void resume (gdb_signal sig);
static void wait_for_inferior (inferior *inf);
static void restart_threads (struct thread_info *event_thread,
inferior *inf = nullptr);
static bool start_step_over (void);
static bool step_over_info_valid_p (void);
static bool schedlock_applies (struct thread_info *tp);
/* Asynchronous signal handler registered as event loop source for
when we have pending events ready to be passed to the core. */
static struct async_event_handler *infrun_async_inferior_event_token;
/* Stores whether infrun_async was previously enabled or disabled.
Starts off as -1, indicating "never enabled/disabled". */
static int infrun_is_async = -1;
static CORE_ADDR update_line_range_start (CORE_ADDR pc,
struct execution_control_state *ecs);
/* See infrun.h. */
void
infrun_async (int enable)
{
if (infrun_is_async != enable)
{
infrun_is_async = enable;
infrun_debug_printf ("enable=%d", enable);
if (enable)
mark_async_event_handler (infrun_async_inferior_event_token);
else
clear_async_event_handler (infrun_async_inferior_event_token);
}
}
/* See infrun.h. */
void
mark_infrun_async_event_handler (void)
{
mark_async_event_handler (infrun_async_inferior_event_token);
}
/* When set, stop the 'step' command if we enter a function which has
no line number information. The normal behavior is that we step
over such function. */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}
/* proceed and normal_stop use this to notify the user when the
inferior stopped in a different thread than it had been running in.
It can also be used to find for which thread normal_stop last
reported a stop. */
static thread_info_ref previous_thread;
/* See infrun.h. */
void
update_previous_thread ()
{
if (inferior_ptid == null_ptid)
previous_thread = nullptr;
else
previous_thread = thread_info_ref::new_reference (inferior_thread ());
}
/* See infrun.h. */
thread_info *
get_previous_thread ()
{
return previous_thread.get ();
}
/* If set (default for legacy reasons), when following a fork, GDB
will detach from one of the fork branches, child or parent.
Exactly which branch is detached depends on 'set follow-fork-mode'
setting. */
static bool detach_fork = true;
bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}
/* Support for disabling address space randomization. */
bool disable_randomization = true;
static void
show_disable_randomization (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
if (target_supports_disable_randomization ())
gdb_printf (file,
_("Disabling randomization of debuggee's "
"virtual address space is %s.\n"),
value);
else
gdb_puts (_("Disabling randomization of debuggee's "
"virtual address space is unsupported on\n"
"this platform.\n"), file);
}
static void
set_disable_randomization (const char *args, int from_tty,
struct cmd_list_element *c)
{
if (!target_supports_disable_randomization ())
error (_("Disabling randomization of debuggee's "
"virtual address space is unsupported on\n"
"this platform."));
}
/* User interface for non-stop mode. */
bool non_stop = false;
static bool non_stop_1 = false;
static void
set_non_stop (const char *args, int from_tty,
struct cmd_list_element *c)
{
if (target_has_execution ())
{
non_stop_1 = non_stop;
error (_("Cannot change this setting while the inferior is running."));
}
non_stop = non_stop_1;
}
static void
show_non_stop (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file,
_("Controlling the inferior in non-stop mode is %s.\n"),
value);
}
/* "Observer mode" is somewhat like a more extreme version of
non-stop, in which all GDB operations that might affect the
target's execution have been disabled. */
static bool observer_mode = false;
static bool observer_mode_1 = false;
static void
set_observer_mode (const char *args, int from_tty,
struct cmd_list_element *c)
{
if (target_has_execution ())
{
observer_mode_1 = observer_mode;
error (_("Cannot change this setting while the inferior is running."));
}
observer_mode = observer_mode_1;
may_write_registers = !observer_mode;
may_write_memory = !observer_mode;
may_insert_breakpoints = !observer_mode;
may_insert_tracepoints = !observer_mode;
/* We can insert fast tracepoints in or out of observer mode,
but enable them if we're going into this mode. */
if (observer_mode)
may_insert_fast_tracepoints = true;
may_stop = !observer_mode;
update_target_permissions ();
/* Going *into* observer mode we must force non-stop, then
going out we leave it that way. */
if (observer_mode)
{
pagination_enabled = false;
non_stop = non_stop_1 = true;
}
if (from_tty)
gdb_printf (_("Observer mode is now %s.\n"),
(observer_mode ? "on" : "off"));
}
static void
show_observer_mode (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file, _("Observer mode is %s.\n"), value);
}
/* This updates the value of observer mode based on changes in
permissions. Note that we are deliberately ignoring the values of
may-write-registers and may-write-memory, since the user may have
reason to enable these during a session, for instance to turn on a
debugging-related global. */
void
update_observer_mode (void)
{
bool newval = (!may_insert_breakpoints
&& !may_insert_tracepoints
&& may_insert_fast_tracepoints
&& !may_stop
&& non_stop);
/* Let the user know if things change. */
if (newval != observer_mode)
gdb_printf (_("Observer mode is now %s.\n"),
(newval ? "on" : "off"));
observer_mode = observer_mode_1 = newval;
}
/* Tables of how to react to signals; the user sets them. */
static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];
/* Table of signals that are registered with "catch signal". A
non-zero entry indicates that the signal is caught by some "catch
signal" command. */
static unsigned char signal_catch[GDB_SIGNAL_LAST];
/* Table of signals that the target may silently handle.
This is automatically determined from the flags above,
and simply cached here. */
static unsigned char signal_pass[GDB_SIGNAL_LAST];
#define SET_SIGS(nsigs,sigs,flags) \
do { \
int signum = (nsigs); \
while (signum-- > 0) \
if ((sigs)[signum]) \
(flags)[signum] = 1; \
} while (0)
#define UNSET_SIGS(nsigs,sigs,flags) \
do { \
int signum = (nsigs); \
while (signum-- > 0) \
if ((sigs)[signum]) \
(flags)[signum] = 0; \
} while (0)
/* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
this function is to avoid exporting `signal_program'. */
void
update_signals_program_target (void)
{
target_program_signals (signal_program);
}
/* Value to pass to target_resume() to cause all threads to resume. */
#define RESUME_ALL minus_one_ptid
/* Command list pointer for the "stop" placeholder. */
static struct cmd_list_element *stop_command;
/* Nonzero if we want to give control to the user when we're notified
of shared library events by the dynamic linker. */
int stop_on_solib_events;
/* Enable or disable optional shared library event breakpoints
as appropriate when the above flag is changed. */
static void
set_stop_on_solib_events (const char *args,
int from_tty, struct cmd_list_element *c)
{
update_solib_breakpoints ();
}
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file, _("Stopping for shared library events is %s.\n"),
value);
}
/* True after stop if current stack frame should be printed. */
static bool stop_print_frame;
/* This is a cached copy of the target/ptid/waitstatus of the last
event returned by target_wait().
This information is returned by get_last_target_status(). */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;
void init_thread_stepping_state (struct thread_info *tss);
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";
static const char *const follow_fork_mode_kind_names[] = {
follow_fork_mode_child,
follow_fork_mode_parent,
nullptr
};
static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file,
_("Debugger response to a program "
"call of fork or vfork is \"%s\".\n"),
value);
}
/* Handle changes to the inferior list based on the type of fork,
which process is being followed, and whether the other process
should be detached. On entry inferior_ptid must be the ptid of
the fork parent. At return inferior_ptid is the ptid of the
followed inferior. */
static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
INFRUN_SCOPED_DEBUG_ENTER_EXIT;
infrun_debug_printf ("follow_child = %d, detach_fork = %d",
follow_child, detach_fork);
target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
|| fork_kind == TARGET_WAITKIND_VFORKED);
bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
ptid_t parent_ptid = inferior_ptid;
ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();
if (has_vforked
&& !non_stop /* Non-stop always resumes both branches. */
&& current_ui->prompt_state == PROMPT_BLOCKED
&& !(follow_child || detach_fork || sched_multi))
{
/* The parent stays blocked inside the vfork syscall until the
child execs or exits. If we don't let the child run, then
the parent stays blocked. If we're telling the parent to run
in the foreground, the user will not be able to ctrl-c to get
back the terminal, effectively hanging the debug session. */
gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped. Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
return true;
}
inferior *parent_inf = current_inferior ();
inferior *child_inf = nullptr;
gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
if (!follow_child)
{
/* Detach new forked process? */
if (detach_fork)
{
/* Before detaching from the child, remove all breakpoints
from it. If we forked, then this has already been taken
care of by infrun.c. If we vforked however, any
breakpoint inserted in the parent is visible in the
child, even those added while stopped in a vfork
catchpoint. This will remove the breakpoints from the
parent also, but they'll be reinserted below. */
if (has_vforked)
{
/* Keep breakpoints list in sync. */
remove_breakpoints_inf (current_inferior ());
}
if (print_inferior_events)
{
/* Ensure that we have a process ptid. */
ptid_t process_ptid = ptid_t (child_ptid.pid ());
target_terminal::ours_for_output ();
gdb_printf (_("[Detaching after %s from child %s]\n"),
has_vforked ? "vfork" : "fork",
target_pid_to_str (process_ptid).c_str ());
}
}
else
{
/* Add process to GDB's tables. */
child_inf = add_inferior (child_ptid.pid ());
child_inf->attach_flag = parent_inf->attach_flag;
copy_terminal_info (child_inf, parent_inf);
child_inf->set_arch (parent_inf->arch ());
child_inf->tdesc_info = parent_inf->tdesc_info;
child_inf->symfile_flags = SYMFILE_NO_READ;
/* If this is a vfork child, then the address-space is
shared with the parent. */
if (has_vforked)
{
child_inf->pspace = parent_inf->pspace;
child_inf->aspace = parent_inf->aspace;
exec_on_vfork (child_inf);
/* The parent will be frozen until the child is done
with the shared region. Keep track of the
parent. */
child_inf->vfork_parent = parent_inf;
child_inf->pending_detach = false;
parent_inf->vfork_child = child_inf;
parent_inf->pending_detach = false;
}
else
{
child_inf->pspace = new program_space (new_address_space ());
child_inf->aspace = child_inf->pspace->aspace;
child_inf->removable = true;
clone_program_space (child_inf->pspace, parent_inf->pspace);
}
}
if (has_vforked)
{
/* If we detached from the child, then we have to be careful
to not insert breakpoints in the parent until the child
is done with the shared memory region. However, if we're
staying attached to the child, then we can and should
insert breakpoints, so that we can debug it. A
subsequent child exec or exit is enough to know when does
the child stops using the parent's address space. */
parent_inf->thread_waiting_for_vfork_done
= detach_fork ? inferior_thread () : nullptr;
parent_inf->pspace->breakpoints_not_allowed = detach_fork;
infrun_debug_printf
("parent_inf->thread_waiting_for_vfork_done == %s",
(parent_inf->thread_waiting_for_vfork_done == nullptr
? "nullptr"
: (parent_inf->thread_waiting_for_vfork_done
->ptid.to_string ().c_str ())));
}
}
else
{
/* Follow the child. */
if (print_inferior_events)
{
std::string parent_pid = target_pid_to_str (parent_ptid);
std::string child_pid = target_pid_to_str (child_ptid);
target_terminal::ours_for_output ();
gdb_printf (_("[Attaching after %s %s to child %s]\n"),
parent_pid.c_str (),
has_vforked ? "vfork" : "fork",
child_pid.c_str ());
}
/* Add the new inferior first, so that the target_detach below
doesn't unpush the target. */
child_inf = add_inferior (child_ptid.pid ());
child_inf->attach_flag = parent_inf->attach_flag;
copy_terminal_info (child_inf, parent_inf);
child_inf->set_arch (parent_inf->arch ());
child_inf->tdesc_info = parent_inf->tdesc_info;
if (has_vforked)
{
/* If this is a vfork child, then the address-space is shared
with the parent. */
child_inf->aspace = parent_inf->aspace;
child_inf->pspace = parent_inf->pspace;
exec_on_vfork (child_inf);
}
else if (detach_fork)
{
/* We follow the child and detach from the parent: move the parent's
program space to the child. This simplifies some things, like
doing "next" over fork() and landing on the expected line in the
child (note, that is broken with "set detach-on-fork off").
Before assigning brand new spaces for the parent, remove
breakpoints from it: because the new pspace won't match
currently inserted locations, the normal detach procedure
wouldn't remove them, and we would leave them inserted when
detaching. */
remove_breakpoints_inf (parent_inf);
child_inf->aspace = parent_inf->aspace;
child_inf->pspace = parent_inf->pspace;
parent_inf->pspace = new program_space (new_address_space ());
parent_inf->aspace = parent_inf->pspace->aspace;
clone_program_space (parent_inf->pspace, child_inf->pspace);
/* The parent inferior is still the current one, so keep things
in sync. */
set_current_program_space (parent_inf->pspace);
}
else
{
child_inf->pspace = new program_space (new_address_space ());
child_inf->aspace = child_inf->pspace->aspace;
child_inf->removable = true;
child_inf->symfile_flags = SYMFILE_NO_READ;
clone_program_space (child_inf->pspace, parent_inf->pspace);
}
}
gdb_assert (current_inferior () == parent_inf);
/* If we are setting up an inferior for the child, target_follow_fork is
responsible for pushing the appropriate targets on the new inferior's
target stack and adding the initial thread (with ptid CHILD_PTID).
If we are not setting up an inferior for the child (because following
the parent and detach_fork is true), it is responsible for detaching
from CHILD_PTID. */
target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
detach_fork);
gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);
/* target_follow_fork must leave the parent as the current inferior. If we
want to follow the child, we make it the current one below. */
gdb_assert (current_inferior () == parent_inf);
/* If there is a child inferior, target_follow_fork must have created a thread
for it. */
if (child_inf != nullptr)
gdb_assert (!child_inf->thread_list.empty ());
/* Clear the parent thread's pending follow field. Do this before calling
target_detach, so that the target can differentiate the two following
cases:
- We continue past a fork with "follow-fork-mode == child" &&
"detach-on-fork on", and therefore detach the parent. In that
case the target should not detach the fork child.
- We run to a fork catchpoint and the user types "detach". In that
case, the target should detach the fork child in addition to the
parent.
The former case will have pending_follow cleared, the later will have
pending_follow set. */
thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
gdb_assert (parent_thread != nullptr);
parent_thread->pending_follow.set_spurious ();
/* Detach the parent if needed. */
if (follow_child)
{
/* If we're vforking, we want to hold on to the parent until
the child exits or execs. At child exec or exit time we
can remove the old breakpoints from the parent and detach
or resume debugging it. Otherwise, detach the parent now;
we'll want to reuse it's program/address spaces, but we
can't set them to the child before removing breakpoints
from the parent, otherwise, the breakpoints module could
decide to remove breakpoints from the wrong process (since
they'd be assigned to the same address space). */
if (has_vforked)
{
gdb_assert (child_inf->vfork_parent == nullptr);
gdb_assert (parent_inf->vfork_child == nullptr);
child_inf->vfork_parent = parent_inf;
child_inf->pending_detach = false;
parent_inf->vfork_child = child_inf;
parent_inf->pending_detach = detach_fork;
}
else if (detach_fork)
{
if (print_inferior_events)
{
/* Ensure that we have a process ptid. */
ptid_t process_ptid = ptid_t (parent_ptid.pid ());
target_terminal::ours_for_output ();
gdb_printf (_("[Detaching after fork from "
"parent %s]\n"),
target_pid_to_str (process_ptid).c_str ());
}
target_detach (parent_inf, 0);
}
}
/* If we ended up creating a new inferior, call post_create_inferior to inform
the various subcomponents. */
if (child_inf != nullptr)
{
/* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
(do not restore the parent as the current inferior). */
std::optional<scoped_restore_current_thread> maybe_restore;
if (!follow_child && !sched_multi)
maybe_restore.emplace ();
switch_to_thread (*child_inf->threads ().begin ());
post_create_inferior (0);
}
return false;
}
/* Set the last target status as TP having stopped. */
static void
set_last_target_status_stopped (thread_info *tp)
{
set_last_target_status (tp->inf->process_target (), tp->ptid,
target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
}
/* Tell the target to follow the fork we're stopped at. Returns true
if the inferior should be resumed; false, if the target for some
reason decided it's best not to resume. */
static bool
follow_fork ()
{
INFRUN_SCOPED_DEBUG_ENTER_EXIT;
bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
bool should_resume = true;
/* Copy user stepping state to the new inferior thread. FIXME: the
followed fork child thread should have a copy of most of the
parent thread structure's run control related fields, not just these.
Initialized to avoid "may be used uninitialized" warnings from gcc. */
struct breakpoint *step_resume_breakpoint = nullptr;
struct breakpoint *exception_resume_breakpoint = nullptr;
CORE_ADDR step_range_start = 0;
CORE_ADDR step_range_end = 0;
int current_line = 0;
symtab *current_symtab = nullptr;
struct frame_id step_frame_id = { 0 };
if (!non_stop)
{
thread_info *cur_thr = inferior_thread ();
ptid_t resume_ptid
= user_visible_resume_ptid (cur_thr->control.stepping_command);
process_stratum_target *resume_target
= user_visible_resume_target (resume_ptid);
/* Check if there's a thread that we're about to resume, other
than the current, with an unfollowed fork/vfork. If so,
switch back to it, to tell the target to follow it (in either
direction). We'll afterwards refuse to resume, and inform
the user what happened. */
for (thread_info *tp : all_non_exited_threads (resume_target,
resume_ptid))
{
if (tp == cur_thr)
continue;
/* follow_fork_inferior clears tp->pending_follow, and below
we'll need the value after the follow_fork_inferior
call. */
target_waitkind kind = tp->pending_follow.kind ();
if (kind != TARGET_WAITKIND_SPURIOUS)
{
infrun_debug_printf ("need to follow-fork [%s] first",
tp->ptid.to_string ().c_str ());
switch_to_thread (tp);
/* Set up inferior(s) as specified by the caller, and
tell the target to do whatever is necessary to follow
either parent or child. */
if (follow_child)
{
/* The thread that started the execution command
won't exist in the child. Abort the command and
immediately stop in this thread, in the child,
inside fork. */
should_resume = false;
}
else
{
/* Following the parent, so let the thread fork its
child freely, it won't influence the current
execution command. */
if (follow_fork_inferior (follow_child, detach_fork))
{
/* Target refused to follow, or there's some
other reason we shouldn't resume. */
switch_to_thread (cur_thr);
set_last_target_status_stopped (cur_thr);
return false;
}
/* If we're following a vfork, when we need to leave
the just-forked thread as selected, as we need to
solo-resume it to collect the VFORK_DONE event.
If we're following a fork, however, switch back
to the original thread that we continue stepping
it, etc. */
if (kind != TARGET_WAITKIND_VFORKED)
{
gdb_assert (kind == TARGET_WAITKIND_FORKED);
switch_to_thread (cur_thr);
}
}
break;
}
}
}
thread_info *tp = inferior_thread ();
/* If there were any forks/vforks that were caught and are now to be
followed, then do so now. */
switch (tp->pending_follow.kind ())
{
case TARGET_WAITKIND_FORKED:
case TARGET_WAITKIND_VFORKED:
{
ptid_t parent, child;
std::unique_ptr<struct thread_fsm> thread_fsm;
/* If the user did a next/step, etc, over a fork call,
preserve the stepping state in the fork child. */
if (follow_child && should_resume)
{
step_resume_breakpoint = clone_momentary_breakpoint
(tp->control.step_resume_breakpoint);
step_range_start = tp->control.step_range_start;
step_range_end = tp->control.step_range_end;
current_line = tp->current_line;
current_symtab = tp->current_symtab;
step_frame_id = tp->control.step_frame_id;
exception_resume_breakpoint
= clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
thread_fsm = tp->release_thread_fsm ();
/* For now, delete the parent's sr breakpoint, otherwise,
parent/child sr breakpoints are considered duplicates,
and the child version will not be installed. Remove
this when the breakpoints module becomes aware of
inferiors and address spaces. */
delete_step_resume_breakpoint (tp);
tp->control.step_range_start = 0;
tp->control.step_range_end = 0;
tp->control.step_frame_id = null_frame_id;
delete_exception_resume_breakpoint (tp);
}
parent = inferior_ptid;
child = tp->pending_follow.child_ptid ();
/* If handling a vfork, stop all the inferior's threads, they will be
restarted when the vfork shared region is complete. */
if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
&& target_is_non_stop_p ())
stop_all_threads ("handling vfork", tp->inf);
process_stratum_target *parent_targ = tp->inf->process_target ();
/* Set up inferior(s) as specified by the caller, and tell the
target to do whatever is necessary to follow either parent
or child. */
if (follow_fork_inferior (follow_child, detach_fork))
{
/* Target refused to follow, or there's some other reason
we shouldn't resume. */
should_resume = 0;
}
else
{
/* If we followed the child, switch to it... */
if (follow_child)
{
tp = parent_targ->find_thread (child);
switch_to_thread (tp);
/* ... and preserve the stepping state, in case the
user was stepping over the fork call. */
if (should_resume)
{
tp->control.step_resume_breakpoint
= step_resume_breakpoint;
tp->control.step_range_start = step_range_start;
tp->control.step_range_end = step_range_end;
tp->current_line = current_line;
tp->current_symtab = current_symtab;
tp->control.step_frame_id = step_frame_id;
tp->control.exception_resume_breakpoint
= exception_resume_breakpoint;
tp->set_thread_fsm (std::move (thread_fsm));
}
else
{
/* If we get here, it was because we're trying to
resume from a fork catchpoint, but, the user
has switched threads away from the thread that
forked. In that case, the resume command
issued is most likely not applicable to the
child, so just warn, and refuse to resume. */
warning (_("Not resuming: switched threads "
"before following fork child."));
}
/* Reset breakpoints in the child as appropriate. */
follow_inferior_reset_breakpoints ();
}
}
}
break;
case TARGET_WAITKIND_SPURIOUS:
/* Nothing to follow. */
break;
default:
internal_error ("Unexpected pending_follow.kind %d\n",
tp->pending_follow.kind ());
break;
}
if (!should_resume)
set_last_target_status_stopped (tp);
return should_resume;
}
static void
follow_inferior_reset_breakpoints (void)
{
struct thread_info *tp = inferior_thread ();
/* Was there a step_resume breakpoint? (There was if the user
did a "next" at the fork() call.) If so, explicitly reset its
thread number. Cloned step_resume breakpoints are disabled on
creation, so enable it here now that it is associated with the
correct thread.
step_resumes are a form of bp that are made to be per-thread.
Since we created the step_resume bp when the parent process
was being debugged, and now are switching to the child process,
from the breakpoint package's viewpoint, that's a switch of
"threads". We must update the bp's notion of which thread
it is for, or it'll be ignored when it triggers. */
if (tp->control.step_resume_breakpoint)
{
breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
}
/* Treat exception_resume breakpoints like step_resume breakpoints. */
if (tp->control.exception_resume_breakpoint)
{
breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
}
/* Reinsert all breakpoints in the child. The user may have set
breakpoints after catching the fork, in which case those
were never set in the child, but only in the parent. This makes
sure the inserted breakpoints match the breakpoint list. */
breakpoint_re_set ();
insert_breakpoints ();
}
/* The child has exited or execed: resume THREAD, a thread of the parent,
if it was meant to be executing. */
static void
proceed_after_vfork_done (thread_info *thread)
{
if (thread->state == THREAD_RUNNING
&& !thread->executing ()
&& !thread->stop_requested
&& thread->stop_signal () == GDB_SIGNAL_0)
{
infrun_debug_printf ("resuming vfork parent thread %s",
thread->ptid.to_string ().c_str ());
switch_to_thread (thread);
clear_proceed_status (0);
proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
}
}
/* Called whenever we notice an exec or exit event, to handle
detaching or resuming a vfork parent. */
static void
handle_vfork_child_exec_or_exit (int exec)
{
INFRUN_SCOPED_DEBUG_ENTER_EXIT;
struct inferior *inf = current_inferior ();
if (inf->vfork_parent)
{
inferior *resume_parent = nullptr;
/* This exec or exit marks the end of the shared memory region
between the parent and the child. Break the bonds. */
inferior *vfork_parent = inf->vfork_parent;
inf->vfork_parent->vfork_child = nullptr;
inf->vfork_parent = nullptr;
/* If the user wanted to detach from the parent, now is the
time. */
if (vfork_parent->pending_detach)
{
struct program_space *pspace;
/* follow-fork child, detach-on-fork on. */
vfork_parent->pending_detach = false;
scoped_restore_current_pspace_and_thread restore_thread;
/* We're letting loose of the parent. */
thread_info *tp = any_live_thread_of_inferior (vfork_parent);
switch_to_thread (tp);
/* We're about to detach from the parent, which implicitly
removes breakpoints from its address space. There's a
catch here: we want to reuse the spaces for the child,
but, parent/child are still sharing the pspace at this
point, although the exec in reality makes the kernel give
the child a fresh set of new pages. The problem here is
that the breakpoints module being unaware of this, would
likely chose the child process to write to the parent
address space. Swapping the child temporarily away from
the spaces has the desired effect. Yes, this is "sort
of" a hack. */
pspace = inf->pspace;
inf->pspace = nullptr;
address_space_ref_ptr aspace = std::move (inf->aspace);
if (print_inferior_events)
{
std::string pidstr
= target_pid_to_str (ptid_t (vfork_parent->pid));
target_terminal::ours_for_output ();
if (exec)
{
gdb_printf (_("[Detaching vfork parent %s "
"after child exec]\n"), pidstr.c_str ());
}
else
{
gdb_printf (_("[Detaching vfork parent %s "
"after child exit]\n"), pidstr.c_str ());
}
}
target_detach (vfork_parent, 0);
/* Put it back. */
inf->pspace = pspace;
inf->aspace = aspace;
}
else if (exec)
{
/* We're staying attached to the parent, so, really give the
child a new address space. */
inf->pspace = new program_space (maybe_new_address_space ());
inf->aspace = inf->pspace->aspace;
inf->removable = true;
set_current_program_space (inf->pspace);
resume_parent = vfork_parent;
}
else
{
/* If this is a vfork child exiting, then the pspace and
aspaces were shared with the parent. Since we're
reporting the process exit, we'll be mourning all that is
found in the address space, and switching to null_ptid,
preparing to start a new inferior. But, since we don't
want to clobber the parent's address/program spaces, we
go ahead and create a new one for this exiting
inferior. */
scoped_restore_current_thread restore_thread;
/* Temporarily switch to the vfork parent, to facilitate ptrace
calls done during maybe_new_address_space. */
switch_to_thread (any_live_thread_of_inferior (vfork_parent));
address_space_ref_ptr aspace = maybe_new_address_space ();
/* Switch back to the vfork child inferior. Switch to no-thread
while running clone_program_space, so that clone_program_space
doesn't want to read the selected frame of a dead process. */
switch_to_inferior_no_thread (inf);
inf->pspace = new program_space (std::move (aspace));
inf->aspace = inf->pspace->aspace;
set_current_program_space (inf->pspace);
inf->removable = true;
inf->symfile_flags = SYMFILE_NO_READ;
clone_program_space (inf->pspace, vfork_parent->pspace);
resume_parent = vfork_parent;
}
gdb_assert (current_program_space == inf->pspace);
if (non_stop && resume_parent != nullptr)
{
/* If the user wanted the parent to be running, let it go
free now. */
scoped_restore_current_thread restore_thread;
infrun_debug_printf ("resuming vfork parent process %d",
resume_parent->pid);
for (thread_info *thread : resume_parent->threads ())
proceed_after_vfork_done (thread);
}
}
}
/* Handle TARGET_WAITKIND_VFORK_DONE. */
static void
handle_vfork_done (thread_info *event_thread)
{
INFRUN_SCOPED_DEBUG_ENTER_EXIT;
/* We only care about this event if inferior::thread_waiting_for_vfork_done is
set, that is if we are waiting for a vfork child not under our control
(because we detached it) to exec or exit.
If an inferior has vforked and we are debugging the child, we don't use
the vfork-done event to get notified about the end of the shared address
space window. We rely instead on the child's exec or exit event, and the
inferior::vfork_{parent,child} fields are used instead. See
handle_vfork_child_exec_or_exit for that. */
if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
{
infrun_debug_printf ("not waiting for a vfork-done event");
return;
}
/* We stopped all threads (other than the vforking thread) of the inferior in
follow_fork and kept them stopped until now. It should therefore not be
possible for another thread to have reported a vfork during that window.
If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
vfork-done we are handling right now. */
gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
event_thread->inf->thread_waiting_for_vfork_done = nullptr;
event_thread->inf->pspace->breakpoints_not_allowed = 0;
/* On non-stop targets, we stopped all the inferior's threads in follow_fork,
resume them now. On all-stop targets, everything that needs to be resumed
will be when we resume the event thread. */
if (target_is_non_stop_p ())
{
/* restart_threads and start_step_over may change the current thread, make
sure we leave the event thread as the current thread. */
scoped_restore_current_thread restore_thread;
insert_breakpoints ();
start_step_over ();
if (!step_over_info_valid_p ())
restart_threads (event_thread, event_thread->inf);
}
}
/* Enum strings for "set|show follow-exec-mode". */
static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
follow_exec_mode_new,
follow_exec_mode_same,
nullptr,
};
static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
/* EXEC_FILE_TARGET is assumed to be non-NULL. */
static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
int pid = ptid.pid ();
ptid_t process_ptid;
/* Switch terminal for any messages produced e.g. by
breakpoint_re_set. */
target_terminal::ours_for_output ();
/* This is an exec event that we actually wish to pay attention to.
Refresh our symbol table to the newly exec'd program, remove any
momentary bp's, etc.
If there are breakpoints, they aren't really inserted now,
since the exec() transformed our inferior into a fresh set
of instructions.
We want to preserve symbolic breakpoints on the list, since
we have hopes that they can be reset after the new a.out's
symbol table is read.
However, any "raw" breakpoints must be removed from the list
(e.g., the solib bp's), since their address is probably invalid
now.
And, we DON'T want to call delete_breakpoints() here, since
that may write the bp's "shadow contents" (the instruction
value that was overwritten with a TRAP instruction). Since
we now have a new a.out, those shadow contents aren't valid. */
mark_breakpoints_out (current_program_space);
/* The target reports the exec event to the main thread, even if
some other thread does the exec, and even if the main thread was
stopped or already gone. We may still have non-leader threads of
the process on our list. E.g., on targets that don't have thread
exit events (like remote) and nothing forces an update of the
thread list up to here. When debugging remotely, it's best to
avoid extra traffic, when possible, so avoid syncing the thread
list with the target, and instead go ahead and delete all threads
of the process but the one that reported the event. Note this must
be done before calling update_breakpoints_after_exec, as
otherwise clearing the threads' resources would reference stale
thread breakpoints -- it may have been one of these threads that
stepped across the exec. We could just clear their stepping
states, but as long as we're iterating, might as well delete
them. Deleting them now rather than at the next user-visible
stop provides a nicer sequence of events for user and MI
notifications. */
for (thread_info *th : all_threads_safe ())
if (th->ptid.pid () == pid && th->ptid != ptid)
delete_thread (th);
/* We also need to clear any left over stale state for the
leader/event thread. E.g., if there was any step-resume
breakpoint or similar, it's gone now. We cannot truly
step-to-next statement through an exec(). */
thread_info *th = inferior_thread ();
th->control.step_resume_breakpoint = nullptr;
th->control.exception_resume_breakpoint = nullptr;
th->control.single_step_breakpoints = nullptr;
th->control.step_range_start = 0;
th->control.step_range_end = 0;
/* The user may have had the main thread held stopped in the
previous image (e.g., schedlock on, or non-stop). Release
it now. */
th->stop_requested = 0;
update_breakpoints_after_exec ();
/* What is this a.out's name? */
process_ptid = ptid_t (pid);
gdb_printf (_("%s is executing new program: %s\n"),
target_pid_to_str (process_ptid).c_str (),
exec_file_target);
/* We've followed the inferior through an exec. Therefore, the
inferior has essentially been killed & reborn. */
breakpoint_init_inferior (current_inferior (), inf_execd);
gdb::unique_xmalloc_ptr<char> exec_file_host
= exec_file_find (exec_file_target, nullptr);
/* If we were unable to map the executable target pathname onto a host
pathname, tell the user that. Otherwise GDB's subsequent behavior
is confusing. Maybe it would even be better to stop at this point
so that the user can specify a file manually before continuing. */
if (exec_file_host == nullptr)
warning (_("Could not load symbols for executable %s.\n"
"Do you need \"set sysroot\"?"),
exec_file_target);
/* Reset the shared library package. This ensures that we get a
shlib event when the child reaches "_start", at which point the
dld will have had a chance to initialize the child. */
/* Also, loading a symbol file below may trigger symbol lookups, and
we don't want those to be satisfied by the libraries of the
previous incarnation of this process. */
no_shared_libraries (current_program_space);
inferior *execing_inferior = current_inferior ();
inferior *following_inferior;
if (follow_exec_mode_string == follow_exec_mode_new)
{
/* The user wants to keep the old inferior and program spaces
around. Create a new fresh one, and switch to it. */
/* Do exit processing for the original inferior before setting the new
inferior's pid. Having two inferiors with the same pid would confuse
find_inferior_p(t)id. Transfer the terminal state and info from the
old to the new inferior. */
following_inferior = add_inferior_with_spaces ();
swap_terminal_info (following_inferior, execing_inferior);
exit_inferior (execing_inferior);
following_inferior->pid = pid;
}
else
{
/* follow-exec-mode is "same", we continue execution in the execing
inferior. */
following_inferior = execing_inferior;
/* The old description may no longer be fit for the new image.
E.g, a 64-bit process exec'ed a 32-bit process. Clear the
old description; we'll read a new one below. No need to do
this on "follow-exec-mode new", as the old inferior stays
around (its description is later cleared/refetched on
restart). */
target_clear_description ();
}
target_follow_exec (following_inferior, ptid, exec_file_target);
gdb_assert (current_inferior () == following_inferior);
gdb_assert (current_program_space == following_inferior->pspace);
/* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
because the proper displacement for a PIE (Position Independent
Executable) main symbol file will only be computed by
solib_create_inferior_hook below. breakpoint_re_set would fail
to insert the breakpoints with the zero displacement. */
try_open_exec_file (exec_file_host.get (), following_inferior,
SYMFILE_DEFER_BP_RESET);
/* If the target can specify a description, read it. Must do this
after flipping to the new executable (because the target supplied
description must be compatible with the executable's
architecture, and the old executable may e.g., be 32-bit, while
the new one 64-bit), and before anything involving memory or
registers. */
target_find_description ();
gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);
breakpoint_re_set ();
/* Reinsert all breakpoints. (Those which were symbolic have
been reset to the proper address in the new a.out, thanks
to symbol_file_command...). */
insert_breakpoints ();
/* The next resume of this inferior should bring it to the shlib
startup breakpoints. (If the user had also set bp's on
"main" from the old (parent) process, then they'll auto-
matically get reset there in the new process.). */
}
/* The chain of threads that need to do a step-over operation to get
past e.g., a breakpoint. What technique is used to step over the
breakpoint/watchpoint does not matter -- all threads end up in the
same queue, to maintain rough temporal order of execution, in order
to avoid starvation, otherwise, we could e.g., find ourselves
constantly stepping the same couple threads past their breakpoints
over and over, if the single-step finish fast enough. */
thread_step_over_list global_thread_step_over_list;
/* Bit flags indicating what the thread needs to step over. */
enum step_over_what_flag
{
/* Step over a breakpoint. */
STEP_OVER_BREAKPOINT = 1,
/* Step past a non-continuable watchpoint, in order to let the
instruction execute so we can evaluate the watchpoint
expression. */
STEP_OVER_WATCHPOINT = 2
};
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
/* Info about an instruction that is being stepped over. */
struct step_over_info
{
/* If we're stepping past a breakpoint, this is the address space
and address of the instruction the breakpoint is set at. We'll
skip inserting all breakpoints here. Valid iff ASPACE is
non-NULL. */
const address_space *aspace = nullptr;
CORE_ADDR address = 0;
/* The instruction being stepped over triggers a nonsteppable
watchpoint. If true, we'll skip inserting watchpoints. */
int nonsteppable_watchpoint_p = 0;
/* The thread's global number. */
int thread = -1;
};
/* The step-over info of the location that is being stepped over.
Note that with async/breakpoint always-inserted mode, a user might
set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
being stepped over. As setting a new breakpoint inserts all
breakpoints, we need to make sure the breakpoint being stepped over
isn't inserted then. We do that by only clearing the step-over
info when the step-over is actually finished (or aborted).
Presently GDB can only step over one breakpoint at any given time.
Given threads that can't run code in the same address space as the
breakpoint's can't really miss the breakpoint, GDB could be taught
to step-over at most one breakpoint per address space (so this info
could move to the address space object if/when GDB is extended).
The set of breakpoints being stepped over will normally be much
smaller than the set of all breakpoints, so a flag in the
breakpoint location structure would be wasteful. A separate list
also saves complexity and run-time, as otherwise we'd have to go
through all breakpoint locations clearing their flag whenever we
start a new sequence. Similar considerations weigh against storing
this info in the thread object. Plus, not all step overs actually
have breakpoint locations -- e.g., stepping past a single-step
breakpoint, or stepping to complete a non-continuable
watchpoint. */
static struct step_over_info step_over_info;
/* Record the address of the breakpoint/instruction we're currently
stepping over.
N.B. We record the aspace and address now, instead of say just the thread,
because when we need the info later the thread may be running. */
static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
int nonsteppable_watchpoint_p,
int thread)
{
step_over_info.aspace = aspace;
step_over_info.address = address;
step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
step_over_info.thread = thread;
}
/* Called when we're not longer stepping over a breakpoint / an
instruction, so all breakpoints are free to be (re)inserted. */
static void
clear_step_over_info (void)
{
infrun_debug_printf ("clearing step over info");
step_over_info.aspace = nullptr;
step_over_info.address = 0;
step_over_info.nonsteppable_watchpoint_p = 0;
step_over_info.thread = -1;
}
/* See infrun.h. */
int
stepping_past_instruction_at (struct address_space *aspace,
CORE_ADDR address)
{
return (step_over_info.aspace != nullptr
&& breakpoint_address_match (aspace, address,
step_over_info.aspace,
step_over_info.address));
}
/* See infrun.h. */
int
thread_is_stepping_over_breakpoint (int thread)
{
return (step_over_info.thread != -1
&& thread == step_over_info.thread);
}
/* See infrun.h. */
int
stepping_past_nonsteppable_watchpoint (void)
{
return step_over_info.nonsteppable_watchpoint_p;
}
/* Returns true if step-over info is valid. */
static bool
step_over_info_valid_p (void)
{
return (step_over_info.aspace != nullptr
|| stepping_past_nonsteppable_watchpoint ());
}
/* Displaced stepping. */
/* In non-stop debugging mode, we must take special care to manage
breakpoints properly; in particular, the traditional strategy for
stepping a thread past a breakpoint it has hit is unsuitable.
'Displaced stepping' is a tactic for stepping one thread past a
breakpoint it has hit while ensuring that other threads running
concurrently will hit the breakpoint as they should.
The traditional way to step a thread T off a breakpoint in a
multi-threaded program in all-stop mode is as follows:
a0) Initially, all threads are stopped, and breakpoints are not
inserted.
a1) We single-step T, leaving breakpoints uninserted.
a2) We insert breakpoints, and resume all threads.
In non-stop debugging, however, this strategy is unsuitable: we
don't want to have to stop all threads in the system in order to
continue or step T past a breakpoint. Instead, we use displaced
stepping:
n0) Initially, T is stopped, other threads are running, and
breakpoints are inserted.
n1) We copy the instruction "under" the breakpoint to a separate
location, outside the main code stream, making any adjustments
to the instruction, register, and memory state as directed by
T's architecture.
n2) We single-step T over the instruction at its new location.
n3) We adjust the resulting register and memory state as directed
by T's architecture. This includes resetting T's PC to point
back into the main instruction stream.
n4) We resume T.
This approach depends on the following gdbarch methods:
- gdbarch_max_insn_length and gdbarch_displaced_step_location
indicate where to copy the instruction, and how much space must
be reserved there. We use these in step n1.
- gdbarch_displaced_step_copy_insn copies a instruction to a new
address, and makes any necessary adjustments to the instruction,
register contents, and memory. We use this in step n1.
- gdbarch_displaced_step_fixup adjusts registers and memory after
we have successfully single-stepped the instruction, to yield the
same effect the instruction would have had if we had executed it
at its original address. We use this in step n3.
The gdbarch_displaced_step_copy_insn and
gdbarch_displaced_step_fixup functions must be written so that
copying an instruction with gdbarch_displaced_step_copy_insn,
single-stepping across the copied instruction, and then applying
gdbarch_displaced_insn_fixup should have the same effects on the
thread's memory and registers as stepping the instruction in place
would have. Exactly which responsibilities fall to the copy and
which fall to the fixup is up to the author of those functions.
See the comments in gdbarch.sh for details.
Note that displaced stepping and software single-step cannot
currently be used in combination, although with some care I think
they could be made to. Software single-step works by placing
breakpoints on all possible subsequent instructions; if the
displaced instruction is a PC-relative jump, those breakpoints
could fall in very strange places --- on pages that aren't
executable, or at addresses that are not proper instruction
boundaries. (We do generally let other threads run while we wait
to hit the software single-step breakpoint, and they might
encounter such a corrupted instruction.) One way to work around
this would be to have gdbarch_displaced_step_copy_insn fully
simulate the effect of PC-relative instructions (and return NULL)
on architectures that use software single-stepping.
In non-stop mode, we can have independent and simultaneous step
requests, so more than one thread may need to simultaneously step
over a breakpoint. The current implementation assumes there is
only one scratch space per process. In this case, we have to
serialize access to the scratch space. If thread A wants to step
over a breakpoint, but we are currently waiting for some other
thread to complete a displaced step, we leave thread A stopped and
place it in the displaced_step_request_queue. Whenever a displaced
step finishes, we pick the next thread in the queue and start a new
displaced step operation on it. See displaced_step_prepare and
displaced_step_finish for details. */
/* Return true if THREAD is doing a displaced step. */
static bool
displaced_step_in_progress_thread (thread_info *thread)
{
gdb_assert (thread != nullptr);
return thread->displaced_step_state.in_progress ();
}
/* Return true if INF has a thread doing a displaced step. */
static bool
displaced_step_in_progress (inferior *inf)
{
return inf->displaced_step_state.in_progress_count > 0;
}
/* Return true if any thread is doing a displaced step. */
static bool
displaced_step_in_progress_any_thread ()
{
for (inferior *inf : all_non_exited_inferiors ())
{
if (displaced_step_in_progress (inf))
return true;
}
return false;
}
static void
infrun_inferior_exit (struct inferior *inf)
{
inf->displaced_step_state.reset ();
inf->thread_waiting_for_vfork_done = nullptr;
}
static void
infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
{
/* If some threads where was doing a displaced step in this inferior at the
moment of the exec, they no longer exist. Even if the exec'ing thread
doing a displaced step, we don't want to to any fixup nor restore displaced
stepping buffer bytes. */
follow_inf->displaced_step_state.reset ();
for (thread_info *thread : follow_inf->threads ())
thread->displaced_step_state.reset ();
/* Since an in-line step is done with everything else stopped, if there was
one in progress at the time of the exec, it must have been the exec'ing
thread. */
clear_step_over_info ();
follow_inf->thread_waiting_for_vfork_done = nullptr;
}
/* If ON, and the architecture supports it, GDB will use displaced
stepping to step over breakpoints. If OFF, or if the architecture
doesn't support it, GDB will instead use the traditional
hold-and-step approach. If AUTO (which is the default), GDB will
decide which technique to use to step over breakpoints depending on
whether the target works in a non-stop way (see use_displaced_stepping). */
static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
struct cmd_list_element *c,
const char *value)
{
if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
gdb_printf (file,
_("Debugger's willingness to use displaced stepping "
"to step over breakpoints is %s (currently %s).\n"),
value, target_is_non_stop_p () ? "on" : "off");
else
gdb_printf (file,
_("Debugger's willingness to use displaced stepping "
"to step over breakpoints is %s.\n"), value);
}
/* Return true if the gdbarch implements the required methods to use
displaced stepping. */
static bool
gdbarch_supports_displaced_stepping (gdbarch *arch)
{
/* Only check for the presence of `prepare`. The gdbarch verification ensures
that if `prepare` is provided, so is `finish`. */
return gdbarch_displaced_step_prepare_p (arch);
}
/* Return non-zero if displaced stepping can/should be used to step
over breakpoints of thread TP. */
static bool
use_displaced_stepping (thread_info *tp)
{
/* If the user disabled it explicitly, don't use displaced stepping. */
if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
return false;
/* If "auto", only use displaced stepping if the target operates in a non-stop
way. */
if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
&& !target_is_non_stop_p ())
return false;
gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
/* If the architecture doesn't implement displaced stepping, don't use
it. */
if (!gdbarch_supports_displaced_stepping (gdbarch))
return false;
/* If recording, don't use displaced stepping. */
if (find_record_target () != nullptr)
return false;
/* If displaced stepping failed before for this inferior, don't bother trying
again. */
if (tp->inf->displaced_step_state.failed_before)
return false;
return true;
}
/* Simple function wrapper around displaced_step_thread_state::reset. */
static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
displaced->reset ();
}
/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
/* Prepare to single-step, using displaced stepping.
Note that we cannot use displaced stepping when we have a signal to
deliver. If we have a signal to deliver and an instruction to step
over, then after the step, there will be no indication from the
target whether the thread entered a signal handler or ignored the
signal and stepped over the instruction successfully --- both cases
result in a simple SIGTRAP. In the first case we mustn't do a
fixup, and in the second case we must --- but we can't tell which.
Comments in the code for 'random signals' in handle_inferior_event
explain how we handle this case instead.
Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
if displaced stepping this thread got queued; or
DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
stepped. */
static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
regcache *regcache = get_thread_regcache (tp);
struct gdbarch *gdbarch = regcache->arch ();
displaced_step_thread_state &disp_step_thread_state
= tp->displaced_step_state;
/* We should never reach this function if the architecture does not
support displaced stepping. */
gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
/* Nor if the thread isn't meant to step over a breakpoint. */
gdb_assert (tp->control.trap_expected);
/* Disable range stepping while executing in the scratch pad. We
want a single-step even if executing the displaced instruction in
the scratch buffer lands within the stepping range (e.g., a
jump/branch). */
tp->control.may_range_step = 0;
/* We are about to start a displaced step for this thread. If one is already
in progress, something's wrong. */
gdb_assert (!disp_step_thread_state.in_progress ());
if (tp->inf->displaced_step_state.unavailable)
{
/* The gdbarch tells us it's not worth asking to try a prepare because
it is likely that it will return unavailable, so don't bother asking. */
displaced_debug_printf ("deferring step of %s",
tp->ptid.to_string ().c_str ());
global_thread_step_over_chain_enqueue (tp);
return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
}
displaced_debug_printf ("displaced-stepping %s now",
tp->ptid.to_string ().c_str ());
scoped_restore_current_thread restore_thread;
switch_to_thread (tp);
CORE_ADDR original_pc = regcache_read_pc (regcache);
CORE_ADDR displaced_pc;
/* Display the instruction we are going to displaced step. */
if (debug_displaced)
{
string_file tmp_stream;
int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
nullptr);
if (dislen > 0)
{
gdb::byte_vector insn_buf (dislen);
read_memory (original_pc, insn_buf.data (), insn_buf.size ());
std::string insn_bytes = bytes_to_string (insn_buf);
displaced_debug_printf ("original insn %s: %s \t %s",
paddress (gdbarch, original_pc),
insn_bytes.c_str (),
tmp_stream.string ().c_str ());
}
else
displaced_debug_printf ("original insn %s: invalid length: %d",
paddress (gdbarch, original_pc), dislen);
}
displaced_step_prepare_status status
= gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
{
displaced_debug_printf ("failed to prepare (%s)",
tp->ptid.to_string ().c_str ());
return DISPLACED_STEP_PREPARE_STATUS_CANT;
}
else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
{
/* Not enough displaced stepping resources available, defer this
request by placing it the queue. */
displaced_debug_printf ("not enough resources available, "
"deferring step of %s",
tp->ptid.to_string ().c_str ());
global_thread_step_over_chain_enqueue (tp);
return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
}
gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
/* Save the information we need to fix things up if the step
succeeds. */
disp_step_thread_state.set (gdbarch);
tp->inf->displaced_step_state.in_progress_count++;
displaced_debug_printf ("prepared successfully thread=%s, "
"original_pc=%s, displaced_pc=%s",
tp->ptid.to_string ().c_str (),
paddress (gdbarch, original_pc),
paddress (gdbarch, displaced_pc));
/* Display the new displaced instruction(s). */
if (debug_displaced)
{
string_file tmp_stream;
CORE_ADDR addr = displaced_pc;
/* If displaced stepping is going to use h/w single step then we know
that the replacement instruction can only be a single instruction,
in that case set the end address at the next byte.
Otherwise the displaced stepping copy instruction routine could
have generated multiple instructions, and all we know is that they
must fit within the LEN bytes of the buffer. */
CORE_ADDR end
= addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
? 1 : gdbarch_displaced_step_buffer_length (gdbarch));
while (addr < end)
{
int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
if (dislen <= 0)
{
displaced_debug_printf
("replacement insn %s: invalid length: %d",
paddress (gdbarch, addr), dislen);
break;
}
gdb::byte_vector insn_buf (dislen);
read_memory (addr, insn_buf.data (), insn_buf.size ());
std::string insn_bytes = bytes_to_string (insn_buf);
std::string insn_str = tmp_stream.release ();
displaced_debug_printf ("replacement insn %s: %s \t %s",
paddress (gdbarch, addr),
insn_bytes.c_str (),
insn_str.c_str ());
addr += dislen;
}
}
return DISPLACED_STEP_PREPARE_STATUS_OK;
}
/* Wrapper for displaced_step_prepare_throw that disabled further
attempts at displaced stepping if we get a memory error. */
static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
displaced_step_prepare_status status
= DISPLACED_STEP_PREPARE_STATUS_CANT;
try
{
status = displaced_step_prepare_throw (thread);
}
catch (const gdb_exception_error &ex)
{
if (ex.error != MEMORY_ERROR
&& ex.error != NOT_SUPPORTED_ERROR)
throw;
infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
ex.what ());
/* Be verbose if "set displaced-stepping" is "on", silent if
"auto". */
if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
{
warning (_("disabling displaced stepping: %s"),
ex.what ());
}
/* Disable further displaced stepping attempts. */
thread->inf->displaced_step_state.failed_before = 1;
}
return status;
}
/* True if any thread of TARGET that matches RESUME_PTID requires
target_thread_events enabled. This assumes TARGET does not support
target thread options. */
static bool
any_thread_needs_target_thread_events (process_stratum_target *target,
ptid_t resume_ptid)
{
for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
if (displaced_step_in_progress_thread (tp)
|| schedlock_applies (tp)
|| tp->thread_fsm () != nullptr)
return true;
return false;
}
/* Maybe disable thread-{cloned,created,exited} event reporting after
a step-over (either in-line or displaced) finishes. */
static void
update_thread_events_after_step_over (thread_info *event_thread,
const target_waitstatus &event_status)
{
if (schedlock_applies (event_thread))
{
/* If scheduler-locking applies, continue reporting
thread-created/thread-cloned events. */
return;
}
else if (target_supports_set_thread_options (0))
{
/* We can control per-thread options. Disable events for the
event thread, unless the thread is gone. */
if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
event_thread->set_thread_options (0);
}
else
{
/* We can only control the target-wide target_thread_events
setting. Disable it, but only if other threads in the target
don't need it enabled. */
process_stratum_target *target = event_thread->inf->process_target ();
if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
target_thread_events (false);
}
}
/* If we displaced stepped an instruction successfully, adjust registers and
memory to yield the same effect the instruction would have had if we had
executed it at its original address, and return
DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
If the thread wasn't displaced stepping, return
DISPLACED_STEP_FINISH_STATUS_OK as well. */
static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread,
const target_waitstatus &event_status)
{
/* Check whether the parent is displaced stepping. */
inferior *parent_inf = event_thread->inf;
/* If this was a fork/vfork/clone, this event indicates that the
displaced stepping of the syscall instruction has been done, so
we perform cleanup for parent here. Also note that this
operation also cleans up the child for vfork, because their pages
are shared. */
/* If this is a fork (child gets its own address space copy) and
some displaced step buffers were in use at the time of the fork,
restore the displaced step buffer bytes in the child process.
Architectures which support displaced stepping and fork events
must supply an implementation of
gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
during gdbarch validation to support architectures which support
displaced stepping but not forks. */
if (event_status.kind () == TARGET_WAITKIND_FORKED)
{
struct regcache *parent_regcache = get_thread_regcache (event_thread);
struct gdbarch *gdbarch = parent_regcache->arch ();
if (gdbarch_supports_displaced_stepping (gdbarch))
gdbarch_displaced_step_restore_all_in_ptid
(gdbarch, parent_inf, event_status.child_ptid ());
}
displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
/* Was this thread performing a displaced step? */
if (!displaced->in_progress ())
return DISPLACED_STEP_FINISH_STATUS_OK;
update_thread_events_after_step_over (event_thread, event_status);
gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
event_thread->inf->displaced_step_state.in_progress_count--;
/* Fixup may need to read memory/registers. Switch to the thread
that we're fixing up. Also, target_stopped_by_watchpoint checks
the current thread, and displaced_step_restore performs ptid-dependent
memory accesses using current_inferior(). */
switch_to_thread (event_thread);
displaced_step_reset_cleanup cleanup (displaced);
/* Do the fixup, and release the resources acquired to do the displaced
step. */
displaced_step_finish_status status
= gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
event_thread, event_status);
if (event_status.kind () == TARGET_WAITKIND_FORKED
|| event_status.kind () == TARGET_WAITKIND_VFORKED
|| event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
{
/* Since the vfork/fork/clone syscall instruction was executed
in the scratchpad, the child's PC is also within the
scratchpad. Set the child's PC to the parent's PC value,
which has already been fixed up. Note: we use the parent's
aspace here, although we're touching the child, because the
child hasn't been added to the inferior list yet at this
point. */
struct regcache *parent_regcache = get_thread_regcache (event_thread);
struct gdbarch *gdbarch = parent_regcache->arch ();
struct regcache *child_regcache
= get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
gdbarch);
/* Read PC value of parent. */
CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);
displaced_debug_printf ("write child pc from %s to %s",
paddress (gdbarch,
regcache_read_pc (child_regcache)),
paddress (gdbarch, parent_pc));
regcache_write_pc (child_regcache, parent_pc);
}
return status;
}
/* Data to be passed around while handling an event. This data is
discarded between events. */
struct execution_control_state
{
explicit execution_control_state (thread_info *thr = nullptr)
: ptid (thr == nullptr ? null_ptid : thr->ptid),
event_thread (thr)
{
}
process_stratum_target *target = nullptr;
ptid_t ptid;
/* The thread that got the event, if this was a thread event; NULL
otherwise. */
struct thread_info *event_thread;
struct target_waitstatus ws;
int stop_func_filled_in = 0;
CORE_ADDR stop_func_alt_start = 0;
CORE_ADDR stop_func_start = 0;
CORE_ADDR stop_func_end = 0;
const char *stop_func_name = nullptr;
int wait_some_more = 0;
/* True if the event thread hit the single-step breakpoint of
another thread. Thus the event doesn't cause a stop, the thread
needs to be single-stepped past the single-step breakpoint before
we can switch back to the original stepping thread. */
int hit_singlestep_breakpoint = 0;
};
static void keep_going_pass_signal (struct execution_control_state *ecs);
static void prepare_to_wait (struct execution_control_state *ecs);
static bool keep_going_stepped_thread (struct thread_info *tp);
static step_over_what thread_still_needs_step_over (struct thread_info *tp);
/* Are there any pending step-over requests? If so, run all we can
now and return true. Otherwise, return false. */
static bool
start_step_over (void)
{
INFRUN_SCOPED_DEBUG_ENTER_EXIT;
/* Don't start a new step-over if we already have an in-line
step-over operation ongoing. */
if (step_over_info_valid_p ())
return false;
/* Steal the global thread step over chain. As we try to initiate displaced
steps, threads will be enqueued in the global chain if no buffers are
available. If we iterated on the global chain directly, we might iterate
indefinitely. */
thread_step_over_list threads_to_step
= std::move (global_thread_step_over_list);
infrun_debug_printf ("stealing global queue of threads to step, length = %d",
thread_step_over_chain_length (threads_to_step));
bool started = false;
/* On scope exit (whatever the reason, return or exception), if there are
threads left in the THREADS_TO_STEP chain, put back these threads in the
global list. */
SCOPE_EXIT
{
if (threads_to_step.empty ())
infrun_debug_printf ("step-over queue now empty");
else
{
infrun_debug_printf ("putting back %d threads to step in global queue",
thread_step_over_chain_length (threads_to_step));
global_thread_step_over_chain_enqueue_chain
(std::move (threads_to_step));
}
};
thread_step_over_list_safe_range range
= make_thread_step_over_list_safe_range (threads_to_step);
for (thread_info *tp : range)
{
step_over_what step_what;
int must_be_in_line;
gdb_assert (!tp->stop_requested);
if (tp->inf->displaced_step_state.unavailable)
{
/* The arch told us to not even try preparing another displaced step
for this inferior. Just leave the thread in THREADS_TO_STEP, it
will get moved to the global chain on scope exit. */
continue;
}
if (tp->inf->thread_waiting_for_vfork_done != nullptr)
{
/* When we stop all threads, handling a vfork, any thread in the step
over chain remains there. A user could also try to continue a
thread stopped at a breakpoint while another thread is waiting for
a vfork-done event. In any case, we don't want to start a step
over right now. */
continue;
}
/* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
while we try to prepare the displaced step, we don't add it back to
the global step over chain. This is to avoid a thread staying in the
step over chain indefinitely if something goes wrong when resuming it
If the error is intermittent and it still needs a step over, it will
get enqueued again when we try to resume it normally. */
threads_to_step.erase (threads_to_step.iterator_to (*tp));
step_what = thread_still_needs_step_over (tp);
must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
|| ((step_what & STEP_OVER_BREAKPOINT)
&& !use_displaced_stepping (tp)));
/* We currently stop all threads of all processes to step-over
in-line. If we need to start a new in-line step-over, let
any pending displaced steps finish first. */
if (must_be_in_line && displaced_step_in_progress_any_thread ())
{
global_thread_step_over_chain_enqueue (tp);
continue;
}
if (tp->control.trap_expected
|| tp->resumed ()
|| tp->executing ())
{
internal_error ("[%s] has inconsistent state: "
"trap_expected=%d, resumed=%d, executing=%d\n",
tp->ptid.to_string ().c_str (),
tp->control.trap_expected,
tp->resumed (),
tp->executing ());
}
infrun_debug_printf ("resuming [%s] for step-over",
tp->ptid.to_string ().c_str ());
/* keep_going_pass_signal skips the step-over if the breakpoint
is no longer inserted. In all-stop, we want to keep looking
for a thread that needs a step-over instead of resuming TP,
because we wouldn't be able to resume anything else until the
target stops again. In non-stop, the resume always resumes
only TP, so it's OK to let the thread resume freely. */
if (!target_is_non_stop_p () && !step_what)
continue;
switch_to_thread (tp);
execution_control_state ecs (tp);
keep_going_pass_signal (&ecs);
if (!ecs.wait_some_more)
error (_("Command aborted."));
/* If the thread's step over could not be initiated because no buffers
were available, it was re-added to the global step over chain. */
if (tp->resumed ())
{
infrun_debug_printf ("[%s] was resumed.",
tp->ptid.to_string ().c_str ());
gdb_assert (!thread_is_in_step_over_chain (tp));
}
else
{
infrun_debug_printf ("[%s] was NOT resumed.",
tp->ptid.to_string ().c_str ());
gdb_assert (thread_is_in_step_over_chain (tp));
}
/* If we started a new in-line step-over, we're done. */
if (step_over_info_valid_p ())
{
gdb_assert (tp->control.trap_expected);
started = true;
break;
}
if (!target_is_non_stop_p ())
{
/* On all-stop, shouldn't have resumed unless we needed a
step over. */
gdb_assert (tp->control.trap_expected
|| tp->step_after_step_resume_breakpoint);
/* With remote targets (at least), in all-stop, we can't
issue any further remote commands until the program stops
again. */
started = true;
break;
}
/* Either the thread no longer needed a step-over, or a new
displaced stepping sequence started. Even in the latter
case, continue looking. Maybe we can also start another
displaced step on a thread of other process. */
}
return started;
}
/* Update global variables holding ptids to hold NEW_PTID if they were
holding OLD_PTID. */
static void
infrun_thread_ptid_changed (process_stratum_target *target,
ptid_t old_ptid, ptid_t new_ptid)
{
if (inferior_ptid == old_ptid
&& current_inferior ()->process_target () == target)
inferior_ptid = new_ptid;
}
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
static const char *const scheduler_enums[] = {
schedlock_off,
schedlock_on,
schedlock_step,
schedlock_replay,
nullptr
};
static const char *scheduler_mode = schedlock_replay;
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
gdb_printf (file,
_("Mode for locking scheduler "
"during execution is \"%s\".\n"),
value);
}
static void
set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
{
if (!target_can_lock_scheduler ())
{
scheduler_mode = schedlock_off;
error (_("Target '%s' cannot support this command."),
target_shortname ());
}
}
/* True if execution commands resume all threads of all processes by
default; otherwise, resume only threads of the current inferior
process. */
bool sched_multi = false;
/* Try to setup for software single stepping. Return true if target_resume()
should use hardware single step.
GDBARCH the current gdbarch. */
static bool
maybe_software_singlestep (struct gdbarch *gdbarch)
{
bool hw_step = true;
if (execution_direction == EXEC_FORWARD
&& gdbarch_software_single_step_p (gdbarch))
hw_step = !insert_single_step_breakpoints (gdbarch);
return hw_step;
}
/* See infrun.h. */
ptid_t
user_visible_resume_ptid (int step)
{
ptid_t resume_ptid;
if (non_stop)
{
/* With non-stop mode on, threads are always handled
individually. */
resume_ptid = inferior_ptid;
}
else if ((scheduler_mode == schedlock_on)
|| (scheduler_mode == schedlock_step && step))
{
/* User-settable 'scheduler' mode requires solo thread
resume. */
resume_ptid = inferior_ptid;
}
else if ((scheduler_mode == schedlock_replay)
&& target_record_will_replay (minus_one_ptid, execution_direction))
{
/* User-settable 'scheduler' mode requires solo thread resume in replay
mode. */
resume_ptid = inferior_ptid;
}
else if (inferior_ptid != null_ptid
&& inferior_thread ()->control.in_cond_eval)
{
/* The inferior thread is evaluating a BP condition. Other threads
might be stopped or running and we do not want to change their
state, thus, resume only the current thread. */
resume_ptid = inferior_ptid;
}
else if (!sched_multi && target_supports_multi_process ())
{
/* Resume all threads of the current process (and none of other
processes). */
resume_ptid = ptid_t (inferior_ptid.pid ());
}
else
{
/* Resume all threads of all processes. */
resume_ptid = RESUME_ALL;
}
return resume_ptid;
}
/* See infrun.h. */
process_stratum_target *
user_visible_resume_target (ptid_t resume_ptid)
{
return (resume_ptid == minus_one_ptid && sched_multi
? nullptr
: current_inferior ()->process_target ());
}
/* Find a thread from the inferiors that we'll resume that is waiting
for a vfork-done event. */
static thread_info *
find_thread_waiting_for_vfork_done ()
{
gdb_assert (!target_is_non_stop_p ());
if (sched_multi)
{
for (inferior *inf : all_non_exited_inferiors ())
if (inf->thread_waiting_for_vfork_done != nullptr)
return inf->thread_waiting_for_vfork_done;
}
else
{
inferior *cur_inf = current_inferior ();
if (cur_inf->thread_waiting_for_vfork_done != nullptr)
return cur_inf->thread_waiting_for_vfork_done;
}
return nullptr;
}
/* Return a ptid representing the set of threads that we will resume,
in the perspective of the target, assuming run control handling
does not require leaving some threads stopped (e.g., stepping past
breakpoint). USER_STEP indicates whether we're about to start the
target for a stepping command. */
static ptid_t
internal_resume_ptid (int user_step)
{
/* In non-stop, we always control threads individually. Note that
the target may always work in non-stop mode even with "set
non-stop off", in which case user_visible_resume_ptid could
return a wildcard ptid. */
if (target_is_non_stop_p ())
return inferior_ptid;
/* The rest of the function assumes non-stop==off and
target-non-stop==off.
If a thread is waiting for a vfork-done event, it means breakpoints are out
for this inferior (well, program space in fact). We don't want to resume
any thread other than the one waiting for vfork done, otherwise these other
threads could miss breakpoints. So if a thread in the resumption set is
waiting for a vfork-done event, resume only that thread.
The resumption set width depends on whether schedule-multiple is on or off.
Note that if the target_resume interface was more flexible, we could be
smarter here when schedule-multiple is on. For example, imagine 3
inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
target(s) to resume:
- All threads of inferior 1
- Thread 2.1
- Thread 3.2
Since we don't have that flexibility (we can only pass one ptid), just
resume the first thread waiting for a vfork-done event we find (e.g. thread
2.1). */
thread_info *thr = find_thread_waiting_for_vfork_done ();
if (thr != nullptr)
{
/* If we have a thread that is waiting for a vfork-done event,
then we should have switched to it earlier. Calling
target_resume with thread scope is only possible when the
current thread matches the thread scope. */
gdb_assert (thr->ptid == inferior_ptid);
gdb_assert (thr->inf->process_target ()
== inferior_thread ()->inf->process_target ());
return thr->ptid;
}
return user_visible_resume_ptid (user_step);
}
/* Wrapper for target_resume, that handles infrun-specific
bookkeeping. */
static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
struct thread_info *tp = inferior_thread ();
gdb_assert (!tp->stop_requested);
/* Install inferior's terminal modes. */
target_terminal::inferior ();
/* Avoid confusing the next resume, if the next stop/resume
happens to apply to another thread. */
tp->set_stop_signal (GDB_SIGNAL_0);
/* Advise target which signals may be handled silently.
If we have removed breakpoints because we are stepping over one
in-line (in any thread), we need to receive all signals to avoid
accidentally skipping a breakpoint during execution of a signal
handler.
Likewise if we're displaced stepping, otherwise a trap for a
breakpoint in a signal handler might be confused with the
displaced step finishing. We don't make the displaced_step_finish
step distinguish the cases instead, because:
- a backtrace while stopped in the signal handler would show the
scratch pad as frame older than the signal handler, instead of
the real mainline code.
- when the thread is later resumed, the signal handler would
return to the scratch pad area, which would no longer be
valid. */
if (step_over_info_valid_p ()
|| displaced_step_in_progress (tp->inf))
target_pass_signals ({});
else
target_pass_signals (signal_pass);
/* Request that the target report thread-{created,cloned,exited}
events in the following situations:
- If we are performing an in-line step-over-breakpoint, then we
will remove a breakpoint from the target and only run the
current thread. We don't want any new thread (spawned by the
step) to start running, as it might miss the breakpoint. We
need to clear the step-over state if the stepped thread exits,
so we also enable thread-exit events.
- If we are stepping over a breakpoint out of line (displaced
stepping) then we won't remove a breakpoint from the target,
but, if the step spawns a new clone thread, then we will need
to fixup the $pc address in the clone child too, so we need it
to start stopped. We need to release the displaced stepping
buffer if the stepped thread exits, so we also enable
thread-exit events.
- If scheduler-locking applies, threads that the current thread
spawns should remain halted. It's not strictly necessary to
enable thread-exit events in this case, but it doesn't hurt.
*/
if (step_over_info_valid_p ()
|| displaced_step_in_progress_thread (tp)
|| schedlock_applies (tp))
{
gdb_thread_options options
= GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
if (target_supports_set_thread_options (options))
tp->set_thread_options (options);
else
target_thread_events (true);
}
else if (tp->thread_fsm () != nullptr)
{
gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
if (target_supports_set_thread_options (options))
tp->set_thread_options (options);
else
target_thread_events (true);
}
else
{
if (target_supports_set_thread_options (0))
tp->set_thread_options (0);
else
{
process_stratum_target *resume_target = tp->inf->process_target ();
if (!any_thread_needs_target_thread_events (resume_target,
resume_ptid))
target_thread_events (false);
}
}
/* If we're resuming more than one thread simultaneously, then any
thread other than the leader is being set to run free. Clear any
previous thread option for those threads. */
if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
{
process_stratum_target *resume_target = tp->inf->process_target ();
for (thread_info *thr_iter : all_non_exited_threads (resume_target,
resume_ptid))
if (thr_iter != tp)
thr_iter->set_thread_options (0);
}
infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
resume_ptid.to_string ().c_str (),
step, gdb_signal_to_symbol_string (sig));
target_resume (resume_ptid, step, sig);
}
/* Resume the inferior. SIG is the signal to give the inferior
(GDB_SIGNAL_0 for none). Note: don't call this directly; instead
call 'resume', which handles exceptions. */
static void
resume_1 (enum gdb_signal sig)
{
struct thread_info *tp = inferior_thread ();
regcache *regcache = get_thread_regcache (tp);
struct gdbarch *gdbarch = regcache->arch ();
ptid_t resume_ptid;
/* This represents the user's step vs continue request. When
deciding whether "set scheduler-locking step" applies, it's the
user's intention that counts. */
const int user_step = tp->control.stepping_command;
/* This represents what we'll actually request the target to do.
This can decay from a step to a continue, if e.g., we need to
implement single-stepping with breakpoints (software
single-step). */
bool step;
gdb_assert (!tp->stop_requested);
gdb_assert (!thread_is_in_step_over_chain (tp));
if (tp->has_pending_waitstatus ())
{
infrun_debug_printf
("thread %s has pending wait "
"status %s (currently_stepping=%d).",
tp->ptid.to_string ().c_str (),
tp->pending_waitstatus ().to_string ().c_str (),
currently_stepping (tp));
tp->inf->process_target ()->threads_executing = true;
tp->set_resumed (true);
/* FIXME: What should we do if we are supposed to resume this
thread with a signal? Maybe we should maintain a queue of
pending signals to deliver. */
if (sig != GDB_SIGNAL_0)
{
warning (_("Couldn't deliver signal %s to %s."),
gdb_signal_to_name (sig),
tp->ptid.to_string ().c_str ());
}
tp->set_stop_signal (GDB_SIGNAL_0);
if (target_can_async_p ())
{
target_async (true);
/* Tell the event loop we have an event to process. */
mark_async_event_handler (infrun_async_inferior_event_token);
}
return;
}
tp->stepped_breakpoint = 0;
/* Depends on stepped_breakpoint. */
step = currently_stepping (tp);
if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
{
/* Don't try to single-step a vfork parent that is waiting for
the child to get out of the shared memory region (by exec'ing
or exiting). This is particularly important on software
single-step archs, as the child process would trip on the
software single step breakpoint inserted for the parent
process. Since the parent will not actually execute any
instruction until the child is out of the shared region (such
are vfork's semantics), it is safe to simply continue it.
Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
the parent, and tell it to `keep_going', which automatically
re-sets it stepping. */
infrun_debug_printf ("resume : clear step");
step = false;
}
CORE_ADDR pc = regcache_read_pc (regcache);
infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
"current thread [%s] at %s",
step, gdb_signal_to_symbol_string (sig),
tp->control.trap_expected,
inferior_ptid.to_string ().c_str (),
paddress (gdbarch, pc));
const address_space *aspace = tp->inf->aspace.get ();
/* Normally, by the time we reach `resume', the breakpoints are either
removed or inserted, as appropriate. The exception is if we're sitting
at a permanent breakpoint; we need to step over it, but permanent
breakpoints can't be removed. So we have to test for it here. */
if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
{
if (sig != GDB_SIGNAL_0)
{
/* We have a signal to pass to the inferior. The resume
may, or may not take us to the signal handler. If this
is a step, we'll need to stop in the signal handler, if
there's one, (if the target supports stepping into
handlers), or in the next mainline instruction, if
there's no handler. If this is a continue, we need to be
sure to run the handler with all breakpoints inserted.
In all cases, set a breakpoint at the current address
(where the handler returns to), and once that breakpoint
is hit, resume skipping the permanent breakpoint. If
that breakpoint isn't hit, then we've stepped into the
signal handler (or hit some other event). We'll delete
the step-resume breakpoint then. */
infrun_debug_printf ("resume: skipping permanent breakpoint, "
"deliver signal first");
clear_step_over_info ();
tp->control.trap_expected = 0;
if (tp->control.step_resume_breakpoint == nullptr)
{
/* Set a "high-priority" step-resume, as we don't want
user breakpoints at PC to trigger (again) when this
hits. */
insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
.permanent);
tp->step_after_step_resume_breakpoint = step;
}
insert_breakpoints ();
}
else
{
/* There's no signal to pass, we can go ahead and skip the
permanent breakpoint manually. */
infrun_debug_printf ("skipping permanent breakpoint");
gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
/* Update pc to reflect the new address from which we will
execute instructions. */
pc = regcache_read_pc (regcache);
if (step)
{
/* We've already advanced the PC, so the stepping part
is done. Now we need to arrange for a trap to be
reported to handle_inferior_event. Set a breakpoint
at the current PC, and run to it. Don't update
prev_pc, because if we end in
switch_back_to_stepped_thread, we want the "expected
thread advanced also" branch to be taken. IOW, we
don't want this thread to step further from PC
(overstep). */
gdb_assert (!step_over_info_valid_p ());
insert_single_step_breakpoint (gdbarch, aspace, pc);
insert_breakpoints ();
resume_ptid = internal_resume_ptid (user_step);
do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
tp->set_resumed (true);
return;
}
}
}
/* If we have a breakpoint to step over, make sure to do a single
step only. Same if we have software watchpoints. */
if (tp->control.trap_expected || bpstat_should_step ())
tp->control.may_range_step = 0;
/* If displaced stepping is enabled, step over breakpoints by executing a
copy of the instruction at a different address.
We can't use displaced stepping when we have a signal to deliver;
the comments for displaced_step_prepare explain why. The
comments in the handle_inferior event for dealing with 'random
signals' explain what we do instead.
We can't use displaced stepping when we are waiting for vfork_done
event, displaced stepping breaks the vfork child similarly as single
step software breakpoint. */
if (tp->control.trap_expected
&& use_displaced_stepping (tp)
&& !step_over_info_valid_p ()
&& sig == GDB_SIGNAL_0
&& current_inferior ()->thread_waiting_for_vfork_done == nullptr)
{
displaced_step_prepare_status prepare_status
= displaced_step_prepare (tp);
if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
{
infrun_debug_printf ("Got placed in step-over queue");
tp->control.trap_expected = 0;
return;
}
else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
{
/* Fallback to stepping over the breakpoint in-line. */
if (target_is_non_stop_p ())
stop_all_threads ("displaced stepping falling back on inline stepping");
set_step_over_info (aspace, regcache_read_pc (regcache), 0,
tp->global_num);
step = maybe_software_singlestep (gdbarch);
insert_breakpoints ();
}
else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
{
/* Update pc to reflect the new address from which we will
execute instructions due to displaced stepping. */
pc = regcache_read_pc (get_thread_regcache (tp));
step = gdbarch_displaced_step_hw_singlestep (gdbarch);
}
else
gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
"value.");
}
/* Do we need to do it the hard way, w/temp breakpoints? */
else if (step)
step = maybe_software_singlestep (gdbarch);
/* Currently, our software single-step implementation leads to different
results than hardware single-stepping in one situation: when stepping
into delivering a signal which has an associated signal handler,
hardware single-step will stop at the first instruction of the handler,
while software single-step will simply skip execution of the handler.
For now, this difference in behavior is accepted since there is no
easy way to actually implement single-stepping into a signal handler
without kernel support.
However, there is one scenario where this difference leads to follow-on
problems: if we're stepping off a breakpoint by removing all breakpoints
and then single-stepping. In this case, the software single-step
behavior means that even if there is a *breakpoint* in the signal
handler, GDB still would not stop.
Fortunately, we can at least fix this particular issue. We detect
here the case where we are about to deliver a signal while software
single-stepping with breakpoints removed. In this situation, we
revert the decisions to remove all breakpoints and insert single-
step breakpoints, and instead we install a step-resume breakpoint
at the current address, deliver the signal without stepping, and
once we arrive back at the step-resume breakpoint, actually step
over the breakpoint we originally wanted to step over. */
if (thread_has_single_step_breakpoints_set (tp)
&& sig != GDB_SIGNAL_0
&& step_over_info_valid_p ())
{
/* If we have nested signals or a pending signal is delivered
immediately after a handler returns, might already have
a step-resume breakpoint set on the earlier handler. We cannot
set another step-resume breakpoint; just continue on until the
original breakpoint is hit. */
if (tp->control.step_resume_breakpoint == nullptr)
{
insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
tp->step_after_step_resume_breakpoint = 1;
}
delete_single_step_breakpoints (tp);
clear_step_over_info ();
tp->control.trap_expected = 0;
insert_breakpoints ();
}
/* If STEP is set, it's a request to use hardware stepping
facilities. But in that case, we should never
use singlestep breakpoint. */
gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
/* Decide the set of threads to ask the target to resume. */
if (tp->control.trap_expected)
{
/* We're allowing a thread to run past a breakpoint it has
hit, either by single-stepping the thread with the breakpoint
removed, or by displaced stepping, with the breakpoint inserted.
In the former case, we need to single-step only this thread,
and keep others stopped, as they can miss this breakpoint if
allowed to run. That's not really a problem for displaced
stepping, but, we still keep other threads stopped, in case
another thread is also stopped for a breakpoint waiting for
its turn in the displaced stepping queue. */
resume_ptid = inferior_ptid;
}
else
resume_ptid = internal_resume_ptid (user_step);
if (execution_direction != EXEC_REVERSE
&& step && breakpoint_inserted_here_p (aspace, pc))
{
/* There are two cases where we currently need to step a
breakpoint instruction when we have a signal to deliver:
- See handle_signal_stop where we handle random signals that
could take out us out of the stepping range. Normally, in
that case we end up continuing (instead of stepping) over the
signal handler with a breakpoint at PC, but there are cases
where we should _always_ single-step, even if we have a
step-resume breakpoint, like when a software watchpoint is
set. Assuming single-stepping and delivering a signal at the
same time would takes us to the signal handler, then we could
have removed the breakpoint at PC to step over it. However,
some hardware step targets (like e.g., Mac OS) can't step
into signal handlers, and for those, we need to leave the
breakpoint at PC inserted, as otherwise if the handler
recurses and executes PC again, it'll miss the breakpoint.
So we leave the breakpoint inserted anyway, but we need to
record that we tried to step a breakpoint instruction, so
that adjust_pc_after_break doesn't end up confused.
- In non-stop if we insert a breakpoint (e.g., a step-resume)
in one thread after another thread that was stepping had been
momentarily paused for a step-over. When we re-resume the
stepping thread, it may be resumed from that address with a
breakpoint that hasn't trapped yet. Seen with
gdb.threads/non-stop-fair-events.exp, on targets that don't
do displaced stepping. */
infrun_debug_printf ("resume: [%s] stepped breakpoint",
tp->ptid.to_string ().c_str ());
tp->stepped_breakpoint = 1;
/* Most targets can step a breakpoint instruction, thus
executing it normally. But if this one cannot, just
continue and we will hit it anyway. */
if (gdbarch_cannot_step_breakpoint (gdbarch))
step = false;
}
if (tp->control.may_range_step)
{
/* If we're resuming a thread with the PC out of the step
range, then we're doing some nested/finer run control
operation, like stepping the thread out of the dynamic
linker or the displaced stepping scratch pad. We
shouldn't have allowed a range step then. */
gdb_assert (pc_in_thread_step_range (pc, tp));
}
do_target_resume (resume_ptid, step, sig);
tp->set_resumed (true);
}
/* Resume the inferior. SIG is the signal to give the inferior
(GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
rolls back state on error. */
static void
resume (gdb_signal sig)
{
try
{
resume_1 (sig);
}
catch (const gdb_exception &ex)
{
/* If resuming is being aborted for any reason, delete any
single-step breakpoint resume_1 may have created, to avoid
confusing the following resumption, and to avoid leaving
single-step breakpoints perturbing other threads, in case
we're running in non-stop mode. */
if (inferior_ptid != null_ptid)
delete_single_step_breakpoints (inferior_thread ());
throw;
}
}
/* Proceeding. */
/* See infrun.h. */
/* Counter that tracks number of user visible stops. This can be used
to tell whether a command has proceeded the inferior past the
current location. This allows e.g., inferior function calls in
breakpoint commands to not interrupt the command list. When the
call finishes successfully, the inferior is standing at the same
breakpoint as if nothing happened (and so we don't call
normal_stop). */
static ULONGEST current_stop_id;
/* See infrun.h. */
ULONGEST
get_stop_id (void)
{
return current_stop_id;
}
/* Called when we report a user visible stop. */
static void
new_stop_id (void)
{
current_stop_id++;
}
/* Clear out all variables saying what to do when inferior is continued.
First do this, then set the ones you want, then call `proceed'. */
static void
clear_proceed_status_thread (struct thread_info *tp)
{
infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
/* If we're starting a new sequence, then the previous finished
single-step is no longer relevant. */
if (tp->has_pending_waitstatus ())
{
if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
{
infrun_debug_printf ("pending event of %s was a finished step. "
"Discarding.",
tp->ptid.to_string ().c_str ());
tp->clear_pending_waitstatus ();
tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
}
else
{
infrun_debug_printf
("thread %s has pending wait status %s (currently_stepping=%d).",
tp->ptid.to_string ().c_str (),
tp->pending_waitstatus ().to_string ().c_str (),
currently_stepping (tp));
}
}
/* If this signal should not be seen by program, give it zero.
Used for debugging signals. */
if (!signal_pass_state (tp->stop_signal ()))
tp->set_stop_signal (GDB_SIGNAL_0);
tp->release_thread_fsm ();
tp->control.trap_expected = 0;
tp->control.step_range_start = 0;
tp->control.step_range_end = 0;
tp->control.may_range_step = 0;
tp->control.step_frame_id = null_frame_id;
tp->control.step_stack_frame_id = null_frame_id;
tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
tp->control.step_start_function = nullptr;
tp->stop_requested = 0;
tp->control.stop_step = 0;
tp->control.proceed_to_finish = 0;
tp->control.stepping_command = 0;
/* Discard any remaining commands or status from previous stop. */
bpstat_clear (&tp->control.stop_bpstat);
}
/* Notify the current interpreter and observers that the target is about to
proceed. */
static void
notify_about_to_proceed ()
{
top_level_interpreter ()->on_about_to_proceed ();
gdb::observers::about_to_proceed.notify ();
}
void
clear_proceed_status (int step)
{
/* With scheduler-locking replay, stop replaying other threads if we're
not replaying the user-visible resume ptid.
This is a convenience feature to not require the user to explicitly
stop replaying the other threads. We're assuming that the user's
intent is to resume tracing the recorded process. */
if (!non_stop && scheduler_mode == schedlock_replay
&& target_record_is_replaying (minus_one_ptid)
&& !target_record_will_replay (user_visible_resume_ptid (step),
execution_direction))
target_record_stop_replaying ();
if (!non_stop && inferior_ptid != null_ptid)
{
ptid_t resume_ptid = user_visible_resume_ptid (step);
process_stratum_target *resume_target
= user_visible_resume_target (resume_ptid);
/* In all-stop mode, delete the per-thread status of all threads
we're about to resume, implicitly and explicitly. */
for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
clear_proceed_status_thread (tp);
}
if (inferior_ptid != null_ptid)
{
struct inferior *inferior;
if (non_stop)
{
/* If in non-stop mode, only delete the per-thread status of
the current thread. */
clear_proceed_status_thread (inferior_thread ());
}
inferior = current_inferior ();
inferior->control.stop_soon = NO_STOP_QUIETLY;
}
notify_about_to_proceed ();
}
/* Returns true if TP is still stopped at a breakpoint that needs
stepping-over in order to make progress. If the breakpoint is gone
meanwhile, we can skip the whole step-over dance. */
static bool
thread_still_needs_step_over_bp (struct thread_info *tp)
{
if (tp->stepping_over_breakpoint)
{
struct regcache *regcache = get_thread_regcache (tp);
if (breakpoint_here_p (tp->inf->aspace.get (),
regcache_read_pc (regcache))
== ordinary_breakpoint_here)
return true;
tp->stepping_over_breakpoint = 0;
}
return false;
}
/* Check whether thread TP still needs to start a step-over in order
to make progress when resumed. Returns an bitwise or of enum
step_over_what bits, indicating what needs to be stepped over. */
static step_over_what
thread_still_needs_step_over (struct thread_info *tp)
{
step_over_what what = 0;
if (thread_still_needs_step_over_bp (tp))
what |= STEP_OVER_BREAKPOINT;
if (tp->stepping_over_watchpoint
&& !target_have_steppable_watchpoint ())
what |= STEP_OVER_WATCHPOINT;
return what;
}
/* Returns true if scheduler locking applies. STEP indicates whether
we're about to do a step/next-like command to a thread. */
static bool
schedlock_applies (struct thread_info *tp)
{
return (scheduler_mode == schedlock_on
|| (scheduler_mode == schedlock_step
&& tp->control.stepping_command)
|| (scheduler_mode == schedlock_replay
&& target_record_will_replay (minus_one_ptid,
execution_direction)));
}
/* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
in all target stacks that have threads executing and don't have threads
with pending events.
When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
in all target stacks that have threads executing regardless of whether
there are pending events or not.
Passing FORCE_P as false makes sense when GDB is going to wait for
events from all threads and will therefore spot the pending events.
However, if GDB is only going to wait for events from select threads
(i.e. when performing an inferior call) then a pending event on some
other thread will not be spotted, and if we fail to commit the resume
state for the thread performing the inferior call, then the inferior
call will never complete (or even start). */
static void
maybe_set_commit_resumed_all_targets (bool force_p)
{
scoped_restore_current_thread restore_thread;
for (inferior *inf : all_non_exited_inferiors ())
{
process_stratum_target *proc_target = inf->process_target ();
if (proc_target->commit_resumed_state)
{
/* We already set this in a previous iteration, via another
inferior sharing the process_stratum target. */
continue;
}
/* If the target has no resumed threads, it would be useless to
ask it to commit the resumed threads. */
if (!proc_target->threads_executing)
{
infrun_debug_printf ("not requesting commit-resumed for target "
"%s, no resumed threads",
proc_target->shortname ());
continue;
}
/* As an optimization, if a thread from this target has some
status to report, handle it before requiring the target to
commit its resumed threads: handling the status might lead to
resuming more threads. */
if (!force_p && proc_target->has_resumed_with_pending_wait_status ())
{
infrun_debug_printf ("not requesting commit-resumed for target %s, a"
" thread has a pending waitstatus",
proc_target->shortname ());
continue;
}
switch_to_inferior_no_thread (inf);
if (!force_p && target_has_pending_events ())
{
infrun_debug_printf ("not requesting commit-resumed for target %s, "
"target has pending events",
proc_target->shortname ());
continue;
}
infrun_debug_printf ("enabling commit-resumed for target %s",
proc_target->shortname ());
proc_target->commit_resumed_state = true;
}
}
/* See infrun.h. */
void
maybe_call_commit_resumed_all_targets ()
{
scoped_restore_current_thread restore_thread;
for (inferior *inf : all_non_exited_inferiors ())
{
process_stratum_target *proc_target = inf->process_target ();
if (!proc_target->commit_resumed_state)
continue;
switch_to_inferior_no_thread (inf);
infrun_debug_printf ("calling commit_resumed for target %s",
proc_target->shortname());
target_commit_resumed ();
}
}
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
that only the outermost one attempts to re-enable
commit-resumed. */
static bool enable_commit_resumed = true;
/* See infrun.h. */
scoped_disable_commit_resumed::scoped_disable_commit_resumed
(const char *reason)
: m_reason (reason),
m_prev_enable_commit_resumed (enable_commit_resumed)
{
infrun_debug_printf ("reason=%s", m_reason);
enable_commit_resumed = false;
for (inferior *inf : all_non_exited_inferiors ())
{
process_stratum_target *proc_target = inf->process_target ();
if (m_prev_enable_commit_resumed)
{
/* This is the outermost instance: force all
COMMIT_RESUMED_STATE to false. */
proc_target->commit_resumed_state = false;
}
else
{
/* This is not the outermost instance, we expect
COMMIT_RESUMED_STATE to have been cleared by the
outermost instance. */
gdb_assert (!proc_target->commit_resumed_state);
}
}
}
/* See infrun.h. */
void
scoped_disable_commit_resumed::reset ()
{
if (m_reset)
return;
m_reset = true;
infrun_debug_printf ("reason=%s", m_reason);
gdb_assert (!enable_commit_resumed);
enable_commit_resumed = m_prev_enable_commit_resumed;
if (m_prev_enable_commit_resumed)
{
/* This is the outermost instance, re-enable
COMMIT_RESUMED_STATE on the targets where it's possible. */
maybe_set_commit_resumed_all_targets (false);
}
else
{
/* This is not the outermost instance, we expect
COMMIT_RESUMED_STATE to still be false. */
for (inferior *inf : all_non_exited_inferiors ())
{
process_stratum_target *proc_target = inf->process_target ();
gdb_assert (!proc_target->commit_resumed_state);
}
}
}
/* See infrun.h. */
scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
reset ();
}
/* See infrun.h. */
void
scoped_disable_commit_resumed::reset_and_commit ()
{
reset ();
maybe_call_commit_resumed_all_targets ();
}
/* See infrun.h. */
scoped_enable_commit_resumed::scoped_enable_commit_resumed
(const char *reason, bool force_p)
: m_reason (reason),
m_prev_enable_commit_resumed (enable_commit_resumed)
{
infrun_debug_printf ("reason=%s", m_reason);
if (!enable_commit_resumed)
{
enable_commit_resumed = true;
/* Re-enable COMMIT_RESUMED_STATE on the targets where it's
possible. */
maybe_set_commit_resumed_all_targets (force_p);
maybe_call_commit_resumed_all_targets ();
}
}
/* See infrun.h. */
scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
{
infrun_debug_printf ("reason=%s", m_reason);
gdb_assert (enable_commit_resumed);
enable_commit_resumed = m_prev_enable_commit_resumed;
if (!enable_commit_resumed)
{
/* Force all COMMIT_RESUMED_STATE back to false. */
for (inferior *inf : all_non_exited_inferiors ())
{
process_stratum_target *proc_target = inf->process_target ();
proc_target->commit_resumed_state = false;
}
}
}
/* Check that all the targets we're about to resume are in non-stop
mode. Ideally, we'd only care whether all targets support
target-async, but we're not there yet. E.g., stop_all_threads
doesn't know how to handle all-stop targets. Also, the remote
protocol in all-stop mode is synchronous, irrespective of
target-async, which means that things like a breakpoint re-set
triggered by one target would try to read memory from all targets
and fail. */
static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
if (!non_stop && resume_target == nullptr)
{
scoped_restore_current_thread restore_thread;
/* This is used to track whether we're resuming more than one
target. */
process_stratum_target *first_connection = nullptr;
/* The first inferior we see with a target that does not work in
always-non-stop mode. */
inferior *first_not_non_stop = nullptr;
for (inferior *inf : all_non_exited_inferiors ())
{
switch_to_inferior_no_thread (inf);
if (!target_has_execution ())
continue;
process_stratum_target *proc_target
= current_inferior ()->process_target();
if (!target_is_non_stop_p ())
first_not_non_stop = inf;
if (first_connection == nullptr)
first_connection = proc_target;
else if (first_connection != proc_target
&& first_not_non_stop != nullptr)
{
switch_to_inferior_no_thread (first_not_non_stop);
proc_target = current_inferior ()->process_target();
error (_("Connection %d (%s) does not support "
"multi-target resumption."),
proc_target->connection_number,
make_target_connection_string (proc_target).c_str ());
}
}
}
}
/* Helper function for `proceed`. Check if thread TP is suitable for
resuming, and, if it is, switch to the thread and call
`keep_going_pass_signal`. If TP is not suitable for resuming then this
function will just return without switching threads. */
static void
proceed_resume_thread_checked (thread_info *tp)
{
if (!tp->inf->has_execution ())
{
infrun_debug_printf ("[%s] target has no execution",
tp->ptid.to_string ().c_str ());
return;
}
if (tp->resumed ())
{
infrun_debug_printf ("[%s] resumed",
tp->ptid.to_string ().c_str ());
gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
return;
}
if (thread_is_in_step_over_chain (tp))
{
infrun_debug_printf ("[%s] needs step-over",
tp->ptid.to_string ().c_str ());