[IA64] Synchronize RBS on PTRACE_ATTACH
When attaching to a stopped process, the RSE must be explicitly synced to
user space so that the debugger can read the correct values.

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
CC: Roland McGrath <roland@redhat.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 3b2ce0b178
commit aa91a2e900
2 changed files with 61 additions and 0 deletions
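For context, here is a minimal user-space sketch of the situation the patch addresses. It is illustrative only and not part of the commit; the marker variable, the one-second sleep, and the helper names are arbitrary choices. A child enters a job-control stop via SIGSTOP before any tracer exists, then a debugger attaches with PTRACE_ATTACH and immediately reads the child's memory. On ia64 such a read may hit the user register backing store, which is why the attach path must sync it first.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile long marker = 42;	/* fork() keeps this at the same address in the child */

int main(void)
{
	pid_t child = fork();

	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0) {
		/* Child: stop itself via job control before any tracer exists. */
		raise(SIGSTOP);
		for (;;)
			pause();
	}

	sleep(1);	/* crude wait for the child to reach its job-control stop */

	/* Attach to the already-stopped child; this is the point where the
	 * kernel must make the child's user RBS contents valid. */
	if (ptrace(PTRACE_ATTACH, child, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	waitpid(child, NULL, 0);

	/* Read the child's memory right away, before it ever runs again. */
	errno = 0;
	long word = ptrace(PTRACE_PEEKDATA, child, (void *)&marker, NULL);
	if (word == -1 && errno != 0)
		perror("PTRACE_PEEKDATA");
	else
		printf("read %ld from stopped child %d\n", word, (int)child);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	kill(child, SIGKILL);
	return 0;
}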
arch/ia64/kernel/ptrace.c
@@ -613,6 +613,63 @@ void ia64_sync_krbs(void)
 	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 }
 
+/*
+ * After PTRACE_ATTACH, a thread's register backing store area in user
+ * space is assumed to contain correct data whenever the thread is
+ * stopped.  arch_ptrace_stop takes care of this on tracing stops.
+ * But if the child was already stopped for job control when we attach
+ * to it, then it might not ever get into ptrace_stop by the time we
+ * want to examine the user memory containing the RBS.
+ */
+void
+ptrace_attach_sync_user_rbs (struct task_struct *child)
+{
+	int stopped = 0;
+	struct unw_frame_info info;
+
+	/*
+	 * If the child is in TASK_STOPPED, we need to change that to
+	 * TASK_TRACED momentarily while we operate on it.  This ensures
+	 * that the child won't be woken up and return to user mode while
+	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
+	 */
+
+	read_lock(&tasklist_lock);
+	if (child->signal) {
+		spin_lock_irq(&child->sighand->siglock);
+		if (child->state == TASK_STOPPED &&
+		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
+			tsk_set_notify_resume(child);
+
+			child->state = TASK_TRACED;
+			stopped = 1;
+		}
+		spin_unlock_irq(&child->sighand->siglock);
+	}
+	read_unlock(&tasklist_lock);
+
+	if (!stopped)
+		return;
+
+	unw_init_from_blocked_task(&info, child);
+	do_sync_rbs(&info, ia64_sync_user_rbs);
+
+	/*
+	 * Now move the child back into TASK_STOPPED if it should be in a
+	 * job control stop, so that SIGCONT can be used to wake it up.
+	 */
+	read_lock(&tasklist_lock);
+	if (child->signal) {
+		spin_lock_irq(&child->sighand->siglock);
+		if (child->state == TASK_TRACED &&
+		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
+			child->state = TASK_STOPPED;
+		}
+		spin_unlock_irq(&child->sighand->siglock);
+	}
+	read_unlock(&tasklist_lock);
+}
+
 static inline int
 thread_matches (struct task_struct *thread, unsigned long addr)
 {
include/asm-ia64/ptrace.h
@@ -310,6 +310,10 @@ struct switch_stack {
 #define arch_ptrace_stop_needed(code, info) \
 	(!test_thread_flag(TIF_RESTORE_RSE))
 
+extern void ptrace_attach_sync_user_rbs (struct task_struct *);
+#define arch_ptrace_attach(child) \
+	ptrace_attach_sync_user_rbs(child)
+
 #endif /* !__KERNEL__ */
 
 /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
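The header hunk above only declares the hook; how it takes effect is easiest to see from the generic attach path. The fragment below is a hypothetical, simplified sketch of that call site, not the commit's code and not verbatim kernel/ptrace.c: attach_and_sync and the stub declarations are stand-ins. The point is that the arch override runs right after a successful attach, which on ia64 expands to ptrace_attach_sync_user_rbs().

/* Hypothetical sketch only: the names below are stand-ins, not kernel code. */
struct task_struct;					/* opaque stand-in */

extern int  ptrace_attach(struct task_struct *child);	/* generic attach step */
extern void ptrace_attach_sync_user_rbs(struct task_struct *);

/* Architectures that do not provide the hook fall back to a no-op, so only
 * ia64 pays for the extra RBS synchronization on attach. */
#ifndef arch_ptrace_attach
# define arch_ptrace_attach(child)	do { } while (0)
#endif

static int attach_and_sync(struct task_struct *child)
{
	int ret = ptrace_attach(child);

	/* Arch book-keeping after a successful attach; with the ia64 header
	 * definition above this expands to ptrace_attach_sync_user_rbs(child). */
	if (!ret)
		arch_ptrace_attach(child);
	return ret;
}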