Pull per signal_struct coredumps from Eric Biederman:
 "Current coredumps are mixed up with the exit code, the signal handling
  code, and the ptrace code, making coredumps much more complicated than
  necessary and difficult to follow.

  This series of changes starts with ptrace_stop and cleans it up, making
  it easier to follow what is happening in ptrace_stop. Then it cleans up
  the exec interactions with coredumps, then the coredump interactions
  with exit, and finally the coredump interactions with the signal
  handling code.

  The first and last changes are bug fixes for minor bugs. I believe the
  fact that vfork followed by execve can kill the process that called
  vfork if exec fails is sufficient justification to change the userspace
  visible behavior.

  In previous discussions some of these changes were organized differently
  and individually appeared to make the code base worse. As currently
  written I believe they all stand on their own as cleanups and bug fixes.
  Which means that even if the worst should happen and the last change
  needs to be reverted for some unimaginable reason, the code base will
  still be improved.

  If the worst does not happen there are more cleanups that can be made.
  Signals that generate coredumps can easily become eligible for short
  circuit delivery in complete_signal. The entire rendezvous for
  generating a coredump can move into get_signal. The function
  force_sig_info_to_task can be written in a way that does not modify the
  signal handling state of the target task (because coredumps are eligible
  for short circuit delivery). Many of these future cleanups can be done
  another way, but nothing so cleanly as if coredumps become per
  signal_struct."

* 'per_signal_struct_coredumps-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
  coredump: Limit coredumps to a single thread group
  coredump: Don't perform any cleanups before dumping core
  exit: Factor coredump_exit_mm out of exit_mm
  exec: Check for a pending fatal signal instead of core_state
  ptrace: Remove the unnecessary arguments from arch_ptrace_stop
  signal: Remove the bogus sigkill_pending in ptrace_stop
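For context, the vfork-followed-by-failed-execve sequence that the bug fix
addresses looks like this from userspace (a minimal illustrative sketch;
the path and exit status are arbitrary):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char *argv[] = { NULL }, *envp[] = { NULL };
		pid_t pid = vfork();

		if (pid == 0) {
			execve("/nonexistent", argv, envp);	/* exec fails with ENOENT */
			_exit(errno);	/* a vfork child must _exit() after a failed exec */
		}
		/* Per the commit message above, before this series the failed
		   exec in the child could kill this parent process as well. */
		puts("parent survived");
		return 0;
	}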
150 lines, 5.2 KiB, C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2003 Intel Co
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Arun Sharma <arun.sharma@intel.com>
 *
 * 12/07/98	S. Eranian	added pt_regs & switch_stack
 * 12/21/98	D. Mosberger	updated to match latest code
 *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
 *
 */
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <uapi/asm/ptrace.h>

/*
 * Base-2 logarithm of number of pages to allocate per task structure
 * (including register backing store and memory stack):
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER	3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER	2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER	1
#else
# define KERNEL_STACK_SIZE_ORDER	0
#endif

#define IA64_RBS_OFFSET		((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
#define IA64_STK_OFFSET		((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)

#define KERNEL_STACK_SIZE	IA64_STK_OFFSET
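/*
 * Illustrative note, not part of the original header: the order is the
 * base-2 log of the page count, so e.g. with CONFIG_IA64_PAGE_SIZE_16KB,
 * order 1 means 2 pages = 32KiB per task; the 4KB and 8KB page sizes also
 * come out to 32KiB, while larger page sizes use a single page. Within
 * that block the register backing store grows upward from IA64_RBS_OFFSET
 * and the memory stack grows downward from IA64_STK_OFFSET.
 */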
#ifndef __ASSEMBLY__

#include <asm/current.h>
#include <asm/page.h>

/*
 * We use the ia64_psr(regs)->ri to determine which of the three
 * instructions in bundle (16 bytes) took the sample. Generate
 * the canonical representation by adding to instruction pointer.
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
# define instruction_pointer_set(regs, val)	\
({						\
	ia64_psr(regs)->ri = (val & 0xf);	\
	regs->cr_iip = (val & ~0xfULL);		\
})
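/*
 * Worked example, added for illustration: bundles are 16-byte aligned, so
 * the low bits of the canonical IP are free to carry the slot number from
 * ia64_psr(regs)->ri. An instruction in slot 2 of the bundle at 0x1000
 * gets the canonical IP 0x1002, and instruction_pointer_set(regs, 0x1002)
 * splits that back into cr_iip = 0x1000 and ri = 2.
 */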

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->r12;
}

static inline int is_syscall_success(struct pt_regs *regs)
{
	return regs->r10 != -1;
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (is_syscall_success(regs))
		return regs->r8;
	else
		return -regs->r8;
}
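/*
 * Note, added for clarity: this reflects the ia64 syscall convention of a
 * separate error flag -- on failure r10 is set to -1 and r8 holds a
 * positive errno, while on success r10 is 0 and r8 holds the result.
 * regs_return_value() folds the pair back into the kernel's usual
 * negative-errno convention.
 */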

/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs)						\
({									\
	unsigned long __ip = instruction_pointer(regs);			\
	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
})
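/*
 * Worked example, added for illustration: for __ip = 0x1002 (bundle
 * 0x1000, slot 2), profile_pc(regs) yields 0x1000 + (2 << 2) = 0x1008,
 * so the three slots of one bundle land in distinct histogram buckets
 * while address bits 0 and 1 stay clear.
 */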

/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t)	(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs)		((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs)	(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs)					\
  ({								\
	struct task_struct *_task = (task);			\
	struct pt_regs *_regs = (regs);				\
	!user_mode(_regs) && user_stack(_task, _regs);		\
  })
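/*
 * Note, added for clarity: fsys_mode() detects a task in the light-weight
 * "fsyscall" path: it is executing at kernel privilege (so user_mode()
 * fails) but its pt_regs still sit where an entry from user level leaves
 * them, meaning no full kernel entry has taken place.
 */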

/*
 * System call handlers that, upon successful completion, need to return a negative value
 * should call force_successful_syscall_return() right before returning.  On architectures
 * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
 * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
 * flag will not get set.  On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
 * or something along those lines).
 *
 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
 */
# define force_successful_syscall_return()	(task_pt_regs(current)->r8 = 0)
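/*
 * Usage sketch, added for illustration (sys_example is hypothetical): a
 * handler whose successful result is legitimately negative calls the macro
 * right before returning, so the negative value reaches userspace verbatim
 * instead of being reported as an error:
 *
 *	asmlinkage long sys_example(void)
 *	{
 *		force_successful_syscall_return();
 *		return -5;
 *	}
 */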

struct task_struct;			/* forward decl */
struct unw_frame_info;			/* forward decl */

extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					    unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern void ia64_sync_krbs(void);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				unsigned long, unsigned long);

/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);

extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);

extern void ia64_ptrace_stop(void);
#define arch_ptrace_stop() \
	ia64_ptrace_stop()
#define arch_ptrace_stop_needed() \
	(!test_thread_flag(TIF_RESTORE_RSE))
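/*
 * Note, added for clarity: the generic ptrace code checks
 * arch_ptrace_stop_needed() before a tracee stops and, when it returns
 * true, invokes arch_ptrace_stop(); on ia64 this lets ia64_ptrace_stop()
 * arrange for the register backing store to be flushed out to user memory
 * so the tracer sees a consistent copy. (The series merged above is what
 * reduced these hooks to their argument-free form.)
 */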

extern void ptrace_attach_sync_user_rbs (struct task_struct *);
#define arch_ptrace_attach(child) \
	ptrace_attach_sync_user_rbs(child)

#define arch_has_single_step()  (1)
#define arch_has_block_step()   (1)

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PTRACE_H */