Merge tag 'powerpc-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - Add initial support to recognise the HeXin C2000 processor.

 - Add papr-vpd and papr-sysparm character device drivers for VPD &
   sysparm retrieval, so userspace tools can be adapted to avoid doing
   raw firmware calls from userspace (a usage sketch follows the commit
   list below).

 - Sched domains optimisations for shared processor partitions on P9/P10.

 - A series of optimisations for KVM running as a nested HV under PowerVM.

 - Other small features and fixes.

Thanks to Aditya Gupta, Aneesh Kumar K.V, Arnd Bergmann, Christophe Leroy,
Colin Ian King, Dario Binacchi, David Heidelberg, Geoff Levand, Gustavo
A. R. Silva, Haoran Liu, Jordan Niethe, Kajol Jain, Kevin Hao, Kunwu Chan,
Li kunyu, Li zeming, Masahiro Yamada, Michal Suchánek, Nathan Lynch,
Naveen N Rao, Nicholas Piggin, Randy Dunlap, Sathvika Vasireddy, Srikar
Dronamraju, Stephen Rothwell, Vaibhav Jain, and Zhao Ke.
* tag 'powerpc-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (96 commits)
  powerpc/ps3_defconfig: Disable PPC64_BIG_ENDIAN_ELF_ABI_V2
  powerpc/86xx: Drop unused CONFIG_MPC8610
  powerpc/powernv: Add error handling to opal_prd_range_is_valid
  selftests/powerpc: Fix spelling mistake "EACCESS" -> "EACCES"
  powerpc/hvcall: Reorder Nestedv2 hcall opcodes
  powerpc/ps3: Add missing set_freezable() for ps3_probe_thread()
  powerpc/mpc83xx: Use wait_event_freezable() for freezable kthread
  powerpc/mpc83xx: Add the missing set_freezable() for agent_thread_fn()
  powerpc/fsl: Fix fsl,tmu-calibration to match the schema
  powerpc/smp: Dynamically build Powerpc topology
  powerpc/smp: Avoid asym packing within thread_group of a core
  powerpc/smp: Add __ro_after_init attribute
  powerpc/smp: Disable MC domain for shared processor
  powerpc/smp: Enable Asym packing for cores on shared processor
  powerpc/sched: Cleanup vcpu_is_preempted()
  powerpc: add cpu_spec.cpu_features to vmcoreinfo
  powerpc/imc-pmu: Add a null pointer check in update_events_in_group()
  powerpc/powernv: Add a null pointer check in opal_powercap_init()
  powerpc/powernv: Add a null pointer check in opal_event_init()
  powerpc/powernv: Add a null pointer check to scom_debug_init_one()
  ...
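To illustrate the kind of adaptation the papr-vpd driver enables, here is a minimal
userspace sketch. It is hypothetical: the device node name, PAPR_VPD_IOC_CREATE_HANDLE,
and struct papr_location_code below are assumptions standing in for the driver's uapi
header rather than verified definitions, and the ioctl number is a placeholder. The
intended flow is: open the character device, ask the driver for a handle scoped to a
location code, then read() the staged VPD from the returned file descriptor instead of
issuing the VPD firmware calls directly.

/* Hypothetical sketch of the papr-vpd flow; names and values are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Stand-ins for the real uapi definitions provided by the driver series: */
struct papr_location_code { char str[80]; };
#define PAPR_VPD_IOC_CREATE_HANDLE 0xb001	/* placeholder ioctl number */

int main(int argc, char **argv)
{
	struct papr_location_code lc = { .str = "" };
	char buf[4096];
	ssize_t n;
	int dev_fd, vpd_fd;

	if (argc > 1)
		strncpy(lc.str, argv[1], sizeof(lc.str) - 1);

	dev_fd = open("/dev/papr-vpd", O_RDONLY);
	if (dev_fd < 0)
		return 1;

	/* Ask the driver for a read-only handle covering this location code. */
	vpd_fd = ioctl(dev_fd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
	close(dev_fd);
	if (vpd_fd < 0)
		return 1;

	/* The kernel stages the firmware response; just read it out. */
	while ((n = read(vpd_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(vpd_fd);
	return 0;
}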
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Split from ftrace_64.S
 */

#include <linux/export.h>
#include <linux/magic.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ppc-opcode.h>
#include <asm/thread_info.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

/*
 *
 * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount()
 * when ftrace is active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame; B has not yet
 * had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
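/*
 * For reference, the C callback eventually invoked via ftrace_call /
 * ftrace_regs_call below has the generic ftrace callback prototype (see
 * include/linux/ftrace.h):
 *
 *   void callback(unsigned long ip, unsigned long parent_ip,
 *                 struct ftrace_ops *op, struct ftrace_regs *fregs);
 *
 * The entry macro therefore loads r3 = ip (the patched call site in B),
 * r4 = parent_ip (the original return address in A), r5 = the ftrace_ops
 * pointer read from function_trace_op, and r6 = the address of the saved
 * pt_regs, which is what powerpc's struct ftrace_regs wraps.
 */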
.macro ftrace_regs_entry allregs
	/* Create a minimal stack frame for representing B */
	PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1)

	/* Create our stack frame + pt_regs */
	PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 10, r1)

#ifdef CONFIG_PPC64
	/* Save the original return address in A's stack frame */
	std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
	/* Ok to continue? */
	lbz r3, PACA_FTRACE_ENABLED(r13)
	cmpdi r3, 0
	beq ftrace_no_trace
#endif

	.if \allregs == 1
	SAVE_GPR(2, r1)
	SAVE_GPRS(11, 31, r1)
	.else
#ifdef CONFIG_LIVEPATCH_64
	SAVE_GPR(14, r1)
#endif
	.endif

	/* Save previous stack pointer (r1) */
	addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	PPC_STL r8, GPR1(r1)

	.if \allregs == 1
	/* Load special regs for save below */
	mfmsr r8
	mfctr r9
	mfxer r10
	mfcr r11
	.else
	/* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
	li r8, 0
	.endif

	/* Get the _mcount() call site out of LR */
	mflr r7
	/* Save it as pt_regs->nip */
	PPC_STL r7, _NIP(r1)
	/* Also save it in B's stackframe header for proper unwind */
	PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
	/* Save the real LR in pt_regs->link */
	PPC_STL r0, _LINK(r1)

#ifdef CONFIG_PPC64
	/* Save callee's TOC in the ABI compliant location */
	std r2, STK_GOT(r1)
	LOAD_PACA_TOC() /* get kernel TOC in r2 */
	LOAD_REG_ADDR(r3, function_trace_op)
	ld r5,0(r3)
#else
	lis r3,function_trace_op@ha
	lwz r5,function_trace_op@l(r3)
#endif

#ifdef CONFIG_LIVEPATCH_64
	mr r14, r7 /* remember old NIP */
#endif

	/* Calculate ip from nip-4 into r3 for call below */
	subi r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr r4, r0

	/* Save special regs */
	PPC_STL r8, _MSR(r1)
	.if \allregs == 1
	PPC_STL r9, _CTR(r1)
	PPC_STL r10, _XER(r1)
	PPC_STL r11, _CCR(r1)
	.endif

	/* Load &pt_regs in r6 for call below */
	addi r6, r1, STACK_INT_FRAME_REGS
.endm

.macro ftrace_regs_exit allregs
	/* Load ctr with the possibly modified NIP */
	PPC_LL r3, _NIP(r1)
	mtctr r3

#ifdef CONFIG_LIVEPATCH_64
	cmpd r14, r3 /* has NIP been altered? */
#endif

	/* Restore gprs */
	.if \allregs == 1
	REST_GPRS(2, 31, r1)
	.else
	REST_GPRS(3, 10, r1)
#ifdef CONFIG_LIVEPATCH_64
	REST_GPR(14, r1)
#endif
	.endif

	/* Restore possibly modified LR */
	PPC_LL r0, _LINK(r1)
	mtlr r0

#ifdef CONFIG_PPC64
	/* Restore callee's TOC */
	ld r2, STK_GOT(r1)
#endif

	/* Pop our stack frame */
	addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE

#ifdef CONFIG_LIVEPATCH_64
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne- livepatch_handler
#endif
	bctr /* jump after _mcount site */
.endm

_GLOBAL(ftrace_regs_caller)
	ftrace_regs_entry 1
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_regs_call
ftrace_regs_call:
	bl ftrace_stub
	ftrace_regs_exit 1

_GLOBAL(ftrace_caller)
	ftrace_regs_entry 0
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
	bl ftrace_stub
	ftrace_regs_exit 0

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_PPC64
ftrace_no_trace:
	mflr r3
	mtctr r3
	REST_GPR(3, r1)
	addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	mtlr r0
	bctr
#endif

#ifdef CONFIG_LIVEPATCH_64
/*
 * This function runs in the mcount context, between two functions. As
 * such it can only clobber registers which are volatile and used in
 * function linkage.
 *
 * We get here when a function A calls another function B, but B has
 * been live patched with a new function C.
 *
 * On entry:
 *  - we have no stack frame and can not allocate one
 *  - LR points back to the original caller (in A)
 *  - CTR holds the new NIP in C
 *  - r0, r11 & r12 are free
 */
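/*
 * Each entry pushed on the livepatch stack below is 3 x 8 bytes, laid out
 * (relative to the updated TI_livepatch_sp value held in r11) as:
 *
 *   -24(r11): saved r2 (TOC)
 *   -16(r11): saved LR (real return address into A)
 *    -8(r11): STACK_END_MAGIC marker, checked on the way back
 */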
livepatch_handler:
	ld r12, PACA_THREAD_INFO(r13)

	/* Allocate 3 x 8 bytes */
	ld r11, TI_livepatch_sp(r12)
	addi r11, r11, 24
	std r11, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std r2, -24(r11)
	mflr r12
	std r12, -16(r11)

	/* Store stack end marker */
	lis r12, STACK_END_MAGIC@h
	ori r12, r12, STACK_END_MAGIC@l
	std r12, -8(r11)

	/* Put ctr in r12 for global entry and branch there */
	mfctr r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r11, r12 and we can use r2 until we
	 * restore it.
	 */

	ld r12, PACA_THREAD_INFO(r13)

	ld r11, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis r2, STACK_END_MAGIC@h
	ori r2, r2, STACK_END_MAGIC@l
	ld r12, -8(r11)
1:	tdne r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld r12, -16(r11)
	mtlr r12
	ld r2, -24(r11)

	/* Pop livepatch stack frame */
	ld r12, PACA_THREAD_INFO(r13)
	subi r11, r11, 24
	std r11, TI_livepatch_sp(r12)

	/* Return to original caller of live patched function */
	blr
#endif /* CONFIG_LIVEPATCH_64 */

#ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
	mflr r12
	mtctr r12
	mtlr r0
	bctr
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(return_to_handler)
	/* need to save return values */
#ifdef CONFIG_PPC64
	std r4, -32(r1)
	std r3, -24(r1)
	/* save TOC */
	std r2, -16(r1)
	std r31, -8(r1)
	mr r31, r1
	stdu r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	LOAD_PACA_TOC()
#else
	stwu r1, -16(r1)
	stw r3, 8(r1)
	stw r4, 12(r1)
#endif

	bl ftrace_return_to_handler
	nop

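	/*
	 * ftrace_return_to_handler() is the C helper in kernel/trace/fgraph.c,
	 * roughly: unsigned long ftrace_return_to_handler(unsigned long fp).
	 * It pops the pending return-hook entry and hands back the original
	 * return address of the traced function, which is installed into LR
	 * below before we branch back to it.
	 */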
	/* return value has real return address */
	mtlr r3

#ifdef CONFIG_PPC64
	ld r1, 0(r1)
	ld r4, -32(r1)
	ld r3, -24(r1)
	ld r2, -16(r1)
	ld r31, -8(r1)
#else
	lwz r3, 8(r1)
	lwz r4, 12(r1)
	addi r1, r1, 16
#endif

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
	.space 32
.popsection

.pushsection ".tramp.ftrace.init","aw",@progbits;
.globl ftrace_tramp_init
ftrace_tramp_init:
	.space 32
.popsection