All MSI vectors are multiplexed into a single notification vector when
posted MSI is enabled. It is the responsibility of the notification vector
handler to demultiplex MSI vectors. In the handler, the MSI vector handlers
are dispatched without IDT delivery for each pending MSI interrupt.

For example, the interrupt flow will change as follows:
(3 MSIs of different vectors arrive in a high-frequency burst)

BEFORE:
interrupt(MSI)
    irq_enter()
    handler() /* EOI */
    irq_exit()
        process_softirq()
interrupt(MSI)
    irq_enter()
    handler() /* EOI */
    irq_exit()
        process_softirq()
interrupt(MSI)
    irq_enter()
    handler() /* EOI */
    irq_exit()
        process_softirq()

AFTER:
interrupt /* Posted MSI notification vector */
    irq_enter()
        atomic_xchg(PIR)
        handler()
        handler()
        handler()
        pi_clear_on()
        apic_eoi()
    irq_exit()
        process_softirq()

Except for the leading MSI, CPU notifications are skipped/coalesced.

For MSIs which arrive at a low frequency, the demultiplexing loop does not
wait for more interrupts to coalesce. Therefore, there's no additional
latency other than the processing time.

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240423174114.526704-9-jacob.jun.pan@linux.intel.com
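To make the drain-and-dispatch step concrete, here is a minimal standalone
C model of the demultiplexing loop. This is an illustration only, not the
kernel implementation: the names pir, snapshot and dispatch_vector() are
invented for the sketch, and the real handler additionally integrates with
the kernel's pi_desc, vector-to-irq lookup and retry logic.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * 256-bit Posted Interrupt Request (PIR) field: one bit per vector,
 * modeled as four atomic 64-bit chunks.
 */
static _Atomic uint64_t pir[4];

/* Stand-in for invoking the MSI handler of a given vector. */
static void dispatch_vector(int vec)
{
        printf("handling vector %d\n", vec);
}

static void handle_pending_pir(void)
{
        uint64_t snapshot[4];
        int i, b;

        /*
         * Atomically grab-and-clear each PIR chunk. An MSI posted after
         * the exchange sets its bit again and raises a fresh
         * notification, so nothing is lost.
         */
        for (i = 0; i < 4; i++)
                snapshot[i] = atomic_exchange(&pir[i], 0);

        /*
         * Dispatch every pending vector back to back: one IDT delivery
         * and one EOI cover the whole batch.
         */
        for (i = 0; i < 4; i++)
                for (b = 0; b < 64; b++)
                        if (snapshot[i] & (1ULL << b))
                                dispatch_vector(i * 64 + b);
}

int main(void)
{
        /* Three MSIs of different vectors arriving in a burst. */
        atomic_fetch_or(&pir[0], 1ULL << 40);   /* vector 40 */
        atomic_fetch_or(&pir[0], 1ULL << 41);   /* vector 41 */
        atomic_fetch_or(&pir[1], 1ULL << 2);    /* vector 66 */

        handle_pending_pir();
        return 0;
}

Run under the model, a single "notification" drains and handles all three
vectors, mirroring the AFTER flow above.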
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <asm/current.h>

typedef struct {
#if IS_ENABLED(CONFIG_KVM_INTEL)
        u8           kvm_cpu_l1tf_flush_l1d;
#endif
        unsigned int __nmi_count;       /* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count;
        unsigned int icr_read_retry_count;
#endif
#if IS_ENABLED(CONFIG_KVM)
        unsigned int kvm_posted_intr_ipis;
        unsigned int kvm_posted_intr_wakeup_ipis;
        unsigned int kvm_posted_intr_nested_ipis;
#endif
        unsigned int x86_platform_ipis; /* arch dependent */
        unsigned int apic_perf_irqs;
        unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
#endif
        unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
        unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        unsigned int irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE_AMD
        unsigned int irq_deferred_error_count;
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
        unsigned int irq_hv_callback_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV)
        unsigned int irq_hv_reenlightenment_count;
        unsigned int hyperv_stimer0_count;
#endif
#ifdef CONFIG_X86_POSTED_MSI
        unsigned int posted_msi_notification_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#ifdef CONFIG_X86_POSTED_MSI
DECLARE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
#endif

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)    this_cpu_inc(irq_stat.member)

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu       arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat           arch_irq_stat

#define local_softirq_pending_ref       pcpu_hot.softirq_pending

#if IS_ENABLED(CONFIG_KVM_INTEL)
static inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
        __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
        __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
        return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */

#endif /* _ASM_X86_HARDIRQ_H */
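For context, the two CONFIG_X86_POSTED_MSI additions in this header (the
posted_msi_notification_count statistic and the per-CPU posted_msi_pi_desc
descriptor) are consumed by the notification vector handler added elsewhere
in the series. The snippet below is a hedged sketch of that usage,
abbreviated from the flow in the changelog; it is not the verbatim handler.

DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
{
        struct pi_desc *pid = this_cpu_ptr(&posted_msi_pi_desc);

        inc_irq_stat(posted_msi_notification_count);

        /*
         * ... atomically drain pid->pir and invoke the handler of each
         * pending MSI vector without a separate IDT delivery ...
         */

        pi_clear_on(pid);       /* re-arm CPU notifications */
        apic_eoi();             /* single EOI for the whole batch */
}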