sched, net: Fixup busy_loop_us_clock()
The only valid use of preempt_enable_no_resched() is if the very next line is schedule(), or if we know preemption cannot actually be enabled by that statement due to further known preempt_count 'refs'.

This busy_poll stuff looks to be completely and utterly broken: sched_clock() can return utter garbage with interrupts enabled (rare, but still), and it can drift unbounded between CPUs. This means that if you get preempted/migrated and your new CPU is years behind the previous CPU, we get to busy spin for a _very_ long time.

There is a _REASON_ sched_clock() warns about preemptability; papering over it with a preempt_disable()/preempt_enable_no_resched() pair is just terminal brain damage on so many levels.

Replace the sched_clock() usage with local_clock(), which has a bounded drift between CPUs (< 2 jiffies).

There is a further problem with the entire busy-wait poll thing: the spin time is additive to the syscall timeout, not inclusive.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: rui.zhang@intel.com
Cc: jacob.jun.pan@linux.intel.com
Cc: Mike Galbraith <bitbucket@online.de>
Cc: hpa@zytor.com
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: lenb@kernel.org
Cc: rjw@rjwysocki.net
Cc: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20131119151338.GF3694@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
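For illustration, a minimal sketch of the distinction the first paragraph draws; valid_pattern() and broken_pattern() are hypothetical names made up for this example, and the broken body is the shape the patch below removes:

	#include <linux/preempt.h>
	#include <linux/sched.h>

	/*
	 * Valid: the very next statement after the no_resched enable is
	 * schedule(), so a reschedule requested while preemption was off
	 * cannot be lost.
	 */
	static void valid_pattern(void)
	{
		preempt_disable();
		/* ... per-cpu work ... */
		preempt_enable_no_resched();
		schedule();
	}

	/*
	 * Broken: nothing after the enable ever schedules, so a preemption
	 * requested during the critical section is silently dropped until
	 * something else happens to reschedule.
	 */
	static u64 broken_pattern(void)
	{
		u64 rc;

		preempt_disable_notrace();
		rc = sched_clock();
		preempt_enable_no_resched_notrace();
		return rc >> 10;
	}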
parent 1774e9f3e5
commit 3708983452
1 changed file with 1 addition and 18 deletions
include/net/busy_poll.h:

@@ -42,27 +42,10 @@ static inline bool net_busy_loop_on(void)
 	return sysctl_net_busy_poll;
 }
 
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
 static inline u64 busy_loop_us_clock(void)
 {
-	u64 rc;
-
-	preempt_disable_notrace();
-	rc = sched_clock();
-	preempt_enable_no_resched_notrace();
-
-	return rc >> 10;
+	return local_clock() >> 10;
 }
-#else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 busy_loop_us_clock(void)
-{
-	return sched_clock() >> 10;
-}
-#endif /* CONFIG_DEBUG_PREEMPT */
 
 static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
 {
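To make the spin-time failure concrete, here is a sketch of the timeout logic that busy_loop_us_clock() feeds (sk_busy_loop_end_time() appears as context above; the bodies are reconstructed from the busy_poll.h of that era, not part of this hunk):

	static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
	{
		/* spin budget in microseconds; note it is added on top of
		 * the syscall timeout rather than counted against it (the
		 * 'additive' problem noted in the changelog) */
		return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
	}

	static inline bool busy_loop_timeout(unsigned long end_time)
	{
		unsigned long now = busy_loop_us_clock();

		/*
		 * With sched_clock(), migrating to a CPU whose clock lags
		 * arbitrarily far behind makes 'now' jump backwards, and the
		 * spin ends only when that clock catches up to end_time.
		 * local_clock() bounds the inter-CPU drift to < 2 jiffies.
		 */
		return time_after(now, end_time);
	}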