Updates for timers and timekeeping:
Merge tag 'timers-urgent-2025-02-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:

 - Properly cast the input to secs_to_jiffies() to unsigned long.
   Otherwise the result uses the data type of the input variable, which
   causes result range checks to fail if the input data type is signed
   and smaller than unsigned long.

 - Handle late-armed hrtimers gracefully on CPU hotplug.

   There are legitimate cases where a hrtimer is (re)armed on an
   outgoing CPU after the timers have been migrated away. This triggers
   warnings and caused people to implement horrible workarounds in RCU.
   Those workarounds are incomplete and do not cover e.g. the scheduler
   hrtimers.

   Stop this by force-moving timers which are enqueued on the current
   CPU after timer migration so that they are queued on a remote online
   CPU instead. This allows the workarounds to be removed in a separate
   step.

 - Demote a warning-level printk() to info level in the clocksource
   watchdog code, as there is no point in emitting a warning-level
   message for purely informational output.

 - Mark a helper function __always_inline and move it into the existing
   #ifdef block to avoid 'unused function' warnings from clang.

* tag 'timers-urgent-2025-02-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  jiffies: Cast to unsigned long in secs_to_jiffies() conversion
  clocksource: Use pr_info() for "Checking clocksource synchronization" message
  hrtimers: Force migrate away hrtimers queued after CPUHP_AP_HRTIMERS_DYING
  hrtimers: Mark is_migration_base() with __always_inline
commit f286757b64

4 changed files with 98 additions and 33 deletions
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -125,6 +125,7 @@ struct hrtimer_cpu_base {
 	ktime_t				softirq_expires_next;
 	struct hrtimer			*softirq_next_timer;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
+	call_single_data_t		csd;
 } ____cacheline_aligned;
 
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -537,7 +537,7 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
  *
  * Return: jiffies value
  */
-#define secs_to_jiffies(_secs) ((_secs) * HZ)
+#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
 
 extern unsigned long __usecs_to_jiffies(const unsigned int u);
 #if !(USEC_PER_SEC % HZ)
 
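The effect of that cast is easiest to see outside the kernel. The small, self-contained userspace C sketch below is not from the patch (the HZ value and names are made up): it shows that the old macro's result takes the promoted type of its input, while the fixed macro always yields an unsigned long, which is what callers comparing against unsigned long limits expect.

/* Illustration only -- plain userspace C, not kernel code. */
#include <stdio.h>

#define HZ 1000	/* stand-in for CONFIG_HZ, which is a plain int constant */

#define secs_to_jiffies_old(_secs)	((_secs) * HZ)
#define secs_to_jiffies_new(_secs)	(unsigned long)((_secs) * HZ)

int main(void)
{
	short secs = 5;	/* signed input, narrower than unsigned long */

	/* Old macro: the result is a promoted (signed) int. */
	printf("old result is int:           %d\n",
	       _Generic(secs_to_jiffies_old(secs), int: 1, unsigned long: 0));

	/* New macro: the result is unsigned long for any input type. */
	printf("new result is unsigned long: %d\n",
	       _Generic(secs_to_jiffies_new(secs), int: 0, unsigned long: 1));

	return 0;
}

With the old form, comparing the result against an unsigned long bound is a mixed signed/unsigned comparison, which is how the range checks mentioned in the changelog could misbehave.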
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -382,7 +382,8 @@ void clocksource_verify_percpu(struct clocksource *cs)
 		return;
 	}
 	testcpu = smp_processor_id();
-	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
+	pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
+		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
 	for_each_cpu(cpu, &cpus_chosen) {
 		if (cpu == testcpu)
 			continue;
 
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -58,6 +58,8 @@
 #define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
 #define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
 
+static void retrigger_next_event(void *arg);
+
 /*
  * The timer bases:
  *
 
@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 			.clockid = CLOCK_TAI,
 			.get_time = &ktime_get_clocktai,
 		},
-	}
+	},
+	.csd = CSD_INIT(retrigger_next_event, NULL)
 };
 
 static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 
@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
 };
 
+static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
+{
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	else
+		return likely(base->online);
+}
+
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
 
@@ -145,11 +156,6 @@ static struct hrtimer_cpu_base migration_cpu_base = {
 
 #define migration_base	migration_cpu_base.clock_base[0]
 
-static inline bool is_migration_base(struct hrtimer_clock_base *base)
-{
-	return base == &migration_base;
-}
-
 /*
  * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
  * means that all timers which are tied to this base via timer->base are
 
@@ -183,27 +189,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 }
 
 /*
- * We do not migrate the timer when it is expiring before the next
- * event on the target cpu. When high resolution is enabled, we cannot
- * reprogram the target cpu hardware and we would cause it to fire
- * late. To keep it simple, we handle the high resolution enabled and
- * disabled case similar.
+ * Check if the elected target is suitable considering its next
+ * event and the hotplug state of the current CPU.
+ *
+ * If the elected target is remote and its next event is after the timer
+ * to queue, then a remote reprogram is necessary. However there is no
+ * guarantee the IPI handling the operation would arrive in time to meet
+ * the high resolution deadline. In this case the local CPU becomes a
+ * preferred target, unless it is offline.
+ *
+ * High and low resolution modes are handled the same way for simplicity.
  *
  * Called with cpu_base->lock of target cpu held.
  */
-static int
-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
+				    struct hrtimer_cpu_base *new_cpu_base,
+				    struct hrtimer_cpu_base *this_cpu_base)
 {
 	ktime_t expires;
 
+	/*
+	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
+	 * guarantees it is online.
+	 */
+	if (new_cpu_base == this_cpu_base)
+		return true;
+
+	/*
+	 * The offline local CPU can't be the default target if the
+	 * next remote target event is after this timer. Keep the
+	 * elected new base. An IPI will we issued to reprogram
+	 * it as a last resort.
+	 */
+	if (!hrtimer_base_is_online(this_cpu_base))
+		return true;
+
 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-	return expires < new_base->cpu_base->expires_next;
+
+	return expires >= new_base->cpu_base->expires_next;
 }
 
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
-					 int pinned)
+static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
 {
+	if (!hrtimer_base_is_online(base)) {
+		int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
+
+		return &per_cpu(hrtimer_bases, cpu);
+	}
+
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 	if (static_branch_likely(&timers_migration_enabled) && !pinned)
 		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
 
@@ -254,8 +287,8 @@ again:
 		raw_spin_unlock(&base->cpu_base->lock);
 		raw_spin_lock(&new_base->cpu_base->lock);
 
-		if (new_cpu_base != this_cpu_base &&
-		    hrtimer_check_target(timer, new_base)) {
+		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
+					     this_cpu_base)) {
 			raw_spin_unlock(&new_base->cpu_base->lock);
 			raw_spin_lock(&base->cpu_base->lock);
 			new_cpu_base = this_cpu_base;
 
@@ -264,8 +297,7 @@ again:
 		}
 		WRITE_ONCE(timer->base, new_base);
 	} else {
-		if (new_cpu_base != this_cpu_base &&
-		    hrtimer_check_target(timer, new_base)) {
+		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
 			new_cpu_base = this_cpu_base;
 			goto again;
 		}
 
@@ -275,11 +307,6 @@
 
 #else /* CONFIG_SMP */
 
-static inline bool is_migration_base(struct hrtimer_clock_base *base)
-{
-	return false;
-}
-
 static inline struct hrtimer_clock_base *
 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 	__acquires(&timer->base->cpu_base->lock)
 
@@ -716,8 +743,6 @@ static inline int hrtimer_is_hres_enabled(void)
 	return hrtimer_hres_enabled;
 }
 
-static void retrigger_next_event(void *arg);
-
 /*
  * Switch to high resolution mode
  */
 
@@ -1205,6 +1230,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 				    u64 delta_ns, const enum hrtimer_mode mode,
 				    struct hrtimer_clock_base *base)
 {
+	struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *new_base;
 	bool force_local, first;
 
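For orientation, __hrtimer_start_range_ns() modified here is the common backend that hrtimer_start() and related helpers funnel into. A minimal, hypothetical module using the public hrtimer API looks roughly like the sketch below (the demo_* names and the 100 ms period are made up, not part of the patch):

/* Hypothetical minimal hrtimer user, for orientation only. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	pr_info("demo hrtimer fired\n");
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	/* Arming the timer ends up in __hrtimer_start_range_ns() shown above. */
	hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");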
@@ -1216,9 +1242,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	 * and enforce reprogramming after it is queued no matter whether
 	 * it is the new first expiring timer again or not.
 	 */
-	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
+	force_local = base->cpu_base == this_cpu_base;
 	force_local &= base->cpu_base->next_timer == timer;
 
+	/*
+	 * Don't force local queuing if this enqueue happens on a unplugged
+	 * CPU after hrtimer_cpu_dying() has been invoked.
+	 */
+	force_local &= this_cpu_base->online;
+
 	/*
 	 * Remove an active timer from the queue. In case it is not queued
 	 * on the current CPU, make sure that remove_hrtimer() updates the
 
@@ -1248,8 +1280,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	}
 
 	first = enqueue_hrtimer(timer, new_base, mode);
-	if (!force_local)
-		return first;
+	if (!force_local) {
+		/*
+		 * If the current CPU base is online, then the timer is
+		 * never queued on a remote CPU if it would be the first
+		 * expiring timer there.
+		 */
+		if (hrtimer_base_is_online(this_cpu_base))
+			return first;
+
+		/*
+		 * Timer was enqueued remote because the current base is
+		 * already offline. If the timer is the first to expire,
+		 * kick the remote CPU to reprogram the clock event.
+		 */
+		if (first) {
+			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
+
+			smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
+		}
+		return 0;
+	}
 
 	/*
 	 * Timer was forced to stay on the current CPU to avoid
 
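The remote kick above uses the csd member added to struct hrtimer_cpu_base in the first hunk: it is set up once to invoke retrigger_next_event(), and smp_call_function_single_async() queues that call on the target CPU so its clockevent gets reprogrammed. The generic pattern, as a minimal hypothetical module sketch (demo_* names are made up; the hrtimer patch itself uses the static CSD_INIT() initializer rather than runtime INIT_CSD()), might look like this:

/* Hypothetical minimal module showing the async cross-CPU call pattern. */
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

static call_single_data_t demo_csd;

/* Runs in IPI context on the target CPU, like retrigger_next_event() above. */
static void demo_remote_work(void *info)
{
	pr_info("demo csd ran on CPU %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	unsigned int target;

	INIT_CSD(&demo_csd, demo_remote_work, NULL);

	/* Pick some online CPU other than the current one and kick it. */
	target = cpumask_any_but(cpu_online_mask, raw_smp_processor_id());
	if (target < nr_cpu_ids)
		smp_call_function_single_async(target, &demo_csd);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");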
@@ -1370,6 +1421,18 @@ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
 	}
 }
 
+#ifdef CONFIG_SMP
+static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
+{
+	return base == &migration_base;
+}
+#else
+static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
+{
+	return false;
+}
+#endif
+
 /*
  * This function is called on PREEMPT_RT kernels when the fast path
  * deletion of a timer failed because the timer callback function was