rcu/kvfree: Move need_offload_krc() out of krcp->lock
The need_offload_krc() function currently holds the krcp->lock in order to safely check krcp->head. This commit removes the need for that lock in need_offload_krc() by updating the krcp->head pointer with the WRITE_ONCE() macro, so that readers can carry out lockless loads of that pointer.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 8c15a9e808
commit 8fc5494ad5
1 changed file with 4 additions and 7 deletions
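To illustrate the pattern described above, here is a minimal userspace sketch of the writer/reader pairing: the writer still updates ->head under its lock but publishes the pointer with WRITE_ONCE(), while the "are objects still queued?" check loads it with READ_ONCE() without taking the lock. This is only a sketch under simplifying assumptions, not the kernel code: the struct and function names (krc_sketch, enqueue, need_offload) are hypothetical, WRITE_ONCE()/READ_ONCE() are stubbed with volatile accesses, and a pthread mutex stands in for krcp->lock.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stubbed stand-ins for the kernel's WRITE_ONCE()/READ_ONCE() macros. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct node { struct node *next; };

struct krc_sketch {
	pthread_mutex_t lock;   /* stands in for krcp->lock */
	struct node *head;      /* stands in for krcp->head */
};

/* Writer side: enqueue under the lock, but publish ->head with WRITE_ONCE(). */
static void enqueue(struct krc_sketch *krcp, struct node *n)
{
	pthread_mutex_lock(&krcp->lock);
	n->next = krcp->head;
	WRITE_ONCE(krcp->head, n);
	pthread_mutex_unlock(&krcp->lock);
}

/* Reader side: lockless "is any object still queued?" check. */
static bool need_offload(struct krc_sketch *krcp)
{
	return READ_ONCE(krcp->head) != NULL;
}

int main(void)
{
	struct krc_sketch krc = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct node n = { .next = NULL };

	printf("queued before enqueue: %d\n", need_offload(&krc));
	enqueue(&krc, &n);
	printf("queued after enqueue:  %d\n", need_offload(&krc));
	return 0;
}

A momentarily stale answer from the lockless check is tolerable in this context, since its only effect is to rearm (or skip rearming) the delayed monitor work, which presumably is why plain WRITE_ONCE()/READ_ONCE() annotations are sufficient here.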
kernel/rcu/tree.c

@@ -3194,7 +3194,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
 			// objects queued on the linked list.
 			if (!krwp->head_free) {
 				krwp->head_free = krcp->head;
-				krcp->head = NULL;
+				WRITE_ONCE(krcp->head, NULL);
 			}
 
 			WRITE_ONCE(krcp->count, 0);
@@ -3208,6 +3208,8 @@ static void kfree_rcu_monitor(struct work_struct *work)
 		}
 	}
 
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
 	// If there is nothing to detach, it means that our job is
 	// successfully done here. In case of having at least one
 	// of the channels that is still busy we should rearm the
@@ -3215,8 +3217,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	// still in progress.
 	if (need_offload_krc(krcp))
 		schedule_delayed_monitor_work(krcp);
-
-	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
 static enum hrtimer_restart
@@ -3386,7 +3386,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 
 		head->func = ptr;
 		head->next = krcp->head;
-		krcp->head = head;
+		WRITE_ONCE(krcp->head, head);
 		success = true;
 	}
 
@@ -3463,15 +3463,12 @@ static struct shrinker kfree_rcu_shrinker = {
 void __init kfree_rcu_scheduler_running(void)
 {
 	int cpu;
-	unsigned long flags;
 
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
-		raw_spin_lock_irqsave(&krcp->lock, flags);
 		if (need_offload_krc(krcp))
 			schedule_delayed_monitor_work(krcp);
-		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }
 