
um: convert irq_lock to raw spinlock

Since this is deep in the architecture, and the code is
called nested inside other deep management code, this
really needs to be a raw spinlock. Convert it.

Link: https://patch.msgid.link/20250110125550.32479-8-johannes@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Commit 96178631c3 (parent daa1a05ba4)
Authored by Johannes Berg on 2025-01-10 13:54:06 +01:00; committed by Richard Weinberger
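Background note (not part of the patch): on PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock, so a lock taken from low-level interrupt entry and suspend/resume paths, as irq_lock is here, must be a raw_spinlock_t, which keeps busy-waiting semantics everywhere. A minimal sketch of the locking pattern the patch converts to; irq_lock matches the lock in the diff, while example_irq_path() is a hypothetical caller used only for illustration:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(irq_lock);

static void example_irq_path(void)
{
        unsigned long flags;

        /* raw variant: disables local interrupts and never sleeps, even on PREEMPT_RT */
        raw_spin_lock_irqsave(&irq_lock, flags);
        /* ... walk active_fds and update per-fd IRQ state ... */
        raw_spin_unlock_irqrestore(&irq_lock, flags);
}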

arch/um/kernel/irq.c

@@ -52,7 +52,7 @@ struct irq_entry {
         bool sigio_workaround;
 };
 
-static DEFINE_SPINLOCK(irq_lock);
+static DEFINE_RAW_SPINLOCK(irq_lock);
 static LIST_HEAD(active_fds);
 static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
 static bool irqs_suspended;
@@ -257,7 +257,7 @@ static struct irq_entry *get_irq_entry_by_fd(int fd)
         return NULL;
 }
 
-static void free_irq_entry(struct irq_entry *to_free, bool remove)
+static void remove_irq_entry(struct irq_entry *to_free, bool remove)
 {
         if (!to_free)
                 return;
@@ -265,7 +265,6 @@ static void free_irq_entry(struct irq_entry *to_free, bool remove)
         if (remove)
                 os_del_epoll_fd(to_free->fd);
         list_del(&to_free->list);
-        kfree(to_free);
 }
 
 static bool update_irq_entry(struct irq_entry *entry)
@@ -286,17 +285,19 @@ static bool update_irq_entry(struct irq_entry *entry)
         return false;
 }
 
-static void update_or_free_irq_entry(struct irq_entry *entry)
+static struct irq_entry *update_or_remove_irq_entry(struct irq_entry *entry)
 {
-        if (!update_irq_entry(entry))
-                free_irq_entry(entry, false);
+        if (update_irq_entry(entry))
+                return NULL;
+        remove_irq_entry(entry, false);
+        return entry;
 }
 
 static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
                        void (*timetravel_handler)(int, int, void *,
                                                   struct time_travel_event *))
 {
-        struct irq_entry *irq_entry;
+        struct irq_entry *irq_entry, *to_free = NULL;
         int err, events = os_event_mask(type);
         unsigned long flags;
 
@@ -304,9 +305,10 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
         if (err < 0)
                 goto out;
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         irq_entry = get_irq_entry_by_fd(fd);
         if (irq_entry) {
+already:
                 /* cannot register the same FD twice with the same type */
                 if (WARN_ON(irq_entry->reg[type].events)) {
                         err = -EALREADY;
@@ -316,11 +318,22 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
                 /* temporarily disable to avoid IRQ-side locking */
                 os_del_epoll_fd(fd);
         } else {
-                irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
-                if (!irq_entry) {
-                        err = -ENOMEM;
-                        goto out_unlock;
+                struct irq_entry *new;
+
+                /* don't restore interrupts */
+                raw_spin_unlock(&irq_lock);
+                new = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
+                if (!new) {
+                        local_irq_restore(flags);
+                        return -ENOMEM;
                 }
+                raw_spin_lock(&irq_lock);
+                irq_entry = get_irq_entry_by_fd(fd);
+                if (irq_entry) {
+                        to_free = new;
+                        goto already;
+                }
+                irq_entry = new;
                 irq_entry->fd = fd;
                 list_add_tail(&irq_entry->list, &active_fds);
                 maybe_sigio_broken(fd);
@@ -339,12 +352,11 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
 #endif
 
         WARN_ON(!update_irq_entry(irq_entry));
-        spin_unlock_irqrestore(&irq_lock, flags);
-
-        return 0;
+        err = 0;
 out_unlock:
-        spin_unlock_irqrestore(&irq_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
 out:
+        kfree(to_free);
         return err;
 }
 
@@ -358,19 +370,20 @@ void free_irq_by_fd(int fd)
         struct irq_entry *to_free;
         unsigned long flags;
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         to_free = get_irq_entry_by_fd(fd);
-        free_irq_entry(to_free, true);
-        spin_unlock_irqrestore(&irq_lock, flags);
+        remove_irq_entry(to_free, true);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
+        kfree(to_free);
 }
 EXPORT_SYMBOL(free_irq_by_fd);
 
 static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
 {
-        struct irq_entry *entry;
+        struct irq_entry *entry, *to_free = NULL;
         unsigned long flags;
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         list_for_each_entry(entry, &active_fds, list) {
                 enum um_irq_type i;
 
@@ -386,12 +399,13 @@ static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
                                 os_del_epoll_fd(entry->fd);
                                 reg->events = 0;
-                                update_or_free_irq_entry(entry);
+                                to_free = update_or_remove_irq_entry(entry);
                                 goto out;
                         }
                 }
         }
 out:
-        spin_unlock_irqrestore(&irq_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
+        kfree(to_free);
 }
 
 void deactivate_fd(int fd, int irqnum)
@@ -402,7 +416,7 @@ void deactivate_fd(int fd, int irqnum)
 
         os_del_epoll_fd(fd);
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         entry = get_irq_entry_by_fd(fd);
         if (!entry)
                 goto out;
@@ -414,9 +428,10 @@ void deactivate_fd(int fd, int irqnum)
                 entry->reg[i].events = 0;
         }
 
-        update_or_free_irq_entry(entry);
+        entry = update_or_remove_irq_entry(entry);
 out:
-        spin_unlock_irqrestore(&irq_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
+        kfree(entry);
 
         ignore_sigio_fd(fd);
 }
@@ -546,7 +561,7 @@ void um_irqs_suspend(void)
         irqs_suspended = true;
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         list_for_each_entry(entry, &active_fds, list) {
                 enum um_irq_type t;
                 bool clear = true;
 
@@ -579,7 +594,7 @@ void um_irqs_suspend(void)
                                 !__ignore_sigio_fd(entry->fd);
                 }
         }
-        spin_unlock_irqrestore(&irq_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
 }
 
 void um_irqs_resume(void)
@@ -588,7 +603,7 @@ void um_irqs_resume(void)
         unsigned long flags;
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         list_for_each_entry(entry, &active_fds, list) {
                 if (entry->suspended) {
                         int err = os_set_fd_async(entry->fd);
 
@@ -602,7 +617,7 @@ void um_irqs_resume(void)
                         }
                 }
         }
-        spin_unlock_irqrestore(&irq_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
 
         irqs_suspended = false;
         send_sigio_to_self();
@@ -613,7 +628,7 @@ static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
         struct irq_entry *entry;
         unsigned long flags;
 
-        spin_lock_irqsave(&irq_lock, flags);
+        raw_spin_lock_irqsave(&irq_lock, flags);
         list_for_each_entry(entry, &active_fds, list) {
                 enum um_irq_type t;
 
@@ -628,7 +643,7 @@ static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
                 }
         }
 unlock:
-        spin_unlock_irqrestore(&irq_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_lock, flags);
         return 0;
 }
 #else
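A pattern worth noting across the activate_fd(), free_irq_by_fd() and deactivate_fd() hunks above: with irq_lock now a raw lock, memory management is kept outside the locked region. kzalloc() runs after dropping the lock (interrupts stay disabled via the saved flags), the fd is looked up again afterwards in case another CPU registered it in the meantime, and kfree() of a removed entry is deferred until after the unlock. A condensed sketch of that shape, meant to sit alongside the definitions in this file and reusing its identifiers (irq_lock, active_fds, get_irq_entry_by_fd), but wrapped in a hypothetical example_register() rather than the real activate_fd() signature:

static int example_register(int fd)
{
        struct irq_entry *irq_entry, *new, *to_free = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq_lock, flags);
        irq_entry = get_irq_entry_by_fd(fd);
        if (!irq_entry) {
                /* allocate outside the raw lock; keep interrupts disabled */
                raw_spin_unlock(&irq_lock);
                new = kzalloc(sizeof(*new), GFP_ATOMIC);
                if (!new) {
                        local_irq_restore(flags);
                        return -ENOMEM;
                }
                raw_spin_lock(&irq_lock);

                /* re-check: another path may have added this fd meanwhile */
                irq_entry = get_irq_entry_by_fd(fd);
                if (irq_entry) {
                        to_free = new;          /* lost the race, free it later */
                } else {
                        new->fd = fd;
                        list_add_tail(&new->list, &active_fds);
                        irq_entry = new;
                }
        }
        raw_spin_unlock_irqrestore(&irq_lock, flags);

        kfree(to_free);         /* kfree(NULL) is a no-op; never free under the raw lock */
        return 0;
}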