/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>
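
/*
 * Example usage (an illustrative sketch, not part of this file): the
 * callback and variable names below are hypothetical, but init_irq_work()
 * and irq_work_queue() are the real entry points.
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		pr_info("running in IRQ context, outside the NMI\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_callback);
 *	...
 *	irq_work_queue(&my_work);	// safe even from NMI context
 */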

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
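
/*
 * Lifecycle of work->flags, as implemented above and in
 * irq_work_run_list() below:
 *
 *	0 (free)
 *	   -> IRQ_WORK_PENDING | IRQ_WORK_BUSY	(claimed and queued)
 *	   -> IRQ_WORK_BUSY			(PENDING cleared before the callback)
 *	   -> 0					(BUSY cleared after the callback,
 *						 unless re-claimed meanwhile)
 *
 * A claim fails only while PENDING is set; a merely-BUSY work item can be
 * claimed and re-queued while its callback is still running.
 */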

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Enqueue the irq_work @entry unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
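
/*
 * Sketch of a lazy user (hypothetical names; the static-initializer
 * pattern itself is real): IRQ_WORK_LAZY work is deferred to the next
 * timer tick unless the tick is stopped, trading latency for fewer
 * self-IPIs.
 *
 *	static void my_lazy_callback(struct irq_work *work);
 *
 *	static struct irq_work my_lazy_work = {
 *		.flags	= IRQ_WORK_LAZY,
 *		.func	= my_lazy_callback,
 *	};
 *
 *	irq_work_queue(&my_lazy_work);
 */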

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = &__get_cpu_var(raised_list);
	lazy = &__get_cpu_var(lazy_list);
	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
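
/*
 * Note: irq_work_needs_cpu() is meant for the nohz code deciding whether
 * the tick can be stopped; non-empty lists keep the tick running so the
 * lazy list is eventually processed.
 */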

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

static void __irq_work_run(void)
{
	irq_work_run_list(&__get_cpu_var(raised_list));
	irq_work_run_list(&__get_cpu_var(lazy_list));
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);
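
/*
 * How the lists get run (sketch): architectures implementing
 * arch_irq_work_raise() call irq_work_run() from the resulting
 * self-interrupt handler; the others fall back to the timer tick, as the
 * arch_irq_work_raise() comment above notes.
 */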

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
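
/*
 * Illustrative sketch (hypothetical names): call irq_work_sync() before
 * freeing a dynamically allocated work item, so a callback still running
 * on another CPU cannot touch freed memory.
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 */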

#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		/* Drain any remaining work before the CPU goes away */
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */