Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0
2 : : /*
3 : : * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 : : * Copyright (C) 2005-2006 Thomas Gleixner
5 : : *
6 : : * This file contains driver APIs to the irq subsystem.
7 : : */
8 : :
9 : : #define pr_fmt(fmt) "genirq: " fmt
10 : :
11 : : #include <linux/irq.h>
12 : : #include <linux/kthread.h>
13 : : #include <linux/module.h>
14 : : #include <linux/random.h>
15 : : #include <linux/interrupt.h>
16 : : #include <linux/irqdomain.h>
17 : : #include <linux/slab.h>
18 : : #include <linux/sched.h>
19 : : #include <linux/sched/rt.h>
20 : : #include <linux/sched/task.h>
21 : : #include <uapi/linux/sched/types.h>
22 : : #include <linux/task_work.h>
23 : :
24 : : #include "internals.h"
25 : :
26 : : #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
27 : : __read_mostly bool force_irqthreads;
28 : : EXPORT_SYMBOL_GPL(force_irqthreads);
29 : :
30 : 0 : static int __init setup_forced_irqthreads(char *arg)
31 : : {
32 : 0 : force_irqthreads = true;
33 : 0 : return 0;
34 : : }
35 : : early_param("threadirqs", setup_forced_irqthreads);
36 : : #endif
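Usage note (not part of this file): with CONFIG_IRQ_FORCED_THREADING=y and PREEMPT_RT disabled, forced threading is toggled at boot by the "threadirqs" early parameter handled above, e.g. on an illustrative command line:

    linux /boot/vmlinuz root=/dev/sda1 ro threadirqs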
37 : :
38 : 621 : static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
39 : : {
40 : : struct irq_data *irqd = irq_desc_get_irq_data(desc);
41 : : bool inprogress;
42 : :
43 : : do {
44 : : unsigned long flags;
45 : :
46 : : /*
47 : : * Wait until we're out of the critical section. This might
48 : : * give the wrong answer due to the lack of memory barriers.
49 : : */
50 [ - + ]: 621 : while (irqd_irq_inprogress(&desc->irq_data))
51 : 0 : cpu_relax();
52 : :
53 : : /* Ok, that indicated we're done: double-check carefully. */
54 : 621 : raw_spin_lock_irqsave(&desc->lock, flags);
55 : 621 : inprogress = irqd_irq_inprogress(&desc->irq_data);
56 : :
57 : : /*
58 : : * If requested and supported, check at the chip whether it
59 : : * is in flight at the hardware level, i.e. already pending
60 : : * in a CPU and waiting for service and acknowledge.
61 : : */
62 [ + - ]: 621 : if (!inprogress && sync_chip) {
63 : : /*
64 : : * Ignore the return code. inprogress is only updated
65 : : * when the chip supports it.
66 : : */
67 : 621 : __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
68 : : &inprogress);
69 : : }
70 : 621 : raw_spin_unlock_irqrestore(&desc->lock, flags);
71 : :
72 : : /* Oops, that failed? */
73 [ - + ]: 621 : } while (inprogress);
74 : 621 : }
75 : :
76 : : /**
77 : : * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78 : : * @irq: interrupt number to wait for
79 : : *
80 : : * This function waits for any pending hard IRQ handlers for this
81 : : * interrupt to complete before returning. If you use this
82 : : * function while holding a resource the IRQ handler may need you
83 : : * will deadlock. It does not take associated threaded handlers
84 : : * into account.
85 : : *
86 : : * Do not use this for shutdown scenarios where you must be sure
87 : : * that all parts (hardirq and threaded handler) have completed.
88 : : *
89 : : * Returns: false if a threaded handler is active.
90 : : *
91 : : * This function may be called - with care - from IRQ context.
92 : : *
93 : : * It does not check whether there is an interrupt in flight at the
94 : : * hardware level, but not serviced yet, as this might deadlock when
95 : : * called with interrupts disabled and the target CPU of the interrupt
96 : : * is the current CPU.
97 : : */
98 : 0 : bool synchronize_hardirq(unsigned int irq)
99 : : {
100 : 0 : struct irq_desc *desc = irq_to_desc(irq);
101 : :
102 [ # # ]: 0 : if (desc) {
103 : 0 : __synchronize_hardirq(desc, false);
104 : 0 : return !atomic_read(&desc->threads_active);
105 : : }
106 : :
107 : : return true;
108 : : }
109 : : EXPORT_SYMBOL(synchronize_hardirq);
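A minimal usage sketch for the API above (hypothetical driver; the mydrv names are assumptions, not part of this file): quiesce the hard handler and honor the return value, since threaded handlers are explicitly not waited for.

    #include <linux/interrupt.h>

    static void mydrv_quiesce_hardirq(unsigned int irq)
    {
    	/* device-specific masking should happen first so no new hard irq fires */
    	if (!synchronize_hardirq(irq)) {
    		/*
    		 * A threaded handler is still active; this API does not wait
    		 * for it. A full shutdown must use synchronize_irq() or
    		 * free_irq() instead, as the comment above warns.
    		 */
    	}
    }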
110 : :
111 : : /**
112 : : * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
113 : : * @irq: interrupt number to wait for
114 : : *
115 : : * This function waits for any pending IRQ handlers for this interrupt
116 : : * to complete before returning. If you use this function while
 117 : : * holding a resource the IRQ handler may need, you will deadlock.
118 : : *
119 : : * Can only be called from preemptible code as it might sleep when
120 : : * an interrupt thread is associated to @irq.
121 : : *
122 : : * It optionally makes sure (when the irq chip supports that method)
123 : : * that the interrupt is not pending in any CPU and waiting for
124 : : * service.
125 : : */
126 : 207 : void synchronize_irq(unsigned int irq)
127 : : {
128 : 207 : struct irq_desc *desc = irq_to_desc(irq);
129 : :
130 [ + - ]: 207 : if (desc) {
131 : 207 : __synchronize_hardirq(desc, true);
132 : : /*
133 : : * We made sure that no hardirq handler is
134 : : * running. Now verify that no threaded handlers are
135 : : * active.
136 : : */
137 [ - + # # ]: 414 : wait_event(desc->wait_for_threads,
138 : : !atomic_read(&desc->threads_active));
139 : : }
140 : 207 : }
141 : : EXPORT_SYMBOL(synchronize_irq);
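A teardown sketch under stated assumptions (hypothetical mydrv driver): stop the device from raising interrupts, then wait for both hard and threaded handlers before freeing anything they touch. The caller must be preemptible and must not hold a lock the handler takes.

    #include <linux/interrupt.h>

    struct mydrv { unsigned int irq; /* ... */ };

    static void mydrv_teardown(struct mydrv *priv)
    {
    	/* device-specific: mask the interrupt source in hardware first */
    	synchronize_irq(priv->irq);	/* may sleep waiting for irq threads */
    	/* now safe to free resources the handlers reference */
    }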
142 : :
143 : : #ifdef CONFIG_SMP
144 : : cpumask_var_t irq_default_affinity;
145 : :
146 : : static bool __irq_can_set_affinity(struct irq_desc *desc)
147 : : {
 148 [ + - + + + - # # # # # # # # # # # # ]: 7866 : if (!desc || !irqd_can_balance(&desc->irq_data) ||
 149 [ - + # # # # ]: 4968 : !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
150 : : return false;
151 : : return true;
152 : : }
153 : :
154 : : /**
155 : : * irq_can_set_affinity - Check if the affinity of a given irq can be set
156 : : * @irq: Interrupt to check
157 : : *
158 : : */
159 : 0 : int irq_can_set_affinity(unsigned int irq)
160 : : {
161 : 0 : return __irq_can_set_affinity(irq_to_desc(irq));
162 : : }
163 : :
164 : : /**
 165 : : * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
166 : : * @irq: Interrupt to check
167 : : *
168 : : * Like irq_can_set_affinity() above, but additionally checks for the
169 : : * AFFINITY_MANAGED flag.
170 : : */
171 : 0 : bool irq_can_set_affinity_usr(unsigned int irq)
172 : : {
173 : 0 : struct irq_desc *desc = irq_to_desc(irq);
174 : :
175 [ # # # # ]: 0 : return __irq_can_set_affinity(desc) &&
176 : : !irqd_affinity_is_managed(&desc->irq_data);
177 : : }
178 : :
179 : : /**
180 : : * irq_set_thread_affinity - Notify irq threads to adjust affinity
 181 : : * @desc: irq descriptor whose affinity changed
182 : : *
183 : : * We just set IRQTF_AFFINITY and delegate the affinity setting
184 : : * to the interrupt thread itself. We can not call
185 : : * set_cpus_allowed_ptr() here as we hold desc->lock and this
186 : : * code can be called from hard interrupt context.
187 : : */
188 : 0 : void irq_set_thread_affinity(struct irq_desc *desc)
189 : : {
190 : : struct irqaction *action;
191 : :
192 [ # # ]: 0 : for_each_action_of_desc(desc, action)
193 [ # # ]: 0 : if (action->thread)
194 : 0 : set_bit(IRQTF_AFFINITY, &action->thread_flags);
195 : 0 : }
196 : :
197 : : #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
198 : 0 : static void irq_validate_effective_affinity(struct irq_data *data)
199 : : {
200 : : const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
201 : : struct irq_chip *chip = irq_data_get_irq_chip(data);
202 : :
203 [ # # ]: 0 : if (!cpumask_empty(m))
204 : 0 : return;
205 [ # # ]: 0 : pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
206 : : chip->name, data->irq);
207 : : }
208 : :
209 : : static inline void irq_init_effective_affinity(struct irq_data *data,
210 : : const struct cpumask *mask)
211 : : {
212 : : cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
213 : : }
214 : : #else
215 : : static inline void irq_validate_effective_affinity(struct irq_data *data) { }
216 : : static inline void irq_init_effective_affinity(struct irq_data *data,
217 : : const struct cpumask *mask) { }
218 : : #endif
219 : :
220 : 0 : int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
221 : : bool force)
222 : : {
223 : : struct irq_desc *desc = irq_data_to_desc(data);
224 : : struct irq_chip *chip = irq_data_get_irq_chip(data);
225 : : int ret;
226 : :
227 [ # # # # ]: 0 : if (!chip || !chip->irq_set_affinity)
228 : : return -EINVAL;
229 : :
230 : 0 : ret = chip->irq_set_affinity(data, mask, force);
231 [ # # # ]: 0 : switch (ret) {
232 : : case IRQ_SET_MASK_OK:
233 : : case IRQ_SET_MASK_OK_DONE:
234 : : cpumask_copy(desc->irq_common_data.affinity, mask);
235 : : /* fall through */
236 : : case IRQ_SET_MASK_OK_NOCOPY:
237 : 0 : irq_validate_effective_affinity(data);
238 : 0 : irq_set_thread_affinity(desc);
239 : : ret = 0;
240 : : }
241 : :
242 : 0 : return ret;
243 : : }
244 : :
245 : : #ifdef CONFIG_GENERIC_PENDING_IRQ
246 : : static inline int irq_set_affinity_pending(struct irq_data *data,
247 : : const struct cpumask *dest)
248 : : {
249 : : struct irq_desc *desc = irq_data_to_desc(data);
250 : :
251 : : irqd_set_move_pending(data);
252 : : irq_copy_pending(desc, dest);
253 : : return 0;
254 : : }
255 : : #else
256 : : static inline int irq_set_affinity_pending(struct irq_data *data,
257 : : const struct cpumask *dest)
258 : : {
259 : : return -EBUSY;
260 : : }
261 : : #endif
262 : :
263 : : static int irq_try_set_affinity(struct irq_data *data,
264 : : const struct cpumask *dest, bool force)
265 : : {
266 : 0 : int ret = irq_do_set_affinity(data, dest, force);
267 : :
268 : : /*
269 : : * In case that the underlying vector management is busy and the
270 : : * architecture supports the generic pending mechanism then utilize
271 : : * this to avoid returning an error to user space.
272 : : */
273 [ # # ]: 0 : if (ret == -EBUSY && !force)
274 : : ret = irq_set_affinity_pending(data, dest);
275 : : return ret;
276 : : }
277 : :
278 : 0 : static bool irq_set_affinity_deactivated(struct irq_data *data,
279 : : const struct cpumask *mask, bool force)
280 : : {
281 : : struct irq_desc *desc = irq_data_to_desc(data);
282 : :
283 : : /*
284 : : * Handle irq chips which can handle affinity only in activated
285 : : * state correctly
286 : : *
287 : : * If the interrupt is not yet activated, just store the affinity
288 : : * mask and do not call the chip driver at all. On activation the
289 : : * driver has to make sure anyway that the interrupt is in a
 290 : : * usable state so startup works.
291 : : */
292 [ # # ]: 0 : if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
293 [ # # ]: 0 : irqd_is_activated(data) || !irqd_affinity_on_activate(data))
294 : : return false;
295 : :
296 : : cpumask_copy(desc->irq_common_data.affinity, mask);
297 : : irq_init_effective_affinity(data, mask);
298 : : irqd_set(data, IRQD_AFFINITY_SET);
299 : 0 : return true;
300 : : }
301 : :
302 : 0 : int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
303 : : bool force)
304 : : {
305 : : struct irq_chip *chip = irq_data_get_irq_chip(data);
306 : : struct irq_desc *desc = irq_data_to_desc(data);
307 : : int ret = 0;
308 : :
309 [ # # # # ]: 0 : if (!chip || !chip->irq_set_affinity)
310 : : return -EINVAL;
311 : :
312 [ # # ]: 0 : if (irq_set_affinity_deactivated(data, mask, force))
313 : : return 0;
314 : :
315 [ # # ]: 0 : if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
316 : 0 : ret = irq_try_set_affinity(data, mask, force);
317 : : } else {
318 : : irqd_set_move_pending(data);
319 : : irq_copy_pending(desc, mask);
320 : : }
321 : :
322 [ # # ]: 0 : if (desc->affinity_notify) {
323 : : kref_get(&desc->affinity_notify->kref);
324 [ # # ]: 0 : if (!schedule_work(&desc->affinity_notify->work)) {
325 : : /* Work was already scheduled, drop our extra ref */
326 : 0 : kref_put(&desc->affinity_notify->kref,
327 : : desc->affinity_notify->release);
328 : : }
329 : : }
330 : : irqd_set(data, IRQD_AFFINITY_SET);
331 : :
332 : 0 : return ret;
333 : : }
334 : :
335 : 0 : int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
336 : : {
337 : 0 : struct irq_desc *desc = irq_to_desc(irq);
338 : : unsigned long flags;
339 : : int ret;
340 : :
341 [ # # ]: 0 : if (!desc)
342 : : return -EINVAL;
343 : :
344 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
345 : 0 : ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
346 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
347 : 0 : return ret;
348 : : }
349 : :
350 : 0 : int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
351 : : {
352 : : unsigned long flags;
353 : : struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
354 : :
355 [ # # ]: 0 : if (!desc)
356 : : return -EINVAL;
357 : 0 : desc->affinity_hint = m;
358 : 0 : irq_put_desc_unlock(desc, flags);
 359 : : /* set the initial affinity to prevent every interrupt from ending up on CPU0 */
360 [ # # ]: 0 : if (m)
361 : 0 : __irq_set_affinity(irq, m, false);
362 : : return 0;
363 : : }
364 : : EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
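A sketch of the common multi-queue pattern, assuming a hypothetical driver with one vector per queue; as the code above shows, the hint also sets the initial affinity so the vectors do not all land on CPU0.

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static void mydrv_hint_affinity(const int *irqs, int nvec)
    {
    	int i;

    	for (i = 0; i < nvec; i++)
    		irq_set_affinity_hint(irqs[i], cpumask_of(i % num_online_cpus()));
    }

    /* Before free_irq(), clear the hint again: irq_set_affinity_hint(irq, NULL); */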
365 : :
366 : 0 : static void irq_affinity_notify(struct work_struct *work)
367 : : {
368 : : struct irq_affinity_notify *notify =
369 : 0 : container_of(work, struct irq_affinity_notify, work);
370 : 0 : struct irq_desc *desc = irq_to_desc(notify->irq);
371 : : cpumask_var_t cpumask;
372 : : unsigned long flags;
373 : :
374 [ # # ]: 0 : if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
375 : : goto out;
376 : :
377 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
378 : : if (irq_move_pending(&desc->irq_data))
379 : : irq_get_pending(cpumask, desc);
380 : : else
381 : : cpumask_copy(cpumask, desc->irq_common_data.affinity);
382 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
383 : :
384 : 0 : notify->notify(notify, cpumask);
385 : :
386 : : free_cpumask_var(cpumask);
387 : : out:
388 : 0 : kref_put(¬ify->kref, notify->release);
389 : 0 : }
390 : :
391 : : /**
392 : : * irq_set_affinity_notifier - control notification of IRQ affinity changes
393 : : * @irq: Interrupt for which to enable/disable notification
394 : : * @notify: Context for notification, or %NULL to disable
395 : : * notification. Function pointers must be initialised;
396 : : * the other fields will be initialised by this function.
397 : : *
398 : : * Must be called in process context. Notification may only be enabled
399 : : * after the IRQ is allocated and must be disabled before the IRQ is
400 : : * freed using free_irq().
401 : : */
402 : : int
403 : 0 : irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
404 : : {
405 : 0 : struct irq_desc *desc = irq_to_desc(irq);
406 : : struct irq_affinity_notify *old_notify;
407 : : unsigned long flags;
408 : :
409 : : /* The release function is promised process context */
410 : 0 : might_sleep();
411 : :
412 [ # # # # ]: 0 : if (!desc || desc->istate & IRQS_NMI)
413 : : return -EINVAL;
414 : :
415 : : /* Complete initialisation of *notify */
416 [ # # ]: 0 : if (notify) {
417 : 0 : notify->irq = irq;
418 : : kref_init(¬ify->kref);
419 : 0 : INIT_WORK(¬ify->work, irq_affinity_notify);
420 : : }
421 : :
422 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
423 : 0 : old_notify = desc->affinity_notify;
424 : 0 : desc->affinity_notify = notify;
425 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
426 : :
427 [ # # ]: 0 : if (old_notify) {
428 [ # # ]: 0 : if (cancel_work_sync(&old_notify->work)) {
429 : : /* Pending work had a ref, put that one too */
430 : 0 : kref_put(&old_notify->kref, old_notify->release);
431 : : }
432 : 0 : kref_put(&old_notify->kref, old_notify->release);
433 : : }
434 : :
435 : : return 0;
436 : : }
437 : : EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
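A registration sketch (hypothetical mydrv_queue structure): the caller fills in only the two callbacks; irq and kref are initialised by irq_set_affinity_notifier() itself, as documented above.

    #include <linux/interrupt.h>
    #include <linux/kref.h>

    struct mydrv_queue {
    	struct irq_affinity_notify notify;
    	int home_cpu;
    };

    static void mydrv_affinity_changed(struct irq_affinity_notify *notify,
    				       const cpumask_t *mask)
    {
    	struct mydrv_queue *q = container_of(notify, struct mydrv_queue, notify);

    	q->home_cpu = cpumask_first(mask);	/* called from a workqueue */
    }

    static void mydrv_affinity_release(struct kref *kref)
    {
    	/* last reference dropped; process context is guaranteed */
    }

    static int mydrv_register_notify(struct mydrv_queue *q, unsigned int irq)
    {
    	q->notify.notify = mydrv_affinity_changed;
    	q->notify.release = mydrv_affinity_release;
    	return irq_set_affinity_notifier(irq, &q->notify);
    }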
438 : :
439 : : #ifndef CONFIG_AUTO_IRQ_AFFINITY
440 : : /*
441 : : * Generic version of the affinity autoselector.
442 : : */
443 : 2691 : int irq_setup_affinity(struct irq_desc *desc)
444 : : {
445 : : struct cpumask *set = irq_default_affinity;
446 : : int ret, node = irq_desc_get_node(desc);
447 : : static DEFINE_RAW_SPINLOCK(mask_lock);
448 : : static struct cpumask mask;
449 : :
450 : : /* Excludes PER_CPU and NO_BALANCE interrupts */
451 [ - + ]: 2691 : if (!__irq_can_set_affinity(desc))
452 : : return 0;
453 : :
454 : 0 : raw_spin_lock(&mask_lock);
455 : : /*
456 : : * Preserve the managed affinity setting and a userspace affinity
457 : : * setup, but make sure that one of the targets is online.
458 : : */
459 [ # # # # ]: 0 : if (irqd_affinity_is_managed(&desc->irq_data) ||
460 : : irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
461 [ # # ]: 0 : if (cpumask_intersects(desc->irq_common_data.affinity,
462 : : cpu_online_mask))
463 : 0 : set = desc->irq_common_data.affinity;
464 : : else
465 : : irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
466 : : }
467 : :
468 : : cpumask_and(&mask, cpu_online_mask, set);
469 [ # # ]: 0 : if (cpumask_empty(&mask))
470 : : cpumask_copy(&mask, cpu_online_mask);
471 : :
472 : : if (node != NUMA_NO_NODE) {
473 : : const struct cpumask *nodemask = cpumask_of_node(node);
474 : :
475 : : /* make sure at least one of the cpus in nodemask is online */
476 [ # # ]: 0 : if (cpumask_intersects(&mask, nodemask))
477 : : cpumask_and(&mask, &mask, nodemask);
478 : : }
479 : 0 : ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
480 : : raw_spin_unlock(&mask_lock);
481 : 0 : return ret;
482 : : }
483 : : #else
484 : : /* Wrapper for ALPHA specific affinity selector magic */
485 : : int irq_setup_affinity(struct irq_desc *desc)
486 : : {
487 : : return irq_select_affinity(irq_desc_get_irq(desc));
488 : : }
489 : : #endif /* CONFIG_AUTO_IRQ_AFFINITY */
490 : : #endif /* CONFIG_SMP */
491 : :
492 : :
493 : : /**
494 : : * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
495 : : * @irq: interrupt number to set affinity
496 : : * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
497 : : * specific data for percpu_devid interrupts
498 : : *
499 : : * This function uses the vCPU specific data to set the vCPU
500 : : * affinity for an irq. The vCPU specific data is passed from
501 : : * outside, such as KVM. One example code path is as below:
502 : : * KVM -> IOMMU -> irq_set_vcpu_affinity().
503 : : */
504 : 0 : int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
505 : : {
506 : : unsigned long flags;
507 : : struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
508 : : struct irq_data *data;
509 : : struct irq_chip *chip;
510 : : int ret = -ENOSYS;
511 : :
512 [ # # ]: 0 : if (!desc)
513 : : return -EINVAL;
514 : :
515 : : data = irq_desc_get_irq_data(desc);
516 : : do {
517 : : chip = irq_data_get_irq_chip(data);
518 [ # # # # ]: 0 : if (chip && chip->irq_set_vcpu_affinity)
519 : : break;
520 : : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
521 : 0 : data = data->parent_data;
522 : : #else
523 : : data = NULL;
524 : : #endif
525 [ # # ]: 0 : } while (data);
526 : :
527 [ # # ]: 0 : if (data)
528 : 0 : ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
529 : 0 : irq_put_desc_unlock(desc, flags);
530 : :
531 : 0 : return ret;
532 : : }
533 : : EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
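The layout of @vcpu_info is owned by the underlying irqchip, so any example is chip-specific. As an assumption for illustration only, the x86 posted-interrupt path passes a struct vcpu_data from the irq-remapping layer:

    	struct vcpu_data vcpu = {
    		.pi_desc_addr = pi_desc_pa,	/* assumed: PA of the posted-interrupt descriptor */
    		.vector = guest_vector,
    	};

    	ret = irq_set_vcpu_affinity(host_irq, &vcpu);
    	/* ... and NULL returns the irq to host delivery: */
    	irq_set_vcpu_affinity(host_irq, NULL);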
534 : :
535 : 0 : void __disable_irq(struct irq_desc *desc)
536 : : {
537 [ # # # # ]: 0 : if (!desc->depth++)
538 : 0 : irq_disable(desc);
539 : 0 : }
540 : :
541 : 0 : static int __disable_irq_nosync(unsigned int irq)
542 : : {
543 : : unsigned long flags;
544 : : struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
545 : :
546 [ # # ]: 0 : if (!desc)
547 : : return -EINVAL;
548 : : __disable_irq(desc);
549 : 0 : irq_put_desc_busunlock(desc, flags);
550 : 0 : return 0;
551 : : }
552 : :
553 : : /**
554 : : * disable_irq_nosync - disable an irq without waiting
555 : : * @irq: Interrupt to disable
556 : : *
 557 : : * Disable the selected interrupt line. Disables and enables are
558 : : * nested.
559 : : * Unlike disable_irq(), this function does not ensure existing
560 : : * instances of the IRQ handler have completed before returning.
561 : : *
562 : : * This function may be called from IRQ context.
563 : : */
564 : 0 : void disable_irq_nosync(unsigned int irq)
565 : : {
566 : 0 : __disable_irq_nosync(irq);
567 : 0 : }
568 : : EXPORT_SYMBOL(disable_irq_nosync);
569 : :
570 : : /**
571 : : * disable_irq - disable an irq and wait for completion
572 : : * @irq: Interrupt to disable
573 : : *
 574 : : * Disable the selected interrupt line. Disables and enables are
575 : : * nested.
576 : : * This function waits for any pending IRQ handlers for this interrupt
577 : : * to complete before returning. If you use this function while
 578 : : * holding a resource the IRQ handler may need, you will deadlock.
579 : : *
580 : : * This function may be called - with care - from IRQ context.
581 : : */
582 : 0 : void disable_irq(unsigned int irq)
583 : : {
584 [ # # ]: 0 : if (!__disable_irq_nosync(irq))
585 : 0 : synchronize_irq(irq);
586 : 0 : }
587 : : EXPORT_SYMBOL(disable_irq);
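A pairing sketch (hypothetical helper): reconfigure a device with its handler guaranteed quiescent. Because disable_irq() waits for handlers, it must not be called while holding a lock the handler also takes.

    #include <linux/interrupt.h>

    static void mydrv_reconfigure(unsigned int irq)
    {
    	disable_irq(irq);
    	/* program device registers; no handler can run concurrently */
    	enable_irq(irq);	/* disables and enables nest 1:1 */
    }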
588 : :
589 : : /**
590 : : * disable_hardirq - disables an irq and waits for hardirq completion
591 : : * @irq: Interrupt to disable
592 : : *
 593 : : * Disable the selected interrupt line. Disables and enables are
594 : : * nested.
595 : : * This function waits for any pending hard IRQ handlers for this
596 : : * interrupt to complete before returning. If you use this function while
 597 : : * holding a resource the hard IRQ handler may need, you will deadlock.
598 : : *
599 : : * When used to optimistically disable an interrupt from atomic context
600 : : * the return value must be checked.
601 : : *
602 : : * Returns: false if a threaded handler is active.
603 : : *
604 : : * This function may be called - with care - from IRQ context.
605 : : */
606 : 0 : bool disable_hardirq(unsigned int irq)
607 : : {
608 [ # # ]: 0 : if (!__disable_irq_nosync(irq))
609 : 0 : return synchronize_hardirq(irq);
610 : :
611 : : return false;
612 : : }
613 : : EXPORT_SYMBOL_GPL(disable_hardirq);
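A sketch of the optimistic atomic-context use mentioned above, in the style of netpoll-like callers (the mydrv names are assumptions): invoke the handler directly only if no threaded handler is active.

    #include <linux/interrupt.h>

    static irqreturn_t mydrv_interrupt(int irq, void *dev_id);	/* assumed handler */

    static void mydrv_poll(unsigned int irq, void *dev_id)
    {
    	if (disable_hardirq(irq))
    		mydrv_interrupt(irq, dev_id);
    	enable_irq(irq);
    }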
614 : :
615 : : /**
616 : : * disable_nmi_nosync - disable an nmi without waiting
617 : : * @irq: Interrupt to disable
618 : : *
619 : : * Disable the selected interrupt line. Disables and enables are
620 : : * nested.
621 : : * The interrupt to disable must have been requested through request_nmi.
622 : : * Unlike disable_nmi(), this function does not ensure existing
623 : : * instances of the IRQ handler have completed before returning.
624 : : */
625 : 0 : void disable_nmi_nosync(unsigned int irq)
626 : : {
627 : : disable_irq_nosync(irq);
628 : 0 : }
629 : :
630 : 207 : void __enable_irq(struct irq_desc *desc)
631 : : {
632 [ - + - ]: 207 : switch (desc->depth) {
633 : : case 0:
634 : : err_out:
635 : 0 : WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
636 : : irq_desc_get_irq(desc));
637 : 0 : break;
638 : : case 1: {
639 [ - + ]: 207 : if (desc->istate & IRQS_SUSPENDED)
640 : : goto err_out;
641 : : /* Prevent probing on this irq: */
642 : : irq_settings_set_noprobe(desc);
643 : : /*
644 : : * Call irq_startup() not irq_enable() here because the
645 : : * interrupt might be marked NOAUTOEN. So irq_startup()
646 : : * needs to be invoked when it gets enabled the first
647 : : * time. If it was already started up, then irq_startup()
648 : : * will invoke irq_enable() under the hood.
649 : : */
650 : 207 : irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
651 : 207 : break;
652 : : }
653 : : default:
654 : 0 : desc->depth--;
655 : : }
656 : 207 : }
657 : :
658 : : /**
659 : : * enable_irq - enable handling of an irq
660 : : * @irq: Interrupt to enable
661 : : *
662 : : * Undoes the effect of one call to disable_irq(). If this
663 : : * matches the last disable, processing of interrupts on this
664 : : * IRQ line is re-enabled.
665 : : *
666 : : * This function may be called from IRQ context only when
667 : : * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
668 : : */
669 : 207 : void enable_irq(unsigned int irq)
670 : : {
671 : : unsigned long flags;
672 : : struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
673 : :
674 [ + - ]: 207 : if (!desc)
675 : 0 : return;
676 [ - + + - ]: 207 : if (WARN(!desc->irq_data.chip,
677 : : KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
678 : : goto out;
679 : :
680 : 207 : __enable_irq(desc);
681 : : out:
682 : 207 : irq_put_desc_busunlock(desc, flags);
683 : : }
684 : : EXPORT_SYMBOL(enable_irq);
685 : :
686 : : /**
687 : : * enable_nmi - enable handling of an nmi
688 : : * @irq: Interrupt to enable
689 : : *
690 : : * The interrupt to enable must have been requested through request_nmi.
691 : : * Undoes the effect of one call to disable_nmi(). If this
692 : : * matches the last disable, processing of interrupts on this
693 : : * IRQ line is re-enabled.
694 : : */
695 : 0 : void enable_nmi(unsigned int irq)
696 : : {
697 : 0 : enable_irq(irq);
698 : 0 : }
699 : :
700 : 0 : static int set_irq_wake_real(unsigned int irq, unsigned int on)
701 : : {
702 : 0 : struct irq_desc *desc = irq_to_desc(irq);
703 : : int ret = -ENXIO;
704 : :
705 [ # # ]: 0 : if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
706 : : return 0;
707 : :
708 [ # # ]: 0 : if (desc->irq_data.chip->irq_set_wake)
709 : 0 : ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
710 : :
711 : 0 : return ret;
712 : : }
713 : :
714 : : /**
715 : : * irq_set_irq_wake - control irq power management wakeup
716 : : * @irq: interrupt to control
717 : : * @on: enable/disable power management wakeup
718 : : *
719 : : * Enable/disable power management wakeup mode, which is
720 : : * disabled by default. Enables and disables must match,
721 : : * just as they match for non-wakeup mode support.
722 : : *
723 : : * Wakeup mode lets this IRQ wake the system from sleep
724 : : * states like "suspend to RAM".
725 : : */
726 : 0 : int irq_set_irq_wake(unsigned int irq, unsigned int on)
727 : : {
728 : : unsigned long flags;
729 : : struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
730 : : int ret = 0;
731 : :
732 [ # # ]: 0 : if (!desc)
733 : : return -EINVAL;
734 : :
735 : : /* Don't use NMIs as wake up interrupts please */
736 [ # # ]: 0 : if (desc->istate & IRQS_NMI) {
737 : : ret = -EINVAL;
738 : : goto out_unlock;
739 : : }
740 : :
741 : : /* wakeup-capable irqs can be shared between drivers that
742 : : * don't need to have the same sleep mode behaviors.
743 : : */
744 [ # # ]: 0 : if (on) {
745 [ # # ]: 0 : if (desc->wake_depth++ == 0) {
746 : 0 : ret = set_irq_wake_real(irq, on);
747 [ # # ]: 0 : if (ret)
748 : 0 : desc->wake_depth = 0;
749 : : else
750 : : irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
751 : : }
752 : : } else {
753 [ # # ]: 0 : if (desc->wake_depth == 0) {
754 : 0 : WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
755 [ # # ]: 0 : } else if (--desc->wake_depth == 0) {
756 : 0 : ret = set_irq_wake_real(irq, on);
757 [ # # ]: 0 : if (ret)
758 : 0 : desc->wake_depth = 1;
759 : : else
760 : : irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
761 : : }
762 : : }
763 : :
764 : : out_unlock:
765 : 0 : irq_put_desc_busunlock(desc, flags);
766 : 0 : return ret;
767 : : }
768 : : EXPORT_SYMBOL(irq_set_irq_wake);
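A suspend/resume sketch (hypothetical callbacks and fields): arm the line as a wakeup source on the way down and disarm it symmetrically, since enables and disables must balance. The enable_irq_wake()/disable_irq_wake() wrappers are the usual spelling in drivers.

    #include <linux/interrupt.h>

    struct mydrv { unsigned int irq; bool may_wakeup; };

    static int mydrv_suspend(struct mydrv *priv)
    {
    	if (priv->may_wakeup)
    		irq_set_irq_wake(priv->irq, 1);	/* == enable_irq_wake(irq) */
    	return 0;
    }

    static int mydrv_resume(struct mydrv *priv)
    {
    	if (priv->may_wakeup)
    		irq_set_irq_wake(priv->irq, 0);	/* == disable_irq_wake(irq) */
    	return 0;
    }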
769 : :
770 : : /*
771 : : * Internal function that tells the architecture code whether a
772 : : * particular irq has been exclusively allocated or is available
773 : : * for driver use.
774 : : */
775 : 0 : int can_request_irq(unsigned int irq, unsigned long irqflags)
776 : : {
777 : : unsigned long flags;
778 : : struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
779 : : int canrequest = 0;
780 : :
781 [ # # ]: 0 : if (!desc)
782 : : return 0;
783 : :
784 [ # # ]: 0 : if (irq_settings_can_request(desc)) {
785 [ # # # # ]: 0 : if (!desc->action ||
786 : 0 : irqflags & desc->action->flags & IRQF_SHARED)
787 : : canrequest = 1;
788 : : }
789 : 0 : irq_put_desc_unlock(desc, flags);
790 : 0 : return canrequest;
791 : : }
792 : :
793 : 3312 : int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
794 : : {
795 : 3312 : struct irq_chip *chip = desc->irq_data.chip;
796 : : int ret, unmask = 0;
797 : :
798 [ + - - + ]: 3312 : if (!chip || !chip->irq_set_type) {
799 : : /*
800 : : * IRQF_TRIGGER_* but the PIC does not support multiple
801 : : * flow-types?
802 : : */
803 : : pr_debug("No set_type function for IRQ %d (%s)\n",
804 : : irq_desc_get_irq(desc),
805 : : chip ? (chip->name ? : "unknown") : "unknown");
806 : : return 0;
807 : : }
808 : :
809 [ # # ]: 0 : if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
810 [ # # ]: 0 : if (!irqd_irq_masked(&desc->irq_data))
811 : 0 : mask_irq(desc);
812 [ # # ]: 0 : if (!irqd_irq_disabled(&desc->irq_data))
813 : : unmask = 1;
814 : : }
815 : :
816 : : /* Mask all flags except trigger mode */
817 : 0 : flags &= IRQ_TYPE_SENSE_MASK;
818 : 0 : ret = chip->irq_set_type(&desc->irq_data, flags);
819 : :
820 [ # # # ]: 0 : switch (ret) {
821 : : case IRQ_SET_MASK_OK:
822 : : case IRQ_SET_MASK_OK_DONE:
823 : : irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
824 : : irqd_set(&desc->irq_data, flags);
825 : : /* fall through */
826 : :
827 : : case IRQ_SET_MASK_OK_NOCOPY:
828 : : flags = irqd_get_trigger_type(&desc->irq_data);
829 : : irq_settings_set_trigger_mask(desc, flags);
830 : : irqd_clear(&desc->irq_data, IRQD_LEVEL);
831 : : irq_settings_clr_level(desc);
832 [ # # ]: 0 : if (flags & IRQ_TYPE_LEVEL_MASK) {
833 : : irq_settings_set_level(desc);
834 : : irqd_set(&desc->irq_data, IRQD_LEVEL);
835 : : }
836 : :
837 : : ret = 0;
838 : : break;
839 : : default:
840 : 0 : pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
841 : : flags, irq_desc_get_irq(desc), chip->irq_set_type);
842 : : }
843 [ # # ]: 0 : if (unmask)
844 : 0 : unmask_irq(desc);
845 : 0 : return ret;
846 : : }
847 : :
848 : : #ifdef CONFIG_HARDIRQS_SW_RESEND
849 : 0 : int irq_set_parent(int irq, int parent_irq)
850 : : {
851 : : unsigned long flags;
852 : 0 : struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
853 : :
854 [ # # ]: 0 : if (!desc)
855 : : return -EINVAL;
856 : :
857 : 0 : desc->parent_irq = parent_irq;
858 : :
859 : 0 : irq_put_desc_unlock(desc, flags);
860 : 0 : return 0;
861 : : }
862 : : EXPORT_SYMBOL_GPL(irq_set_parent);
863 : : #endif
864 : :
865 : : /*
866 : : * Default primary interrupt handler for threaded interrupts. Is
867 : : * assigned as primary handler when request_threaded_irq is called
868 : : * with handler == NULL. Useful for oneshot interrupts.
869 : : */
870 : 0 : static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
871 : : {
872 : 0 : return IRQ_WAKE_THREAD;
873 : : }
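This default is what makes purely threaded requests work. A request sketch (hypothetical names) showing why IRQF_ONESHOT is mandatory when handler == NULL, as enforced in __setup_irq() further down:

    #include <linux/interrupt.h>

    static irqreturn_t mydrv_thread_fn(int irq, void *dev_id)
    {
    	/* may sleep, e.g. perform I2C/SPI transfers */
    	return IRQ_HANDLED;
    }

    static int mydrv_request(unsigned int irq, void *dev_id)
    {
    	/* handler == NULL installs irq_default_primary_handler */
    	return request_threaded_irq(irq, NULL, mydrv_thread_fn,
    				    IRQF_ONESHOT, "mydrv", dev_id);
    }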
874 : :
875 : : /*
876 : : * Primary handler for nested threaded interrupts. Should never be
877 : : * called.
878 : : */
879 : 0 : static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
880 : : {
881 : 0 : WARN(1, "Primary handler called for nested irq %d\n", irq);
882 : 0 : return IRQ_NONE;
883 : : }
884 : :
885 : 0 : static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
886 : : {
887 : 0 : WARN(1, "Secondary action handler called for irq %d\n", irq);
888 : 0 : return IRQ_NONE;
889 : : }
890 : :
891 : 0 : static int irq_wait_for_interrupt(struct irqaction *action)
892 : : {
893 : : for (;;) {
894 : 0 : set_current_state(TASK_INTERRUPTIBLE);
895 : :
896 [ # # ]: 0 : if (kthread_should_stop()) {
897 : : /* may need to run one last time */
898 [ # # ]: 0 : if (test_and_clear_bit(IRQTF_RUNTHREAD,
899 : : &action->thread_flags)) {
900 : 0 : __set_current_state(TASK_RUNNING);
901 : 0 : return 0;
902 : : }
903 : 0 : __set_current_state(TASK_RUNNING);
904 : 0 : return -1;
905 : : }
906 : :
907 [ # # ]: 0 : if (test_and_clear_bit(IRQTF_RUNTHREAD,
908 : : &action->thread_flags)) {
909 : 0 : __set_current_state(TASK_RUNNING);
910 : 0 : return 0;
911 : : }
912 : 0 : schedule();
913 : 0 : }
914 : : }
915 : :
916 : : /*
917 : : * Oneshot interrupts keep the irq line masked until the threaded
 918 : : * handler has finished. Unmask it if the interrupt has not been disabled and
919 : : * is marked MASKED.
920 : : */
921 : 0 : static void irq_finalize_oneshot(struct irq_desc *desc,
922 : : struct irqaction *action)
923 : : {
924 [ # # # # ]: 0 : if (!(desc->istate & IRQS_ONESHOT) ||
925 : 0 : action->handler == irq_forced_secondary_handler)
926 : 0 : return;
927 : : again:
928 : : chip_bus_lock(desc);
929 : 0 : raw_spin_lock_irq(&desc->lock);
930 : :
931 : : /*
 932 : : * Implausible though it may be, we need to protect ourselves
 933 : : * against the following scenario:
 934 : : *
 935 : : * The thread finishes faster than the hard interrupt handler
 936 : : * on the other CPU. If we unmask the irq line then the
 937 : : * interrupt can come in again, mask the line and bail out due
 938 : : * to IRQS_INPROGRESS, leaving the irq line masked forever.
939 : : *
940 : : * This also serializes the state of shared oneshot handlers
 941 : : * versus "desc->threads_oneshot |= action->thread_mask;" in
942 : : * irq_wake_thread(). See the comment there which explains the
943 : : * serialization.
944 : : */
945 [ # # ]: 0 : if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
946 : 0 : raw_spin_unlock_irq(&desc->lock);
947 : : chip_bus_sync_unlock(desc);
948 : 0 : cpu_relax();
949 : 0 : goto again;
950 : : }
951 : :
952 : : /*
953 : : * Now check again, whether the thread should run. Otherwise
954 : : * we would clear the threads_oneshot bit of this thread which
955 : : * was just set.
956 : : */
957 [ # # ]: 0 : if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
958 : : goto out_unlock;
959 : :
960 : 0 : desc->threads_oneshot &= ~action->thread_mask;
961 : :
 962 [ # # # # # # ]: 0 : if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
963 : : irqd_irq_masked(&desc->irq_data))
964 : 0 : unmask_threaded_irq(desc);
965 : :
966 : : out_unlock:
967 : 0 : raw_spin_unlock_irq(&desc->lock);
968 : : chip_bus_sync_unlock(desc);
969 : : }
970 : :
971 : : #ifdef CONFIG_SMP
972 : : /*
973 : : * Check whether we need to change the affinity of the interrupt thread.
974 : : */
975 : : static void
976 : 0 : irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
977 : : {
978 : : cpumask_var_t mask;
979 : : bool valid = true;
980 : :
981 [ # # ]: 0 : if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
982 : 0 : return;
983 : :
984 : : /*
985 : : * In case we are out of memory we set IRQTF_AFFINITY again and
986 : : * try again next time
987 : : */
988 : : if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
989 : : set_bit(IRQTF_AFFINITY, &action->thread_flags);
990 : : return;
991 : : }
992 : :
993 : 0 : raw_spin_lock_irq(&desc->lock);
994 : : /*
995 : : * This code is triggered unconditionally. Check the affinity
996 : : * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
997 : : */
998 : : if (cpumask_available(desc->irq_common_data.affinity)) {
999 : : const struct cpumask *m;
1000 : :
1001 : : m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1002 : : cpumask_copy(mask, m);
1003 : : } else {
1004 : : valid = false;
1005 : : }
1006 : 0 : raw_spin_unlock_irq(&desc->lock);
1007 : :
1008 : : if (valid)
1009 : 0 : set_cpus_allowed_ptr(current, mask);
1010 : : free_cpumask_var(mask);
1011 : : }
1012 : : #else
1013 : : static inline void
1014 : : irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1015 : : #endif
1016 : :
1017 : : /*
1018 : : * Interrupts which are not explicitly requested as threaded
1019 : : * interrupts rely on the implicit bh/preempt disable of the hard irq
1020 : : * context. So we need to disable bh here to avoid deadlocks and other
1021 : : * side effects.
1022 : : */
1023 : : static irqreturn_t
1024 : 0 : irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1025 : : {
1026 : : irqreturn_t ret;
1027 : :
1028 : : local_bh_disable();
1029 : 0 : ret = action->thread_fn(action->irq, action->dev_id);
1030 [ # # ]: 0 : if (ret == IRQ_HANDLED)
1031 : 0 : atomic_inc(&desc->threads_handled);
1032 : :
1033 : 0 : irq_finalize_oneshot(desc, action);
1034 : : local_bh_enable();
1035 : 0 : return ret;
1036 : : }
1037 : :
1038 : : /*
1039 : : * Interrupts explicitly requested as threaded interrupts want to be
 1040 : : * preemptible - many of them need to sleep and wait for transfers on
 1041 : : * slow buses to complete.
1042 : : */
1043 : 0 : static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1044 : : struct irqaction *action)
1045 : : {
1046 : : irqreturn_t ret;
1047 : :
1048 : 0 : ret = action->thread_fn(action->irq, action->dev_id);
1049 [ # # ]: 0 : if (ret == IRQ_HANDLED)
1050 : 0 : atomic_inc(&desc->threads_handled);
1051 : :
1052 : 0 : irq_finalize_oneshot(desc, action);
1053 : 0 : return ret;
1054 : : }
1055 : :
1056 : 0 : static void wake_threads_waitq(struct irq_desc *desc)
1057 : : {
1058 [ # # ]: 0 : if (atomic_dec_and_test(&desc->threads_active))
1059 : 0 : wake_up(&desc->wait_for_threads);
1060 : 0 : }
1061 : :
1062 : 0 : static void irq_thread_dtor(struct callback_head *unused)
1063 : : {
1064 : 0 : struct task_struct *tsk = current;
1065 : : struct irq_desc *desc;
1066 : : struct irqaction *action;
1067 : :
 1068 [ # # # # # # ]: 0 : if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1069 : 0 : return;
1070 : :
1071 : 0 : action = kthread_data(tsk);
1072 : :
1073 : 0 : pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1074 : : tsk->comm, tsk->pid, action->irq);
1075 : :
1076 : :
1077 : 0 : desc = irq_to_desc(action->irq);
1078 : : /*
1079 : : * If IRQTF_RUNTHREAD is set, we need to decrement
1080 : : * desc->threads_active and wake possible waiters.
1081 : : */
1082 [ # # ]: 0 : if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1083 : 0 : wake_threads_waitq(desc);
1084 : :
1085 : : /* Prevent a stale desc->threads_oneshot */
1086 : 0 : irq_finalize_oneshot(desc, action);
1087 : : }
1088 : :
1089 : 0 : static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1090 : : {
1091 : 0 : struct irqaction *secondary = action->secondary;
1092 : :
 1093 [ # # # # # # ]: 0 : if (WARN_ON_ONCE(!secondary))
1094 : 0 : return;
1095 : :
1096 : 0 : raw_spin_lock_irq(&desc->lock);
1097 : 0 : __irq_wake_thread(desc, secondary);
1098 : 0 : raw_spin_unlock_irq(&desc->lock);
1099 : : }
1100 : :
1101 : : /*
1102 : : * Interrupt handler thread
1103 : : */
1104 : 0 : static int irq_thread(void *data)
1105 : : {
1106 : : struct callback_head on_exit_work;
1107 : : struct irqaction *action = data;
1108 : 0 : struct irq_desc *desc = irq_to_desc(action->irq);
1109 : : irqreturn_t (*handler_fn)(struct irq_desc *desc,
1110 : : struct irqaction *action);
1111 : :
1112 [ # # # # ]: 0 : if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1113 : : &action->thread_flags))
1114 : : handler_fn = irq_forced_thread_fn;
1115 : : else
1116 : : handler_fn = irq_thread_fn;
1117 : :
1118 : : init_task_work(&on_exit_work, irq_thread_dtor);
1119 : 0 : task_work_add(current, &on_exit_work, false);
1120 : :
1121 : 0 : irq_thread_check_affinity(desc, action);
1122 : :
1123 [ # # ]: 0 : while (!irq_wait_for_interrupt(action)) {
1124 : : irqreturn_t action_ret;
1125 : :
1126 : 0 : irq_thread_check_affinity(desc, action);
1127 : :
1128 : 0 : action_ret = handler_fn(desc, action);
1129 [ # # ]: 0 : if (action_ret == IRQ_WAKE_THREAD)
1130 : 0 : irq_wake_secondary(desc, action);
1131 : :
1132 : 0 : wake_threads_waitq(desc);
1133 : : }
1134 : :
1135 : : /*
1136 : : * This is the regular exit path. __free_irq() is stopping the
1137 : : * thread via kthread_stop() after calling
1138 : : * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1139 : : * oneshot mask bit can be set.
1140 : : */
1141 : 0 : task_work_cancel(current, irq_thread_dtor);
1142 : 0 : return 0;
1143 : : }
1144 : :
1145 : : /**
1146 : : * irq_wake_thread - wake the irq thread for the action identified by dev_id
1147 : : * @irq: Interrupt line
1148 : : * @dev_id: Device identity for which the thread should be woken
1149 : : *
1150 : : */
1151 : 0 : void irq_wake_thread(unsigned int irq, void *dev_id)
1152 : : {
1153 : 0 : struct irq_desc *desc = irq_to_desc(irq);
1154 : : struct irqaction *action;
1155 : : unsigned long flags;
1156 : :
 1157 [ # # # # # # ]: 0 : if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1158 : 0 : return;
1159 : :
1160 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
1161 [ # # ]: 0 : for_each_action_of_desc(desc, action) {
1162 [ # # ]: 0 : if (action->dev_id == dev_id) {
1163 [ # # ]: 0 : if (action->thread)
1164 : 0 : __irq_wake_thread(desc, action);
1165 : : break;
1166 : : }
1167 : : }
1168 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1169 : : }
1170 : : EXPORT_SYMBOL_GPL(irq_wake_thread);
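A sketch of the intended pairing (hypothetical driver): the primary handler defers work via IRQ_WAKE_THREAD, and other contexts can kick the same thread explicitly through irq_wake_thread(), matched by dev_id.

    #include <linux/interrupt.h>

    static irqreturn_t mydrv_hardirq(int irq, void *dev_id)
    {
    	/* ack the device, then defer the heavy lifting to the irq thread */
    	return IRQ_WAKE_THREAD;
    }

    static void mydrv_recovery_timeout(unsigned int irq, void *dev_id)
    {
    	irq_wake_thread(irq, dev_id);	/* no-op if dev_id has no thread */
    }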
1171 : :
1172 : 2277 : static int irq_setup_forced_threading(struct irqaction *new)
1173 : : {
1174 [ - + ]: 2277 : if (!force_irqthreads)
1175 : : return 0;
1176 [ # # ]: 0 : if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1177 : : return 0;
1178 : :
1179 : : /*
1180 : : * No further action required for interrupts which are requested as
1181 : : * threaded interrupts already
1182 : : */
1183 [ # # ]: 0 : if (new->handler == irq_default_primary_handler)
1184 : : return 0;
1185 : :
1186 : 0 : new->flags |= IRQF_ONESHOT;
1187 : :
1188 : : /*
1189 : : * Handle the case where we have a real primary handler and a
1190 : : * thread handler. We force thread them as well by creating a
1191 : : * secondary action.
1192 : : */
1193 [ # # # # ]: 0 : if (new->handler && new->thread_fn) {
1194 : : /* Allocate the secondary action */
1195 : 0 : new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1196 [ # # ]: 0 : if (!new->secondary)
1197 : : return -ENOMEM;
1198 : 0 : new->secondary->handler = irq_forced_secondary_handler;
1199 : 0 : new->secondary->thread_fn = new->thread_fn;
1200 : 0 : new->secondary->dev_id = new->dev_id;
1201 : 0 : new->secondary->irq = new->irq;
1202 : 0 : new->secondary->name = new->name;
1203 : : }
1204 : : /* Deal with the primary handler */
1205 : 0 : set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1206 : 0 : new->thread_fn = new->handler;
1207 : 0 : new->handler = irq_default_primary_handler;
1208 : 0 : return 0;
1209 : : }
1210 : :
1211 : : static int irq_request_resources(struct irq_desc *desc)
1212 : : {
1213 : 2484 : struct irq_data *d = &desc->irq_data;
1214 : 2484 : struct irq_chip *c = d->chip;
1215 : :
1216 [ - + ]: 2484 : return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1217 : : }
1218 : :
1219 : : static void irq_release_resources(struct irq_desc *desc)
1220 : : {
1221 : 0 : struct irq_data *d = &desc->irq_data;
1222 : 414 : struct irq_chip *c = d->chip;
1223 : :
 1224 [ # # - + # # ]: 414 : if (c->irq_release_resources)
1225 : 0 : c->irq_release_resources(d);
1226 : : }
1227 : :
1228 : : static bool irq_supports_nmi(struct irq_desc *desc)
1229 : : {
1230 : : struct irq_data *d = irq_desc_get_irq_data(desc);
1231 : :
1232 : : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1233 : : /* Only IRQs directly managed by the root irqchip can be set as NMI */
1234 [ # # # # ]: 0 : if (d->parent_data)
1235 : : return false;
1236 : : #endif
1237 : : /* Don't support NMIs for chips behind a slow bus */
 1238 [ # # # # # # # # ]: 0 : if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1239 : : return false;
1240 : :
1241 : 0 : return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1242 : : }
1243 : :
1244 : : static int irq_nmi_setup(struct irq_desc *desc)
1245 : : {
1246 : : struct irq_data *d = irq_desc_get_irq_data(desc);
1247 : 0 : struct irq_chip *c = d->chip;
1248 : :
1249 [ # # # # ]: 0 : return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1250 : : }
1251 : :
1252 : : static void irq_nmi_teardown(struct irq_desc *desc)
1253 : : {
1254 : : struct irq_data *d = irq_desc_get_irq_data(desc);
1255 : 0 : struct irq_chip *c = d->chip;
1256 : :
1257 [ # # # # ]: 0 : if (c->irq_nmi_teardown)
1258 : 0 : c->irq_nmi_teardown(d);
1259 : : }
1260 : :
1261 : : static int
1262 : 0 : setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1263 : : {
1264 : : struct task_struct *t;
1265 : 0 : struct sched_param param = {
1266 : : .sched_priority = MAX_USER_RT_PRIO/2,
1267 : : };
1268 : :
1269 [ # # ]: 0 : if (!secondary) {
1270 : 0 : t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1271 : : new->name);
1272 : : } else {
1273 : 0 : t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1274 : : new->name);
1275 : 0 : param.sched_priority -= 1;
1276 : : }
1277 : :
1278 [ # # ]: 0 : if (IS_ERR(t))
1279 : 0 : return PTR_ERR(t);
1280 : :
1281 : 0 : sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m);
1282 : :
1283 : : /*
1284 : : * We keep the reference to the task struct even if
1285 : : * the thread dies to avoid that the interrupt code
1286 : : * references an already freed task_struct.
1287 : : */
1288 : 0 : new->thread = get_task_struct(t);
1289 : : /*
1290 : : * Tell the thread to set its affinity. This is
1291 : : * important for shared interrupt handlers as we do
1292 : : * not invoke setup_affinity() for the secondary
1293 : : * handlers as everything is already set up. Even for
1294 : : * interrupts marked with IRQF_NO_BALANCE this is
1295 : : * correct as we want the thread to move to the cpu(s)
1296 : : * on which the requesting code placed the interrupt.
1297 : : */
1298 : 0 : set_bit(IRQTF_AFFINITY, &new->thread_flags);
1299 : 0 : return 0;
1300 : : }
1301 : :
1302 : : /*
1303 : : * Internal function to register an irqaction - typically used to
1304 : : * allocate special interrupts that are part of the architecture.
1305 : : *
1306 : : * Locking rules:
1307 : : *
1308 : : * desc->request_mutex Provides serialization against a concurrent free_irq()
1309 : : * chip_bus_lock Provides serialization for slow bus operations
1310 : : * desc->lock Provides serialization against hard interrupts
1311 : : *
1312 : : * chip_bus_lock and desc->lock are sufficient for all other management and
1313 : : * interrupt related functions. desc->request_mutex solely serializes
1314 : : * request/free_irq().
1315 : : */
1316 : : static int
1317 : 2898 : __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1318 : : {
1319 : : struct irqaction *old, **old_ptr;
1320 : : unsigned long flags, thread_mask = 0;
1321 : : int ret, nested, shared = 0;
1322 : :
1323 [ + - ]: 2898 : if (!desc)
1324 : : return -EINVAL;
1325 : :
1326 [ + - ]: 2898 : if (desc->irq_data.chip == &no_irq_chip)
1327 : : return -ENOSYS;
1328 [ + - ]: 2898 : if (!try_module_get(desc->owner))
1329 : : return -ENODEV;
1330 : :
1331 : 2898 : new->irq = irq;
1332 : :
1333 : : /*
1334 : : * If the trigger type is not specified by the caller,
1335 : : * then use the default for this interrupt.
1336 : : */
1337 [ + - ]: 2898 : if (!(new->flags & IRQF_TRIGGER_MASK))
1338 : 2898 : new->flags |= irqd_get_trigger_type(&desc->irq_data);
1339 : :
1340 : : /*
1341 : : * Check whether the interrupt nests into another interrupt
1342 : : * thread.
1343 : : */
1344 : 2898 : nested = irq_settings_is_nested_thread(desc);
1345 [ - + ]: 2898 : if (nested) {
1346 [ # # ]: 0 : if (!new->thread_fn) {
1347 : : ret = -EINVAL;
1348 : : goto out_mput;
1349 : : }
1350 : : /*
1351 : : * Replace the primary handler which was provided from
1352 : : * the driver for non nested interrupt handling by the
1353 : : * dummy function which warns when called.
1354 : : */
1355 : 0 : new->handler = irq_nested_primary_handler;
1356 : : } else {
1357 [ + + ]: 2898 : if (irq_settings_can_thread(desc)) {
1358 : 2277 : ret = irq_setup_forced_threading(new);
1359 [ + - ]: 2277 : if (ret)
1360 : : goto out_mput;
1361 : : }
1362 : : }
1363 : :
1364 : : /*
1365 : : * Create a handler thread when a thread function is supplied
1366 : : * and the interrupt does not nest into another interrupt
1367 : : * thread.
1368 : : */
1369 [ - + # # ]: 2898 : if (new->thread_fn && !nested) {
1370 : 0 : ret = setup_irq_thread(new, irq, false);
1371 [ # # ]: 0 : if (ret)
1372 : : goto out_mput;
1373 [ # # ]: 0 : if (new->secondary) {
1374 : 0 : ret = setup_irq_thread(new->secondary, irq, true);
1375 [ # # ]: 0 : if (ret)
1376 : : goto out_thread;
1377 : : }
1378 : : }
1379 : :
1380 : : /*
1381 : : * Drivers are often written to work w/o knowledge about the
1382 : : * underlying irq chip implementation, so a request for a
1383 : : * threaded irq without a primary hard irq context handler
 1384 : : * requires the ONESHOT flag to be set. Some irq chips, like
 1385 : : * those for MSI based interrupts, are per se one shot safe. Check the
1386 : : * chip flags, so we can avoid the unmask dance at the end of
1387 : : * the threaded handler for those.
1388 : : */
1389 [ - + ]: 2898 : if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1390 : 0 : new->flags &= ~IRQF_ONESHOT;
1391 : :
1392 : : /*
1393 : : * Protects against a concurrent __free_irq() call which might wait
1394 : : * for synchronize_hardirq() to complete without holding the optional
1395 : : * chip bus lock and desc->lock. Also protects against handing out
1396 : : * a recycled oneshot thread_mask bit while it's still in use by
1397 : : * its previous owner.
1398 : : */
1399 : 2898 : mutex_lock(&desc->request_mutex);
1400 : :
1401 : : /*
1402 : : * Acquire bus lock as the irq_request_resources() callback below
1403 : : * might rely on the serialization or the magic power management
 1404 : : * functions which are abusing the irq_bus_lock() callback.
1405 : : */
1406 : : chip_bus_lock(desc);
1407 : :
1408 : : /* First installed action requests resources. */
1409 [ + + ]: 2898 : if (!desc->action) {
1410 : : ret = irq_request_resources(desc);
1411 [ - + ]: 2484 : if (ret) {
1412 : 0 : pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1413 : : new->name, irq, desc->irq_data.chip->name);
1414 : 0 : goto out_bus_unlock;
1415 : : }
1416 : : }
1417 : :
1418 : : /*
1419 : : * The following block of code has to be executed atomically
1420 : : * protected against a concurrent interrupt and any of the other
1421 : : * management calls which are not serialized via
1422 : : * desc->request_mutex or the optional bus lock.
1423 : : */
1424 : 2898 : raw_spin_lock_irqsave(&desc->lock, flags);
1425 : 2898 : old_ptr = &desc->action;
1426 : 2898 : old = *old_ptr;
1427 [ + + ]: 2898 : if (old) {
1428 : : /*
1429 : : * Can't share interrupts unless both agree to and are
1430 : : * the same type (level, edge, polarity). So both flag
1431 : : * fields must have IRQF_SHARED set and the bits which
1432 : : * set the trigger type must match. Also all must
1433 : : * agree on ONESHOT.
1434 : : * Interrupt lines used for NMIs cannot be shared.
1435 : : */
1436 : : unsigned int oldtype;
1437 : :
1438 [ - + ]: 414 : if (desc->istate & IRQS_NMI) {
1439 : 0 : pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1440 : : new->name, irq, desc->irq_data.chip->name);
1441 : : ret = -EINVAL;
1442 : 0 : goto out_unlock;
1443 : : }
1444 : :
1445 : : /*
1446 : : * If nobody did set the configuration before, inherit
1447 : : * the one provided by the requester.
1448 : : */
1449 [ + + ]: 414 : if (irqd_trigger_type_was_set(&desc->irq_data)) {
1450 : : oldtype = irqd_get_trigger_type(&desc->irq_data);
1451 : : } else {
1452 : 207 : oldtype = new->flags & IRQF_TRIGGER_MASK;
1453 : : irqd_set_trigger_type(&desc->irq_data, oldtype);
1454 : : }
1455 : :
1456 [ + - + - ]: 828 : if (!((old->flags & new->flags) & IRQF_SHARED) ||
1457 [ + - ]: 828 : (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1458 : 414 : ((old->flags ^ new->flags) & IRQF_ONESHOT))
1459 : : goto mismatch;
1460 : :
1461 : : /* All handlers must agree on per-cpuness */
1462 [ + - ]: 414 : if ((old->flags & IRQF_PERCPU) !=
1463 : : (new->flags & IRQF_PERCPU))
1464 : : goto mismatch;
1465 : :
1466 : : /* add new interrupt at end of irq queue */
1467 : : do {
1468 : : /*
1469 : : * Or all existing action->thread_mask bits,
1470 : : * so we can find the next zero bit for this
1471 : : * new action.
1472 : : */
1473 : 621 : thread_mask |= old->thread_mask;
1474 : 621 : old_ptr = &old->next;
1475 : 621 : old = *old_ptr;
1476 [ + + ]: 621 : } while (old);
1477 : : shared = 1;
1478 : : }
1479 : :
1480 : : /*
1481 : : * Setup the thread mask for this irqaction for ONESHOT. For
1482 : : * !ONESHOT irqs the thread mask is 0 so we can avoid a
1483 : : * conditional in irq_wake_thread().
1484 : : */
1485 [ - + ]: 2898 : if (new->flags & IRQF_ONESHOT) {
1486 : : /*
 1487 : : * Unlikely to have 32 or even 64 irqs sharing one line,
 1488 : : * but who knows.
1489 : : */
1490 [ # # ]: 0 : if (thread_mask == ~0UL) {
1491 : : ret = -EBUSY;
1492 : : goto out_unlock;
1493 : : }
1494 : : /*
1495 : : * The thread_mask for the action is or'ed to
 1496 : : * desc->threads_active to indicate that the
1497 : : * IRQF_ONESHOT thread handler has been woken, but not
1498 : : * yet finished. The bit is cleared when a thread
1499 : : * completes. When all threads of a shared interrupt
1500 : : * line have completed desc->threads_active becomes
1501 : : * zero and the interrupt line is unmasked. See
1502 : : * handle.c:irq_wake_thread() for further information.
1503 : : *
1504 : : * If no thread is woken by primary (hard irq context)
1505 : : * interrupt handlers, then desc->threads_active is
1506 : : * also checked for zero to unmask the irq line in the
1507 : : * affected hard irq flow handlers
1508 : : * (handle_[fasteoi|level]_irq).
1509 : : *
1510 : : * The new action gets the first zero bit of
1511 : : * thread_mask assigned. See the loop above which or's
1512 : : * all existing action->thread_mask bits.
1513 : : */
1514 : 0 : new->thread_mask = 1UL << ffz(thread_mask);
1515 : :
1516 [ - + # # ]: 2898 : } else if (new->handler == irq_default_primary_handler &&
1517 : 0 : !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1518 : : /*
1519 : : * The interrupt was requested with handler = NULL, so
1520 : : * we use the default primary handler for it. But it
1521 : : * does not have the oneshot flag set. In combination
1522 : : * with level interrupts this is deadly, because the
1523 : : * default primary handler just wakes the thread, then
 1524 : : * the irq line is re-enabled, but the device still
1525 : : * has the level irq asserted. Rinse and repeat....
1526 : : *
1527 : : * While this works for edge type interrupts, we play
1528 : : * it safe and reject unconditionally because we can't
1529 : : * say for sure which type this interrupt really
1530 : : * has. The type flags are unreliable as the
1531 : : * underlying chip implementation can override them.
1532 : : */
1533 : 0 : pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1534 : : irq);
1535 : : ret = -EINVAL;
1536 : 0 : goto out_unlock;
1537 : : }
1538 : :
1539 [ + + ]: 2898 : if (!shared) {
1540 : 2484 : init_waitqueue_head(&desc->wait_for_threads);
1541 : :
1542 : : /* Setup the type (level, edge polarity) if configured: */
1543 [ + + ]: 2484 : if (new->flags & IRQF_TRIGGER_MASK) {
1544 : 621 : ret = __irq_set_trigger(desc,
1545 : : new->flags & IRQF_TRIGGER_MASK);
1546 : :
1547 [ + - ]: 621 : if (ret)
1548 : : goto out_unlock;
1549 : : }
1550 : :
1551 : : /*
1552 : : * Activate the interrupt. That activation must happen
1553 : : * independently of IRQ_NOAUTOEN. request_irq() can fail
1554 : : * and the callers are supposed to handle
1555 : : * that. enable_irq() of an interrupt requested with
1556 : : * IRQ_NOAUTOEN is not supposed to fail. The activation
 1557 : : * keeps it in shutdown mode, it merely associates
1558 : : * resources if necessary and if that's not possible it
1559 : : * fails. Interrupts which are in managed shutdown mode
1560 : : * will simply ignore that activation request.
1561 : : */
1562 : 2484 : ret = irq_activate(desc);
1563 [ + - ]: 2484 : if (ret)
1564 : : goto out_unlock;
1565 : :
1566 : 2484 : desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1567 : : IRQS_ONESHOT | IRQS_WAITING);
1568 : : irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1569 : :
1570 [ + + ]: 2484 : if (new->flags & IRQF_PERCPU) {
1571 : : irqd_set(&desc->irq_data, IRQD_PER_CPU);
1572 : : irq_settings_set_per_cpu(desc);
1573 : : }
1574 : :
1575 [ - + ]: 2484 : if (new->flags & IRQF_ONESHOT)
1576 : 0 : desc->istate |= IRQS_ONESHOT;
1577 : :
1578 : : /* Exclude IRQ from balancing if requested */
1579 [ - + ]: 2484 : if (new->flags & IRQF_NOBALANCING) {
1580 : : irq_settings_set_no_balancing(desc);
1581 : : irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1582 : : }
1583 : :
1584 [ + + ]: 2484 : if (irq_settings_can_autoenable(desc)) {
1585 : 1863 : irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1586 : : } else {
1587 : : /*
1588 : : * Shared interrupts do not go well with disabling
1589 : : * auto enable. The sharing interrupt might request
1590 : : * it while it's still disabled and then wait for
1591 : : * interrupts forever.
1592 : : */
1593 [ - + # # ]: 621 : WARN_ON_ONCE(new->flags & IRQF_SHARED);
1594 : : /* Undo nested disables: */
1595 : 621 : desc->depth = 1;
1596 : : }
1597 : :
1598 [ - + ]: 414 : } else if (new->flags & IRQF_TRIGGER_MASK) {
1599 : : unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1600 : : unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1601 : :
1602 [ # # ]: 0 : if (nmsk != omsk)
1603 : : /* hope the handler works with current trigger mode */
1604 : 0 : pr_warn("irq %d uses trigger mode %u; requested %u\n",
1605 : : irq, omsk, nmsk);
1606 : : }
1607 : :
1608 : 2898 : *old_ptr = new;
1609 : :
1610 : : irq_pm_install_action(desc, new);
1611 : :
1612 : : /* Reset broken irq detection when installing new handler */
1613 : 2898 : desc->irq_count = 0;
1614 : 2898 : desc->irqs_unhandled = 0;
1615 : :
1616 : : /*
1617 : : * Check whether we disabled the irq via the spurious handler
1618 : : * before. Reenable it and give it another chance.
1619 : : */
1620 [ + + - + ]: 2898 : if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1621 : 0 : desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1622 : 0 : __enable_irq(desc);
1623 : : }
1624 : :
1625 : 2898 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1626 : : chip_bus_sync_unlock(desc);
1627 : 2898 : mutex_unlock(&desc->request_mutex);
1628 : :
1629 : : irq_setup_timings(desc, new);
1630 : :
1631 : : /*
1632 : : * Strictly no need to wake it up, but hung_task complains
1633 : : * when no hard interrupt wakes the thread up.
1634 : : */
1635 [ - + ]: 2898 : if (new->thread)
1636 : 0 : wake_up_process(new->thread);
1637 [ - + ]: 2898 : if (new->secondary)
1638 : 0 : wake_up_process(new->secondary->thread);
1639 : :
1640 : 2898 : register_irq_proc(irq, desc);
1641 : 2898 : new->dir = NULL;
1642 : 2898 : register_handler_proc(irq, new);
1643 : 2898 : return 0;
1644 : :
1645 : : mismatch:
1646 [ # # ]: 0 : if (!(new->flags & IRQF_PROBE_SHARED)) {
1647 : 0 : pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1648 : : irq, new->flags, new->name, old->flags, old->name);
1649 : : #ifdef CONFIG_DEBUG_SHIRQ
1650 : : dump_stack();
1651 : : #endif
1652 : : }
1653 : : ret = -EBUSY;
1654 : :
1655 : : out_unlock:
1656 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1657 : :
1658 [ # # ]: 0 : if (!desc->action)
1659 : : irq_release_resources(desc);
1660 : : out_bus_unlock:
1661 : : chip_bus_sync_unlock(desc);
1662 : 0 : mutex_unlock(&desc->request_mutex);
1663 : :
1664 : : out_thread:
1665 [ # # ]: 0 : if (new->thread) {
1666 : : struct task_struct *t = new->thread;
1667 : :
1668 : 0 : new->thread = NULL;
1669 : 0 : kthread_stop(t);
1670 : 0 : put_task_struct(t);
1671 : : }
1672 [ # # # # ]: 0 : if (new->secondary && new->secondary->thread) {
1673 : : struct task_struct *t = new->secondary->thread;
1674 : :
1675 : 0 : new->secondary->thread = NULL;
1676 : 0 : kthread_stop(t);
1677 : 0 : put_task_struct(t);
1678 : : }
1679 : : out_mput:
1680 : 0 : module_put(desc->owner);
1681 : 0 : return ret;
1682 : : }
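The handler == irq_default_primary_handler check above is what a driver trips over when it requests a purely threaded interrupt without IRQF_ONESHOT. A minimal sketch of the accepted pattern, assuming a hypothetical my_dev structure and thread function (not part of this file):

    #include <linux/interrupt.h>

    /* Hypothetical per-device structure and threaded handler. */
    struct my_dev { int irq; void __iomem *regs; };

    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;

            /* Sleepable work goes here; the line stays masked until
             * this returns, courtesy of IRQF_ONESHOT. */
            (void)dev;      /* device-specific work elided in this sketch */
            return IRQ_HANDLED;
    }

    static int my_request(struct my_dev *dev)
    {
            /* handler == NULL installs irq_default_primary_handler;
             * IRQF_ONESHOT is mandatory or __setup_irq() rejects the
             * request with -EINVAL, as seen above. */
            return request_threaded_irq(dev->irq, NULL, my_thread_fn,
                                        IRQF_ONESHOT, "my_dev", dev);
    }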
1683 : :
1684 : : /**
1685 : : * setup_irq - setup an interrupt
1686 : : * @irq: Interrupt line to setup
1687 : : * @act: irqaction for the interrupt
1688 : : *
1689 : : * Used to statically setup interrupts in the early boot process.
1690 : : */
1691 : 0 : int setup_irq(unsigned int irq, struct irqaction *act)
1692 : : {
1693 : : int retval;
1694 : 0 : struct irq_desc *desc = irq_to_desc(irq);
1695 : :
1696 [ # # # # : 0 : if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
# # ]
1697 : : return -EINVAL;
1698 : :
1699 : 0 : retval = irq_chip_pm_get(&desc->irq_data);
1700 [ # # ]: 0 : if (retval < 0)
1701 : : return retval;
1702 : :
1703 : 0 : retval = __setup_irq(irq, desc, act);
1704 : :
1705 [ # # ]: 0 : if (retval)
1706 : 0 : irq_chip_pm_put(&desc->irq_data);
1707 : :
1708 : 0 : return retval;
1709 : : }
1710 : : EXPORT_SYMBOL_GPL(setup_irq);
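setup_irq() differs from request_irq() in that the caller supplies the irqaction itself, so early boot code can avoid the allocator. A sketch under that assumption, with a hypothetical timer handler:

    #include <linux/init.h>
    #include <linux/interrupt.h>

    static irqreturn_t my_timer_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    /* Statically allocated action; no kzalloc() needed this early. */
    static struct irqaction my_timer_action = {
            .handler = my_timer_handler,
            .flags   = IRQF_TIMER,
            .name    = "my_timer",
    };

    void __init my_time_init(void)
    {
            if (setup_irq(0, &my_timer_action))     /* IRQ 0: assumed timer line */
                    pr_err("my_timer: setup_irq failed\n");
    }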
1711 : :
1712 : : /*
1713 : : * Internal function to unregister an irqaction - used to free
1714 : : * regular and special interrupts that are part of the architecture.
1715 : : */
1716 : 414 : static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1717 : : {
1718 : 414 : unsigned irq = desc->irq_data.irq;
1719 : : struct irqaction *action, **action_ptr;
1720 : : unsigned long flags;
1721 : :
1722 [ - + ]: 414 : WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1723 : :
1724 : 414 : mutex_lock(&desc->request_mutex);
1725 : : chip_bus_lock(desc);
1726 : 414 : raw_spin_lock_irqsave(&desc->lock, flags);
1727 : :
1728 : : /*
1729 : : * There can be multiple actions per IRQ descriptor, find the right
1730 : : * one based on the dev_id:
1731 : : */
1732 : 414 : action_ptr = &desc->action;
1733 : : for (;;) {
1734 : 414 : action = *action_ptr;
1735 : :
1736 [ - + ]: 414 : if (!action) {
1737 : 0 : WARN(1, "Trying to free already-free IRQ %d\n", irq);
1738 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1739 : : chip_bus_sync_unlock(desc);
1740 : 0 : mutex_unlock(&desc->request_mutex);
1741 : 0 : return NULL;
1742 : : }
1743 : :
1744 [ - + ]: 414 : if (action->dev_id == dev_id)
1745 : : break;
1746 : 0 : action_ptr = &action->next;
1747 : 0 : }
1748 : :
1749 : : /* Found it - now remove it from the list of entries: */
1750 : 414 : *action_ptr = action->next;
1751 : :
1752 : : irq_pm_remove_action(desc, action);
1753 : :
1754 : : /* If this was the last handler, shut down the IRQ line: */
1755 [ + - ]: 414 : if (!desc->action) {
1756 : : irq_settings_clr_disable_unlazy(desc);
1757 : : /* Only shutdown. Deactivate after synchronize_hardirq() */
1758 : 414 : irq_shutdown(desc);
1759 : : }
1760 : :
1761 : : #ifdef CONFIG_SMP
1762 : : /* make sure affinity_hint is cleaned up */
1763 [ - + # # : 414 : if (WARN_ON_ONCE(desc->affinity_hint))
- + ]
1764 : 0 : desc->affinity_hint = NULL;
1765 : : #endif
1766 : :
1767 : 414 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1768 : : /*
1769 : : * Drop bus_lock here so the changes which were done in the chip
1770 : : * callbacks above are synced out to the irq chips which hang
1771 : : * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1772 : : *
1773 : : * Aside of that the bus_lock can also be taken from the threaded
1774 : : * handler in irq_finalize_oneshot() which results in a deadlock
1775 : : * because kthread_stop() would wait forever for the thread to
1776 : : * complete, which is blocked on the bus lock.
1777 : : *
1778 : : * The still held desc->request_mutex protects against a
1779 : : * concurrent request_irq() of this irq so the release of resources
1780 : : * and timing data is properly serialized.
1781 : : */
1782 : : chip_bus_sync_unlock(desc);
1783 : :
1784 : 414 : unregister_handler_proc(irq, action);
1785 : :
1786 : : /*
1787 : : * Make sure it's not being used on another CPU and if the chip
1788 : : * supports it also make sure that there is no (not yet serviced)
1789 : : * interrupt in flight at the hardware level.
1790 : : */
1791 : 414 : __synchronize_hardirq(desc, true);
1792 : :
1793 : : #ifdef CONFIG_DEBUG_SHIRQ
1794 : : /*
1795 : : * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1796 : : * event to happen even now that it's being freed, so let's make
1797 : : * sure that is so by doing an extra call to the handler ....
1798 : : *
1799 : : * ( We do this after actually deregistering it, to make sure that a
1800 : : * 'real' IRQ doesn't run in parallel with our fake. )
1801 : : */
1802 : : if (action->flags & IRQF_SHARED) {
1803 : : local_irq_save(flags);
1804 : : action->handler(irq, dev_id);
1805 : : local_irq_restore(flags);
1806 : : }
1807 : : #endif
1808 : :
1809 : : /*
1810 : : * The action has already been removed above, but the thread writes
1811 : : * its oneshot mask bit when it completes. The request_mutex is
1812 : : * held across this, which prevents __setup_irq() from handing out
1813 : : * the same bit to a newly requested action.
1814 : : */
1815 [ - + ]: 414 : if (action->thread) {
1816 : 0 : kthread_stop(action->thread);
1817 : 0 : put_task_struct(action->thread);
1818 [ # # # # ]: 0 : if (action->secondary && action->secondary->thread) {
1819 : 0 : kthread_stop(action->secondary->thread);
1820 : 0 : put_task_struct(action->secondary->thread);
1821 : : }
1822 : : }
1823 : :
1824 : : /* Last action releases resources */
1825 [ + - ]: 414 : if (!desc->action) {
1826 : : /*
1827 : : * Reacquire the bus lock as irq_release_resources() might
1828 : : * require it to deallocate resources over the slow bus.
1829 : : */
1830 : : chip_bus_lock(desc);
1831 : : /*
1832 : : * There is no interrupt on the fly anymore. Deactivate it
1833 : : * completely.
1834 : : */
1835 : 414 : raw_spin_lock_irqsave(&desc->lock, flags);
1836 : 414 : irq_domain_deactivate_irq(&desc->irq_data);
1837 : 414 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1838 : :
1839 : : irq_release_resources(desc);
1840 : : chip_bus_sync_unlock(desc);
1841 : : irq_remove_timings(desc);
1842 : : }
1843 : :
1844 : 414 : mutex_unlock(&desc->request_mutex);
1845 : :
1846 : 414 : irq_chip_pm_put(&desc->irq_data);
1847 : 414 : module_put(desc->owner);
1848 : 414 : kfree(action->secondary);
1849 : 414 : return action;
1850 : : }
1851 : :
1852 : : /**
1853 : : * remove_irq - free an interrupt
1854 : : * @irq: Interrupt line to free
1855 : : * @act: irqaction for the interrupt
1856 : : *
1857 : : * Used to remove interrupts statically setup by the early boot process.
1858 : : */
1859 : 0 : void remove_irq(unsigned int irq, struct irqaction *act)
1860 : : {
1861 : 0 : struct irq_desc *desc = irq_to_desc(irq);
1862 : :
1863 [ # # # # : 0 : if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
# # ]
1864 : 0 : __free_irq(desc, act->dev_id);
1865 : 0 : }
1866 : : EXPORT_SYMBOL_GPL(remove_irq);
1867 : :
1868 : : /**
1869 : : * free_irq - free an interrupt allocated with request_irq
1870 : : * @irq: Interrupt line to free
1871 : : * @dev_id: Device identity to free
1872 : : *
1873 : : * Remove an interrupt handler. The handler is removed and if the
1874 : : * interrupt line is no longer in use by any driver it is disabled.
1875 : : * On a shared IRQ the caller must ensure the interrupt is disabled
1876 : : * on the card it drives before calling this function. The function
1877 : : * does not return until any executing interrupts for this IRQ
1878 : : * have completed.
1879 : : *
1880 : : * This function must not be called from interrupt context.
1881 : : *
1882 : : * Returns the devname argument passed to request_irq.
1883 : : */
1884 : 414 : const void *free_irq(unsigned int irq, void *dev_id)
1885 : : {
1886 : 414 : struct irq_desc *desc = irq_to_desc(irq);
1887 : : struct irqaction *action;
1888 : : const char *devname;
1889 : :
1890 [ + - - + : 828 : if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ - ]
1891 : : return NULL;
1892 : :
1893 : : #ifdef CONFIG_SMP
1894 [ - + - + ]: 414 : if (WARN_ON(desc->affinity_notify))
1895 : 0 : desc->affinity_notify = NULL;
1896 : : #endif
1897 : :
1898 : 414 : action = __free_irq(desc, dev_id);
1899 : :
1900 [ + - ]: 414 : if (!action)
1901 : : return NULL;
1902 : :
1903 : 414 : devname = action->name;
1904 : 414 : kfree(action);
1905 : 414 : return devname;
1906 : : }
1907 : : EXPORT_SYMBOL(free_irq);
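As the kernel-doc above notes, free_irq() hands back the devname that was passed to request_irq(). A hedged sketch of the request/free pairing, with hypothetical names:

    #include <linux/interrupt.h>

    struct my_dev { int irq; };

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int my_attach(struct my_dev *dev)
    {
            /* IRQF_SHARED requires a non-NULL dev_id, here &dev. */
            return request_irq(dev->irq, my_handler, IRQF_SHARED,
                               "my_dev", dev);
    }

    static void my_detach(struct my_dev *dev)
    {
            const void *name;

            /* Must not be called from IRQ context; blocks until all
             * in-flight handlers for this action have completed. */
            name = free_irq(dev->irq, dev);
            pr_debug("released IRQ line claimed as %s\n", (const char *)name);
    }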
1908 : :
1909 : : /* This function must be called with desc->lock held */
1910 : 0 : static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1911 : : {
1912 : : const char *devname = NULL;
1913 : :
1914 : 0 : desc->istate &= ~IRQS_NMI;
1915 : :
1916 [ # # # # ]: 0 : if (!WARN_ON(desc->action == NULL)) {
1917 : : irq_pm_remove_action(desc, desc->action);
1918 : 0 : devname = desc->action->name;
1919 : 0 : unregister_handler_proc(irq, desc->action);
1920 : :
1921 : 0 : kfree(desc->action);
1922 : 0 : desc->action = NULL;
1923 : : }
1924 : :
1925 : : irq_settings_clr_disable_unlazy(desc);
1926 : 0 : irq_shutdown_and_deactivate(desc);
1927 : :
1928 : : irq_release_resources(desc);
1929 : :
1930 : 0 : irq_chip_pm_put(&desc->irq_data);
1931 : 0 : module_put(desc->owner);
1932 : :
1933 : 0 : return devname;
1934 : : }
1935 : :
1936 : 0 : const void *free_nmi(unsigned int irq, void *dev_id)
1937 : : {
1938 : 0 : struct irq_desc *desc = irq_to_desc(irq);
1939 : : unsigned long flags;
1940 : : const void *devname;
1941 : :
1942 [ # # # # : 0 : if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
# # ]
1943 : : return NULL;
1944 : :
1945 [ # # # # ]: 0 : if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1946 : : return NULL;
1947 : :
1948 : : /* NMI still enabled */
1949 [ # # # # ]: 0 : if (WARN_ON(desc->depth == 0))
1950 : : disable_nmi_nosync(irq);
1951 : :
1952 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
1953 : :
1954 : : irq_nmi_teardown(desc);
1955 : 0 : devname = __cleanup_nmi(irq, desc);
1956 : :
1957 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
1958 : :
1959 : 0 : return devname;
1960 : : }
1961 : :
1962 : : /**
1963 : : * request_threaded_irq - allocate an interrupt line
1964 : : * @irq: Interrupt line to allocate
1965 : : * @handler: Function to be called when the IRQ occurs.
1966 : : * Primary handler for threaded interrupts
1967 : : * If NULL and thread_fn != NULL the default
1968 : : * primary handler is installed
1969 : : * @thread_fn: Function called from the irq handler thread
1970 : : * If NULL, no irq thread is created
1971 : : * @irqflags: Interrupt type flags
1972 : : * @devname: An ascii name for the claiming device
1973 : : * @dev_id: A cookie passed back to the handler function
1974 : : *
1975 : : * This call allocates interrupt resources and enables the
1976 : : * interrupt line and IRQ handling. From the point this
1977 : : * call is made your handler function may be invoked. Since
1978 : : * your handler function must clear any interrupt the board
1979 : : * raises, you must take care both to initialise your hardware
1980 : : * and to set up the interrupt handler in the right order.
1981 : : *
1982 : : * If you want to set up a threaded irq handler for your device
1983 : : * then you need to supply @handler and @thread_fn. @handler is
1984 : : * still called in hard interrupt context and has to check
1985 : : * whether the interrupt originates from the device. If yes it
1986 : : * needs to disable the interrupt on the device and return
1987 : : * IRQ_WAKE_THREAD which will wake up the handler thread and run
1988 : : * @thread_fn. This split handler design is necessary to support
1989 : : * shared interrupts.
1990 : : *
1991 : : * Dev_id must be globally unique. Normally the address of the
1992 : : * device data structure is used as the cookie. Since the handler
1993 : : * receives this value it makes sense to use it.
1994 : : *
1995 : : * If your interrupt is shared you must pass a non NULL dev_id
1996 : : * as this is required when freeing the interrupt.
1997 : : *
1998 : : * Flags:
1999 : : *
2000 : : * IRQF_SHARED Interrupt is shared
2001 : : * IRQF_TRIGGER_* Specify active edge(s) or level
2002 : : *
2003 : : */
2004 : 2277 : int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2005 : : irq_handler_t thread_fn, unsigned long irqflags,
2006 : : const char *devname, void *dev_id)
2007 : : {
2008 : : struct irqaction *action;
2009 : : struct irq_desc *desc;
2010 : : int retval;
2011 : :
2012 [ + - ]: 2277 : if (irq == IRQ_NOTCONNECTED)
2013 : : return -ENOTCONN;
2014 : :
2015 : : /*
2016 : : * Sanity-check: shared interrupts must pass in a real dev-ID,
2017 : : * otherwise we'll have trouble later trying to figure out
2018 : : * which interrupt is which (messes up the interrupt freeing
2019 : : * logic etc).
2020 : : *
2021 : : * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2022 : : * it cannot be set along with IRQF_NO_SUSPEND.
2023 : : */
2024 [ + + + - : 4554 : if (((irqflags & IRQF_SHARED) && !dev_id) ||
+ - ]
2025 [ + - ]: 4554 : (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2026 : 2277 : ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2027 : : return -EINVAL;
2028 : :
2029 : 2277 : desc = irq_to_desc(irq);
2030 [ + - ]: 2277 : if (!desc)
2031 : : return -EINVAL;
2032 : :
2033 [ + - + - ]: 4554 : if (!irq_settings_can_request(desc) ||
2034 [ - + ]: 2277 : WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2035 : : return -EINVAL;
2036 : :
2037 [ - + ]: 2277 : if (!handler) {
2038 [ # # ]: 0 : if (!thread_fn)
2039 : : return -EINVAL;
2040 : : handler = irq_default_primary_handler;
2041 : : }
2042 : :
2043 : 2277 : action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2044 [ + - ]: 2277 : if (!action)
2045 : : return -ENOMEM;
2046 : :
2047 : 2277 : action->handler = handler;
2048 : 2277 : action->thread_fn = thread_fn;
2049 : 2277 : action->flags = irqflags;
2050 : 2277 : action->name = devname;
2051 : 2277 : action->dev_id = dev_id;
2052 : :
2053 : 2277 : retval = irq_chip_pm_get(&desc->irq_data);
2054 [ - + ]: 2277 : if (retval < 0) {
2055 : 0 : kfree(action);
2056 : 0 : return retval;
2057 : : }
2058 : :
2059 : 2277 : retval = __setup_irq(irq, desc, action);
2060 : :
2061 [ - + ]: 2277 : if (retval) {
2062 : 0 : irq_chip_pm_put(&desc->irq_data);
2063 : 0 : kfree(action->secondary);
2064 : 0 : kfree(action);
2065 : : }
2066 : :
2067 : : #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2068 : : if (!retval && (irqflags & IRQF_SHARED)) {
2069 : : /*
2070 : : * It's a shared IRQ -- the driver ought to be prepared for it
2071 : : * to happen immediately, so let's make sure....
2072 : : * We disable the irq to make sure that a 'real' IRQ doesn't
2073 : : * run in parallel with our fake.
2074 : : */
2075 : : unsigned long flags;
2076 : :
2077 : : disable_irq(irq);
2078 : : local_irq_save(flags);
2079 : :
2080 : : handler(irq, dev_id);
2081 : :
2082 : : local_irq_restore(flags);
2083 : : enable_irq(irq);
2084 : : }
2085 : : #endif
2086 : 2277 : return retval;
2087 : : }
2088 : : EXPORT_SYMBOL(request_threaded_irq);
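The split handler design described above typically looks like the following sketch (hypothetical my_dev structure and register offsets): the primary handler runs in hard-irq context, checks whether the device raised the interrupt, quiesces it, and defers the real work to the thread.

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct my_dev { int irq; void __iomem *regs; };
    #define MY_STATUS  0x00  /* assumed register offsets */
    #define MY_IRQ_EN  0x04

    static irqreturn_t my_primary(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;

            if (!(readl(dev->regs + MY_STATUS) & 1))
                    return IRQ_NONE;                /* not ours (shared line) */

            writel(0, dev->regs + MY_IRQ_EN);       /* quiesce the device */
            return IRQ_WAKE_THREAD;                 /* run my_thread_fn */
    }

    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;

            /* sleepable processing here ... */
            writel(1, dev->regs + MY_IRQ_EN);       /* re-arm the device */
            return IRQ_HANDLED;
    }

    static int my_request(struct my_dev *dev)
    {
            return request_threaded_irq(dev->irq, my_primary, my_thread_fn,
                                        IRQF_SHARED, "my_dev", dev);
    }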
2089 : :
2090 : : /**
2091 : : * request_any_context_irq - allocate an interrupt line
2092 : : * @irq: Interrupt line to allocate
2093 : : * @handler: Function to be called when the IRQ occurs.
2094 : : * Threaded handler for threaded interrupts.
2095 : : * @flags: Interrupt type flags
2096 : : * @name: An ascii name for the claiming device
2097 : : * @dev_id: A cookie passed back to the handler function
2098 : : *
2099 : : * This call allocates interrupt resources and enables the
2100 : : * interrupt line and IRQ handling. It selects either a
2101 : : * hardirq or threaded handling method depending on the
2102 : : * context.
2103 : : *
2104 : : * On failure, it returns a negative value. On success,
2105 : : * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2106 : : */
2107 : 0 : int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2108 : : unsigned long flags, const char *name, void *dev_id)
2109 : : {
2110 : : struct irq_desc *desc;
2111 : : int ret;
2112 : :
2113 [ # # ]: 0 : if (irq == IRQ_NOTCONNECTED)
2114 : : return -ENOTCONN;
2115 : :
2116 : 0 : desc = irq_to_desc(irq);
2117 [ # # ]: 0 : if (!desc)
2118 : : return -EINVAL;
2119 : :
2120 [ # # ]: 0 : if (irq_settings_is_nested_thread(desc)) {
2121 : 0 : ret = request_threaded_irq(irq, NULL, handler,
2122 : : flags, name, dev_id);
2123 [ # # ]: 0 : return !ret ? IRQC_IS_NESTED : ret;
2124 : : }
2125 : :
2126 : : ret = request_irq(irq, handler, flags, name, dev_id);
2127 : 0 : return !ret ? IRQC_IS_HARDIRQ : ret;
2128 : : }
2129 : : EXPORT_SYMBOL_GPL(request_any_context_irq);
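Because the return value encodes which context was chosen, callers that care can branch on it. A small sketch with hypothetical names:

    #include <linux/interrupt.h>

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int my_request(unsigned int irq, void *cookie)
    {
            int ret = request_any_context_irq(irq, my_handler, 0,
                                              "my_dev", cookie);

            if (ret < 0)
                    return ret;
            /* IRQC_IS_NESTED: the handler runs in a thread (e.g. behind a
             * slow-bus irq chip); IRQC_IS_HARDIRQ: hard-irq context. */
            return 0;
    }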
2130 : :
2131 : : /**
2132 : : * request_nmi - allocate an interrupt line for NMI delivery
2133 : : * @irq: Interrupt line to allocate
2134 : : * @handler: Function to be called when the IRQ occurs.
2135 : : * Threaded handler for threaded interrupts.
2136 : : * @irqflags: Interrupt type flags
2137 : : * @name: An ascii name for the claiming device
2138 : : * @dev_id: A cookie passed back to the handler function
2139 : : *
2140 : : * This call allocates interrupt resources and enables the
2141 : : * interrupt line and IRQ handling. It sets up the IRQ line
2142 : : * to be handled as an NMI.
2143 : : *
2144 : : * An interrupt line delivering NMIs cannot be shared and IRQ handling
2145 : : * cannot be threaded.
2146 : : *
2147 : : * Interrupt lines requested for NMI delivery must produce per-CPU
2148 : : * interrupts and have the auto-enable setting disabled.
2149 : : *
2150 : : * Dev_id must be globally unique. Normally the address of the
2151 : : * device data structure is used as the cookie. Since the handler
2152 : : * receives this value it makes sense to use it.
2153 : : *
2154 : : * If the interrupt line cannot be used to deliver NMIs, the function
2155 : : * will fail and return a negative value.
2156 : : */
2157 : 0 : int request_nmi(unsigned int irq, irq_handler_t handler,
2158 : : unsigned long irqflags, const char *name, void *dev_id)
2159 : : {
2160 : : struct irqaction *action;
2161 : : struct irq_desc *desc;
2162 : : unsigned long flags;
2163 : : int retval;
2164 : :
2165 [ # # ]: 0 : if (irq == IRQ_NOTCONNECTED)
2166 : : return -ENOTCONN;
2167 : :
2168 : : /* NMIs cannot be shared, nor used for polling */
2169 [ # # ]: 0 : if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2170 : : return -EINVAL;
2171 : :
2172 [ # # ]: 0 : if (!(irqflags & IRQF_PERCPU))
2173 : : return -EINVAL;
2174 : :
2175 [ # # ]: 0 : if (!handler)
2176 : : return -EINVAL;
2177 : :
2178 : 0 : desc = irq_to_desc(irq);
2179 : :
2180 [ # # # # : 0 : if (!desc || irq_settings_can_autoenable(desc) ||
# # ]
2181 [ # # ]: 0 : !irq_settings_can_request(desc) ||
2182 [ # # # # ]: 0 : WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2183 : : !irq_supports_nmi(desc))
2184 : : return -EINVAL;
2185 : :
2186 : 0 : action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2187 [ # # ]: 0 : if (!action)
2188 : : return -ENOMEM;
2189 : :
2190 : 0 : action->handler = handler;
2191 : 0 : action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2192 : 0 : action->name = name;
2193 : 0 : action->dev_id = dev_id;
2194 : :
2195 : 0 : retval = irq_chip_pm_get(&desc->irq_data);
2196 [ # # ]: 0 : if (retval < 0)
2197 : : goto err_out;
2198 : :
2199 : 0 : retval = __setup_irq(irq, desc, action);
2200 [ # # ]: 0 : if (retval)
2201 : : goto err_irq_setup;
2202 : :
2203 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
2204 : :
2205 : : /* Setup NMI state */
2206 : 0 : desc->istate |= IRQS_NMI;
2207 : : retval = irq_nmi_setup(desc);
2208 [ # # ]: 0 : if (retval) {
2209 : 0 : __cleanup_nmi(irq, desc);
2210 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
2211 : 0 : return -EINVAL;
2212 : : }
2213 : :
2214 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
2215 : :
2216 : 0 : return 0;
2217 : :
2218 : : err_irq_setup:
2219 : 0 : irq_chip_pm_put(&desc->irq_data);
2220 : : err_out:
2221 : 0 : kfree(action);
2222 : :
2223 : 0 : return retval;
2224 : : }
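Note that the flag checks above require IRQF_PERCPU, and the line must have auto-enable disabled and NMI support at the chip level. A hedged sketch of a request, hypothetical names:

    #include <linux/interrupt.h>

    static irqreturn_t my_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no locking, no sleeping, minimal work. */
            return IRQ_HANDLED;
    }

    static int my_setup_nmi(unsigned int irq, void *cookie)
    {
            /* IRQF_PERCPU is required by the checks above; IRQF_NO_THREAD
             * and IRQF_NOBALANCING are added internally. */
            return request_nmi(irq, my_nmi_handler, IRQF_PERCPU,
                               "my_nmi", cookie);
    }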
2225 : :
2226 : 2484 : void enable_percpu_irq(unsigned int irq, unsigned int type)
2227 : : {
2228 : 2484 : unsigned int cpu = smp_processor_id();
2229 : : unsigned long flags;
2230 : : struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2231 : :
2232 [ + - ]: 2484 : if (!desc)
2233 : 0 : return;
2234 : :
2235 : : /*
2236 : : * If the trigger type is not specified by the caller, then
2237 : : * use the default for this interrupt.
2238 : : */
2239 : 2484 : type &= IRQ_TYPE_SENSE_MASK;
2240 [ + + ]: 2484 : if (type == IRQ_TYPE_NONE)
2241 : : type = irqd_get_trigger_type(&desc->irq_data);
2242 : :
2243 [ + - ]: 2484 : if (type != IRQ_TYPE_NONE) {
2244 : : int ret;
2245 : :
2246 : 2484 : ret = __irq_set_trigger(desc, type);
2247 : :
2248 [ - + ]: 2484 : if (ret) {
2249 : 0 : WARN(1, "failed to set type for IRQ%d\n", irq);
2250 : 0 : goto out;
2251 : : }
2252 : : }
2253 : :
2254 : 2484 : irq_percpu_enable(desc, cpu);
2255 : : out:
2256 : 2484 : irq_put_desc_unlock(desc, flags);
2257 : : }
2258 : : EXPORT_SYMBOL_GPL(enable_percpu_irq);
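enable_percpu_irq() only affects the calling CPU, so a driver that wants the line live everywhere has to run it on each CPU, e.g. via on_each_cpu(). A sketch, assuming a per-CPU interrupt number obtained elsewhere:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/smp.h>

    static unsigned int my_ppi_irq;     /* assumed per-CPU interrupt number */

    static void my_enable_on_cpu(void *info)
    {
            /* Runs on each CPU; IRQ_TYPE_NONE keeps the default trigger. */
            enable_percpu_irq(my_ppi_irq, IRQ_TYPE_NONE);
    }

    static void my_enable_everywhere(void)
    {
            on_each_cpu(my_enable_on_cpu, NULL, 1);
    }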
2259 : :
2260 : 0 : void enable_percpu_nmi(unsigned int irq, unsigned int type)
2261 : : {
2262 : 0 : enable_percpu_irq(irq, type);
2263 : 0 : }
2264 : :
2265 : : /**
2266 : : * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2267 : : * @irq: Linux irq number to check for
2268 : : *
2269 : : * Must be called from a non-migratable context. Returns the enable
2270 : : * state of a per cpu interrupt on the current cpu.
2271 : : */
2272 : 0 : bool irq_percpu_is_enabled(unsigned int irq)
2273 : : {
2274 : 0 : unsigned int cpu = smp_processor_id();
2275 : : struct irq_desc *desc;
2276 : : unsigned long flags;
2277 : : bool is_enabled;
2278 : :
2279 : : desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2280 [ # # ]: 0 : if (!desc)
2281 : : return false;
2282 : :
2283 : 0 : is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2284 : 0 : irq_put_desc_unlock(desc, flags);
2285 : :
2286 : 0 : return is_enabled;
2287 : : }
2288 : : EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2289 : :
2290 : 0 : void disable_percpu_irq(unsigned int irq)
2291 : : {
2292 : 0 : unsigned int cpu = smp_processor_id();
2293 : : unsigned long flags;
2294 : : struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2295 : :
2296 [ # # ]: 0 : if (!desc)
2297 : 0 : return;
2298 : :
2299 : 0 : irq_percpu_disable(desc, cpu);
2300 : 0 : irq_put_desc_unlock(desc, flags);
2301 : : }
2302 : : EXPORT_SYMBOL_GPL(disable_percpu_irq);
2303 : :
2304 : 0 : void disable_percpu_nmi(unsigned int irq)
2305 : : {
2306 : 0 : disable_percpu_irq(irq);
2307 : 0 : }
2308 : :
2309 : : /*
2310 : : * Internal function to unregister a percpu irqaction.
2311 : : */
2312 : 0 : static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2313 : : {
2314 : 0 : struct irq_desc *desc = irq_to_desc(irq);
2315 : : struct irqaction *action;
2316 : : unsigned long flags;
2317 : :
2318 [ # # ]: 0 : WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2319 : :
2320 [ # # ]: 0 : if (!desc)
2321 : : return NULL;
2322 : :
2323 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
2324 : :
2325 : 0 : action = desc->action;
2326 [ # # # # ]: 0 : if (!action || action->percpu_dev_id != dev_id) {
2327 : 0 : WARN(1, "Trying to free already-free IRQ %d\n", irq);
2328 : 0 : goto bad;
2329 : : }
2330 : :
2331 [ # # ]: 0 : if (!cpumask_empty(desc->percpu_enabled)) {
2332 : 0 : WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2333 : : irq, cpumask_first(desc->percpu_enabled));
2334 : 0 : goto bad;
2335 : : }
2336 : :
2337 : : /* Found it - now remove it from the list of entries: */
2338 : 0 : desc->action = NULL;
2339 : :
2340 : 0 : desc->istate &= ~IRQS_NMI;
2341 : :
2342 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
2343 : :
2344 : 0 : unregister_handler_proc(irq, action);
2345 : :
2346 : 0 : irq_chip_pm_put(&desc->irq_data);
2347 : 0 : module_put(desc->owner);
2348 : 0 : return action;
2349 : :
2350 : : bad:
2351 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
2352 : 0 : return NULL;
2353 : : }
2354 : :
2355 : : /**
2356 : : * remove_percpu_irq - free a per-cpu interrupt
2357 : : * @irq: Interrupt line to free
2358 : : * @act: irqaction for the interrupt
2359 : : *
2360 : : * Used to remove interrupts statically setup by the early boot process.
2361 : : */
2362 : 0 : void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2363 : : {
2364 : 0 : struct irq_desc *desc = irq_to_desc(irq);
2365 : :
2366 [ # # # # ]: 0 : if (desc && irq_settings_is_per_cpu_devid(desc))
2367 : 0 : __free_percpu_irq(irq, act->percpu_dev_id);
2368 : 0 : }
2369 : :
2370 : : /**
2371 : : * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2372 : : * @irq: Interrupt line to free
2373 : : * @dev_id: Device identity to free
2374 : : *
2375 : : * Remove a percpu interrupt handler. The handler is removed, but
2376 : : * the interrupt line is not disabled. This must be done on each
2377 : : * CPU before calling this function. The function does not return
2378 : : * until any executing interrupts for this IRQ have completed.
2379 : : *
2380 : : * This function must not be called from interrupt context.
2381 : : */
2382 : 0 : void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2383 : : {
2384 : 0 : struct irq_desc *desc = irq_to_desc(irq);
2385 : :
2386 [ # # # # ]: 0 : if (!desc || !irq_settings_is_per_cpu_devid(desc))
2387 : 0 : return;
2388 : :
2389 : : chip_bus_lock(desc);
2390 : 0 : kfree(__free_percpu_irq(irq, dev_id));
2391 : : chip_bus_sync_unlock(desc);
2392 : : }
2393 : : EXPORT_SYMBOL_GPL(free_percpu_irq);
2394 : :
2395 : 0 : void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2396 : : {
2397 : 0 : struct irq_desc *desc = irq_to_desc(irq);
2398 : :
2399 [ # # # # ]: 0 : if (!desc || !irq_settings_is_per_cpu_devid(desc))
2400 : : return;
2401 : :
2402 [ # # # # ]: 0 : if (WARN_ON(!(desc->istate & IRQS_NMI)))
2403 : : return;
2404 : :
2405 : 0 : kfree(__free_percpu_irq(irq, dev_id));
2406 : : }
2407 : :
2408 : : /**
2409 : : * setup_percpu_irq - setup a per-cpu interrupt
2410 : : * @irq: Interrupt line to setup
2411 : : * @act: irqaction for the interrupt
2412 : : *
2413 : : * Used to statically setup per-cpu interrupts in the early boot process.
2414 : : */
2415 : 0 : int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2416 : : {
2417 : 0 : struct irq_desc *desc = irq_to_desc(irq);
2418 : : int retval;
2419 : :
2420 [ # # # # ]: 0 : if (!desc || !irq_settings_is_per_cpu_devid(desc))
2421 : : return -EINVAL;
2422 : :
2423 : 0 : retval = irq_chip_pm_get(&desc->irq_data);
2424 [ # # ]: 0 : if (retval < 0)
2425 : : return retval;
2426 : :
2427 : 0 : retval = __setup_irq(irq, desc, act);
2428 : :
2429 [ # # ]: 0 : if (retval)
2430 : 0 : irq_chip_pm_put(&desc->irq_data);
2431 : :
2432 : 0 : return retval;
2433 : : }
2434 : :
2435 : : /**
2436 : : * __request_percpu_irq - allocate a percpu interrupt line
2437 : : * @irq: Interrupt line to allocate
2438 : : * @handler: Function to be called when the IRQ occurs.
2439 : : * @flags: Interrupt type flags (IRQF_TIMER only)
2440 : : * @devname: An ascii name for the claiming device
2441 : : * @dev_id: A percpu cookie passed back to the handler function
2442 : : *
2443 : : * This call allocates interrupt resources and enables the
2444 : : * interrupt on the local CPU. If the interrupt is supposed to be
2445 : : * enabled on other CPUs, it has to be done on each CPU using
2446 : : * enable_percpu_irq().
2447 : : *
2448 : : * Dev_id must be globally unique. It is a per-cpu variable, and
2449 : : * the handler gets called with the interrupted CPU's instance of
2450 : : * that variable.
2451 : : */
2452 : 621 : int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2453 : : unsigned long flags, const char *devname,
2454 : : void __percpu *dev_id)
2455 : : {
2456 : : struct irqaction *action;
2457 : : struct irq_desc *desc;
2458 : : int retval;
2459 : :
2460 [ + - ]: 621 : if (!dev_id)
2461 : : return -EINVAL;
2462 : :
2463 : 621 : desc = irq_to_desc(irq);
2464 [ + - + - : 1863 : if (!desc || !irq_settings_can_request(desc) ||
+ - ]
2465 : : !irq_settings_is_per_cpu_devid(desc))
2466 : : return -EINVAL;
2467 : :
2468 [ + - ]: 621 : if (flags && flags != IRQF_TIMER)
2469 : : return -EINVAL;
2470 : :
2471 : 621 : action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2472 [ + - ]: 621 : if (!action)
2473 : : return -ENOMEM;
2474 : :
2475 : 621 : action->handler = handler;
2476 : 621 : action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2477 : 621 : action->name = devname;
2478 : 621 : action->percpu_dev_id = dev_id;
2479 : :
2480 : 621 : retval = irq_chip_pm_get(&desc->irq_data);
2481 [ - + ]: 621 : if (retval < 0) {
2482 : 0 : kfree(action);
2483 : 0 : return retval;
2484 : : }
2485 : :
2486 : 621 : retval = __setup_irq(irq, desc, action);
2487 : :
2488 [ - + ]: 621 : if (retval) {
2489 : 0 : irq_chip_pm_put(&desc->irq_data);
2490 : 0 : kfree(action);
2491 : : }
2492 : :
2493 : 621 : return retval;
2494 : : }
2495 : : EXPORT_SYMBOL_GPL(__request_percpu_irq);
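Putting the pieces together for a per-CPU line, a minimal sketch under the assumptions already stated (hypothetical names): allocate a per-CPU cookie, request once, then enable per CPU; since free_percpu_irq() only removes the handler, each CPU must disable the line first.

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>

    struct my_pcpu_state { u64 count; };
    static struct my_pcpu_state __percpu *my_state;  /* per-CPU cookie */
    static unsigned int my_irq;                      /* assumed PPI number */

    static irqreturn_t my_pcpu_handler(int irq, void *dev_id)
    {
            struct my_pcpu_state *s = dev_id;        /* this CPU's instance */

            s->count++;
            return IRQ_HANDLED;
    }

    static int my_init(void)
    {
            my_state = alloc_percpu(struct my_pcpu_state);
            if (!my_state)
                    return -ENOMEM;
            /* Request once; the line stays disabled until each CPU
             * calls enable_percpu_irq() on itself. */
            return request_percpu_irq(my_irq, my_pcpu_handler,
                                      "my_pcpu", my_state);
    }

    static void my_exit(void)
    {
            /* every CPU must have called disable_percpu_irq() first */
            free_percpu_irq(my_irq, my_state);
            free_percpu(my_state);
    }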
2496 : :
2497 : : /**
2498 : : * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2499 : : * @irq: Interrupt line to allocate
2500 : : * @handler: Function to be called when the IRQ occurs.
2501 : : * @name: An ascii name for the claiming device
2502 : : * @dev_id: A percpu cookie passed back to the handler function
2503 : : *
2504 : : * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2505 : : * have to be setup on each CPU by calling prepare_percpu_nmi() before
2506 : : * being enabled on the same CPU by using enable_percpu_nmi().
2507 : : *
2508 : : * Dev_id must be globally unique. It is a per-cpu variable, and
2509 : : * the handler gets called with the interrupted CPU's instance of
2510 : : * that variable.
2511 : : *
2512 : : * Interrupt lines requested for NMI delivery should have the auto-enable
2513 : : * setting disabled.
2514 : : *
2515 : : * If the interrupt line cannot be used to deliver NMIs, the function
2516 : : * will fail, returning a negative value.
2517 : : */
2518 : 0 : int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2519 : : const char *name, void __percpu *dev_id)
2520 : : {
2521 : : struct irqaction *action;
2522 : : struct irq_desc *desc;
2523 : : unsigned long flags;
2524 : : int retval;
2525 : :
2526 [ # # ]: 0 : if (!handler)
2527 : : return -EINVAL;
2528 : :
2529 : 0 : desc = irq_to_desc(irq);
2530 : :
2531 [ # # # # : 0 : if (!desc || !irq_settings_can_request(desc) ||
# # ]
2532 [ # # ]: 0 : !irq_settings_is_per_cpu_devid(desc) ||
2533 [ # # ]: 0 : irq_settings_can_autoenable(desc) ||
2534 : : !irq_supports_nmi(desc))
2535 : : return -EINVAL;
2536 : :
2537 : : /* The line cannot already be NMI */
2538 [ # # ]: 0 : if (desc->istate & IRQS_NMI)
2539 : : return -EINVAL;
2540 : :
2541 : 0 : action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2542 [ # # ]: 0 : if (!action)
2543 : : return -ENOMEM;
2544 : :
2545 : 0 : action->handler = handler;
2546 : 0 : action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2547 : : | IRQF_NOBALANCING;
2548 : 0 : action->name = name;
2549 : 0 : action->percpu_dev_id = dev_id;
2550 : :
2551 : 0 : retval = irq_chip_pm_get(&desc->irq_data);
2552 [ # # ]: 0 : if (retval < 0)
2553 : : goto err_out;
2554 : :
2555 : 0 : retval = __setup_irq(irq, desc, action);
2556 [ # # ]: 0 : if (retval)
2557 : : goto err_irq_setup;
2558 : :
2559 : 0 : raw_spin_lock_irqsave(&desc->lock, flags);
2560 : 0 : desc->istate |= IRQS_NMI;
2561 : 0 : raw_spin_unlock_irqrestore(&desc->lock, flags);
2562 : :
2563 : 0 : return 0;
2564 : :
2565 : : err_irq_setup:
2566 : 0 : irq_chip_pm_put(&desc->irq_data);
2567 : : err_out:
2568 : 0 : kfree(action);
2569 : :
2570 : 0 : return retval;
2571 : : }
2572 : :
2573 : : /**
2574 : : * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2575 : : * @irq: Interrupt line to prepare for NMI delivery
2576 : : *
2577 : : * This call prepares an interrupt line to deliver NMIs on the current CPU,
2578 : : * before that interrupt line gets enabled with enable_percpu_nmi().
2579 : : *
2580 : : * As a CPU local operation, this should be called from non-preemptible
2581 : : * context.
2582 : : *
2583 : : * If the interrupt line cannot be used to deliver NMIs, the function
2584 : : * will fail, returning a negative value.
2585 : : */
2586 : 0 : int prepare_percpu_nmi(unsigned int irq)
2587 : : {
2588 : : unsigned long flags;
2589 : : struct irq_desc *desc;
2590 : : int ret = 0;
2591 : :
2592 : : WARN_ON(preemptible());
2593 : :
2594 : : desc = irq_get_desc_lock(irq, &flags,
2595 : : IRQ_GET_DESC_CHECK_PERCPU);
2596 [ # # ]: 0 : if (!desc)
2597 : : return -EINVAL;
2598 : :
2599 [ # # # # ]: 0 : if (WARN(!(desc->istate & IRQS_NMI),
2600 : : KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2601 : : irq)) {
2602 : : ret = -EINVAL;
2603 : : goto out;
2604 : : }
2605 : :
2606 : : ret = irq_nmi_setup(desc);
2607 [ # # ]: 0 : if (ret) {
2608 : 0 : pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2609 : 0 : goto out;
2610 : : }
2611 : :
2612 : : out:
2613 : 0 : irq_put_desc_unlock(desc, flags);
2614 : 0 : return ret;
2615 : : }
2616 : :
2617 : : /**
2618 : : * teardown_percpu_nmi - undoes NMI setup of IRQ line
2619 : : * @irq: Interrupt line from which CPU local NMI configuration should be
2620 : : * removed
2621 : : *
2622 : : * This call undoes the setup done by prepare_percpu_nmi().
2623 : : *
2624 : : * The IRQ line must not be enabled on the current CPU.
2625 : : *
2626 : : * As a CPU local operation, this should be called from non-preemptible
2627 : : * context.
2628 : : */
2629 : 0 : void teardown_percpu_nmi(unsigned int irq)
2630 : : {
2631 : : unsigned long flags;
2632 : : struct irq_desc *desc;
2633 : :
2634 : : WARN_ON(preemptible());
2635 : :
2636 : : desc = irq_get_desc_lock(irq, &flags,
2637 : : IRQ_GET_DESC_CHECK_PERCPU);
2638 [ # # ]: 0 : if (!desc)
2639 : 0 : return;
2640 : :
2641 [ # # # # ]: 0 : if (WARN_ON(!(desc->istate & IRQS_NMI)))
2642 : : goto out;
2643 : :
2644 : : irq_nmi_teardown(desc);
2645 : : out:
2646 : 0 : irq_put_desc_unlock(desc, flags);
2647 : : }
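The per-CPU NMI lifecycle spans four calls: request once with request_percpu_nmi(), then prepare and enable on each CPU, with teardown mirroring in reverse. A sketch of the CPU-local half, assuming the line was already requested (hypothetical names):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static unsigned int my_nmi_irq;  /* assumed, from request_percpu_nmi() */

    /* Called on each CPU, e.g. from a CPUHP_AP_*_STARTING hotplug
     * callback, which runs on the CPU with interrupts disabled and
     * thus satisfies the non-preemptible requirement above. */
    static int my_cpu_starting(unsigned int cpu)
    {
            int ret = prepare_percpu_nmi(my_nmi_irq);

            if (ret)
                    return ret;
            enable_percpu_nmi(my_nmi_irq, IRQ_TYPE_NONE);
            return 0;
    }

    static int my_cpu_dying(unsigned int cpu)
    {
            disable_percpu_nmi(my_nmi_irq);
            teardown_percpu_nmi(my_nmi_irq);
            return 0;
    }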
2648 : :
2649 : 621 : int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2650 : : bool *state)
2651 : : {
2652 : : struct irq_chip *chip;
2653 : : int err = -EINVAL;
2654 : :
2655 : : do {
2656 : : chip = irq_data_get_irq_chip(data);
2657 [ # # + - ]: 621 : if (chip->irq_get_irqchip_state)
2658 : : break;
2659 : : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2660 : 621 : data = data->parent_data;
2661 : : #else
2662 : : data = NULL;
2663 : : #endif
2664 [ # # - + ]: 621 : } while (data);
2665 : :
2666 [ # # - + ]: 621 : if (data)
2667 : 0 : err = chip->irq_get_irqchip_state(data, which, state);
2668 : 621 : return err;
2669 : : }
2670 : :
2671 : : /**
2672 : : * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2673 : : * @irq: Interrupt line that is forwarded to a VM
2674 : : * @which: One of IRQCHIP_STATE_* the caller wants to know about
2675 : : * @state: a pointer to a boolean where the state is to be stored
2676 : : *
2677 : : * This call snapshots the internal irqchip state of an
2678 : : * interrupt, returning into @state the bit corresponding to
2679 : : * state @which.
2680 : : *
2681 : : * This function should be called with preemption disabled if the
2682 : : * interrupt controller has per-cpu registers.
2683 : : */
2684 : 0 : int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2685 : : bool *state)
2686 : : {
2687 : : struct irq_desc *desc;
2688 : : struct irq_data *data;
2689 : : unsigned long flags;
2690 : : int err = -EINVAL;
2691 : :
2692 : : desc = irq_get_desc_buslock(irq, &flags, 0);
2693 [ # # ]: 0 : if (!desc)
2694 : : return err;
2695 : :
2696 : : data = irq_desc_get_irq_data(desc);
2697 : :
2698 : : err = __irq_get_irqchip_state(data, which, state);
2699 : :
2700 : 0 : irq_put_desc_busunlock(desc, flags);
2701 : 0 : return err;
2702 : : }
2703 : : EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
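A sketch of snapshotting the pending bit of a line forwarded to a guest (hypothetical helper; error handling elided):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static bool my_irq_is_pending(unsigned int irq)
    {
            bool pending = false;

            /* Returns false on error, e.g. if no chip in the hierarchy
             * implements irq_get_irqchip_state(). */
            irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
            return pending;
    }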
2704 : :
2705 : : /**
2706 : : * irq_set_irqchip_state - set the state of a forwarded interrupt.
2707 : : * @irq: Interrupt line that is forwarded to a VM
2708 : : * @which: State to be restored (one of IRQCHIP_STATE_*)
2709 : : * @val: Value corresponding to @which
2710 : : *
2711 : : * This call sets the internal irqchip state of an interrupt,
2712 : : * depending on the value of @which.
2713 : : *
2714 : : * This function should be called with preemption disabled if the
2715 : : * interrupt controller has per-cpu registers.
2716 : : */
2717 : 0 : int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2718 : : bool val)
2719 : : {
2720 : : struct irq_desc *desc;
2721 : : struct irq_data *data;
2722 : : struct irq_chip *chip;
2723 : : unsigned long flags;
2724 : : int err = -EINVAL;
2725 : :
2726 : : desc = irq_get_desc_buslock(irq, &flags, 0);
2727 [ # # ]: 0 : if (!desc)
2728 : : return err;
2729 : :
2730 : : data = irq_desc_get_irq_data(desc);
2731 : :
2732 : : do {
2733 : : chip = irq_data_get_irq_chip(data);
2734 [ # # ]: 0 : if (chip->irq_set_irqchip_state)
2735 : : break;
2736 : : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2737 : 0 : data = data->parent_data;
2738 : : #else
2739 : : data = NULL;
2740 : : #endif
2741 [ # # ]: 0 : } while (data);
2742 : :
2743 [ # # ]: 0 : if (data)
2744 : 0 : err = chip->irq_set_irqchip_state(data, which, val);
2745 : :
2746 : 0 : irq_put_desc_busunlock(desc, flags);
2747 : 0 : return err;
2748 : : }
2749 : : EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
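The get/set pair is what makes forwarded interrupt state migratable, e.g. when a VM's line moves between host CPUs. A hedged save/restore sketch with a hypothetical helper:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* Snapshot the pending state of one forwarded line, then replay it. */
    static int my_transfer_pending(unsigned int irq)
    {
            bool pending;
            int err;

            err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
            if (err)
                    return err;
            return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
    }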
|