// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity even though the
	 * architecture has enabled the config switch. Use the general
	 * affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
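	/*
	 * cpumask_any_but() and cpumask_any_and() return an arbitrary CPU
	 * from the restricted mask, or a value >= nr_cpu_ids when the
	 * result is empty. The condition below therefore reads: @m has a
	 * target besides the outgoing CPU, yet none of its CPUs is online.
	 */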
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

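/*
 * Migrate one interrupt away from the outgoing CPU. Returns true when the
 * original affinity mask could not be preserved and the interrupt had to
 * be moved to an arbitrary online CPU instead.
 */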
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, then the vectors need to be cleaned
	 * up now; the cleanup cannot wait until the interrupt next fires
	 * with this CPU involved, because this CPU is going away.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched. It will be restarted when a CPU
		 * in its affinity mask comes back online, see
		 * irq_restore_affinity_of_irq() below.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
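	/*
	 * On failure the interrupt keeps its previous affinity: nothing
	 * was actually rerouted, so do not report broken affinity below.
	 */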
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU. This runs on the outgoing CPU with interrupts already
 * disabled, which is why taking the descriptor locks with a plain
 * raw_spin_lock() is sufficient here.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

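/*
 * Returns true when the upcoming CPU is a housekeeping CPU and the
 * managed interrupt's effective affinity currently points outside the
 * housekeeping set (isolcpus=managed_irq), i.e. the interrupt should be
 * pulled back onto a housekeeping CPU.
 */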
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

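/*
 * Restore the affinity of one managed interrupt for the upcoming CPU.
 * Interrupts which were shut down on hot-unplug are started up again;
 * already running ones get their affinity mask re-applied so the new
 * CPU becomes a usable target.
 */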
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests to move it to an upcoming
	 * housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
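
/*
 * Usage sketch (illustrative; the function below is how architectures
 * typically wire this file up, not code defined here):
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 *
 * irq_affinity_online_cpu() in turn is registered by the core hotplug
 * code as the startup callback of the CPUHP_AP_IRQ_AFFINITY_ONLINE
 * state, so that managed interrupts shut down in migrate_one_irq() are
 * restarted once a CPU in their affinity mask comes back online.
 */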
