LCOV - code coverage report
Current view: top level - arch/arm/mm - context.c (source / functions)
Test:         gcov_data_raspi2_real_modules_combined.info
Date:         2020-09-30 20:25:40

                  Hit      Total    Coverage
Lines:             47         48      97.9 %
Functions:          4          4     100.0 %
Branches:          26         28      92.9 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0-only
       2                 :            : /*
       3                 :            :  *  linux/arch/arm/mm/context.c
       4                 :            :  *
       5                 :            :  *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
       6                 :            :  *  Copyright (C) 2012 ARM Limited
       7                 :            :  *
       8                 :            :  *  Author: Will Deacon <will.deacon@arm.com>
       9                 :            :  */
      10                 :            : #include <linux/init.h>
      11                 :            : #include <linux/sched.h>
      12                 :            : #include <linux/mm.h>
      13                 :            : #include <linux/smp.h>
      14                 :            : #include <linux/percpu.h>
      15                 :            : 
      16                 :            : #include <asm/mmu_context.h>
      17                 :            : #include <asm/smp_plat.h>
      18                 :            : #include <asm/thread_notify.h>
      19                 :            : #include <asm/tlbflush.h>
      20                 :            : #include <asm/proc-fns.h>
      21                 :            : 
      22                 :            : /*
      23                 :            :  * On ARMv6, we have the following structure in the Context ID:
      24                 :            :  *
      25                 :            :  * 31                         7          0
      26                 :            :  * +-------------------------+-----------+
      27                 :            :  * |      process ID         |   ASID    |
      28                 :            :  * +-------------------------+-----------+
      29                 :            :  * |              context ID             |
      30                 :            :  * +-------------------------------------+
      31                 :            :  *
      32                 :            :  * The ASID is used to tag entries in the CPU caches and TLBs.
      33                 :            :  * The context ID is used by debuggers and trace logic, and
      34                 :            :  * should be unique within all running processes.
      35                 :            :  *
       36                 :            :  * In big-endian operation, the two 32-bit words are swapped if accessed
      37                 :            :  * by non-64-bit operations.
      38                 :            :  */
      39                 :            : #define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
      40                 :            : #define NUM_USER_ASIDS          ASID_FIRST_VERSION
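
To make the context ID layout and the two macros above concrete, here is a minimal,
self-contained user-space sketch (not kernel code; the file name and the ex_* names
are hypothetical) of how the 64-bit mm->context.id packs a rollover generation in the
bits above the hardware ASID, assuming ASID_BITS == 8 as in this file's classic-MMU
configuration:

    /* ex_context_id.c - hypothetical illustration of the generation/ASID split. */
    #include <stdio.h>
    #include <stdint.h>

    #define EX_ASID_BITS      8
    #define EX_ASID_MASK      (~0ULL << EX_ASID_BITS)   /* generation bits      */
    #define EX_FIRST_VERSION  (1ULL << EX_ASID_BITS)    /* generation increment */

    int main(void)
    {
            /* Generation 3 (generations count up from EX_FIRST_VERSION),
             * hardware ASID 0x2a. */
            uint64_t id = (3 * EX_FIRST_VERSION) | 0x2a;

            printf("generation: %#llx\n",
                   (unsigned long long)(id & EX_ASID_MASK));   /* 0x300 */
            printf("asid:       %#llx\n",
                   (unsigned long long)(id & ~EX_ASID_MASK));  /* 0x2a  */
            return 0;
    }
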
      41                 :            : 
      42                 :            : static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
      43                 :            : static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
      44                 :            : static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
      45                 :            : 
      46                 :            : static DEFINE_PER_CPU(atomic64_t, active_asids);
      47                 :            : static DEFINE_PER_CPU(u64, reserved_asids);
      48                 :            : static cpumask_t tlb_flush_pending;
      49                 :            : 
      50                 :            : #ifdef CONFIG_ARM_ERRATA_798181
      51                 :            : void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
      52                 :            :                              cpumask_t *mask)
      53                 :            : {
      54                 :            :         int cpu;
      55                 :            :         unsigned long flags;
      56                 :            :         u64 context_id, asid;
      57                 :            : 
      58                 :            :         raw_spin_lock_irqsave(&cpu_asid_lock, flags);
      59                 :            :         context_id = mm->context.id.counter;
      60                 :            :         for_each_online_cpu(cpu) {
      61                 :            :                 if (cpu == this_cpu)
      62                 :            :                         continue;
      63                 :            :                 /*
      64                 :            :                  * We only need to send an IPI if the other CPUs are
      65                 :            :                  * running the same ASID as the one being invalidated.
      66                 :            :                  */
      67                 :            :                 asid = per_cpu(active_asids, cpu).counter;
      68                 :            :                 if (asid == 0)
      69                 :            :                         asid = per_cpu(reserved_asids, cpu);
      70                 :            :                 if (context_id == asid)
      71                 :            :                         cpumask_set_cpu(cpu, mask);
      72                 :            :         }
      73                 :            :         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
      74                 :            : }
      75                 :            : #endif
      76                 :            : 
      77                 :            : #ifdef CONFIG_ARM_LPAE
      78                 :            : /*
       79                 :            :  * With LPAE, the ASID and page tables are updated atomically, so there is
      80                 :            :  * no need for a reserved set of tables (the active ASID tracking prevents
      81                 :            :  * any issues across a rollover).
      82                 :            :  */
      83                 :            : #define cpu_set_reserved_ttbr0()
      84                 :            : #else
      85                 :            : static void cpu_set_reserved_ttbr0(void)
      86                 :            : {
      87                 :            :         u32 ttb;
      88                 :            :         /*
      89                 :            :          * Copy TTBR1 into TTBR0.
      90                 :            :          * This points at swapper_pg_dir, which contains only global
      91                 :            :          * entries so any speculative walks are perfectly safe.
      92                 :            :          */
      93                 :    4018533 :         asm volatile(
      94                 :            :         "  mrc     p15, 0, %0, c2, c0, 1           @ read TTBR1\n"
      95                 :            :         "  mcr     p15, 0, %0, c2, c0, 0           @ set TTBR0\n"
      96                 :            :         : "=r" (ttb));
      97                 :    4043254 :         isb();
      98                 :            : }
      99                 :            : #endif
     100                 :            : 
     101                 :            : #ifdef CONFIG_PID_IN_CONTEXTIDR
     102                 :            : static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
     103                 :            :                                void *t)
     104                 :            : {
     105                 :            :         u32 contextidr;
     106                 :            :         pid_t pid;
     107                 :            :         struct thread_info *thread = t;
     108                 :            : 
     109                 :            :         if (cmd != THREAD_NOTIFY_SWITCH)
     110                 :            :                 return NOTIFY_DONE;
     111                 :            : 
     112                 :            :         pid = task_pid_nr(thread->task) << ASID_BITS;
     113                 :            :         asm volatile(
     114                 :            :         "  mrc     p15, 0, %0, c13, c0, 1\n"
     115                 :            :         "  and     %0, %0, %2\n"
     116                 :            :         "  orr     %0, %0, %1\n"
     117                 :            :         "  mcr     p15, 0, %0, c13, c0, 1\n"
     118                 :            :         : "=r" (contextidr), "+r" (pid)
     119                 :            :         : "I" (~ASID_MASK));
     120                 :            :         isb();
     121                 :            : 
     122                 :            :         return NOTIFY_OK;
     123                 :            : }
     124                 :            : 
     125                 :            : static struct notifier_block contextidr_notifier_block = {
     126                 :            :         .notifier_call = contextidr_notifier,
     127                 :            : };
     128                 :            : 
     129                 :            : static int __init contextidr_notifier_init(void)
     130                 :            : {
     131                 :            :         return thread_register_notifier(&contextidr_notifier_block);
     132                 :            : }
     133                 :            : arch_initcall(contextidr_notifier_init);
     134                 :            : #endif
     135                 :            : 
     136                 :       6672 : static void flush_context(unsigned int cpu)
     137                 :            : {
     138                 :            :         int i;
     139                 :            :         u64 asid;
     140                 :            : 
     141                 :            :         /* Update the list of reserved ASIDs and the ASID bitmap. */
     142                 :            :         bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
     143         [ +  + ]:      40032 :         for_each_possible_cpu(i) {
     144                 :      53376 :                 asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
     145                 :            :                 /*
     146                 :            :                  * If this CPU has already been through a
     147                 :            :                  * rollover, but hasn't run another task in
     148                 :            :                  * the meantime, we must preserve its reserved
     149                 :            :                  * ASID, as this is the only trace we have of
     150                 :            :                  * the process it is still running.
     151                 :            :                  */
     152         [ +  + ]:      26688 :                 if (asid == 0)
     153                 :        170 :                         asid = per_cpu(reserved_asids, i);
     154                 :      26688 :                 __set_bit(asid & ~ASID_MASK, asid_map);
     155                 :      26688 :                 per_cpu(reserved_asids, i) = asid;
     156                 :            :         }
     157                 :            : 
     158                 :            :         /* Queue a TLB invalidate and flush the I-cache if necessary. */
     159                 :            :         cpumask_setall(&tlb_flush_pending);
     160                 :            : 
     161         [ -  + ]:       6672 :         if (icache_is_vivt_asid_tagged())
     162                 :            :                 __flush_icache_all();
     163                 :       6672 : }
     164                 :            : 
     165                 :      44696 : static bool check_update_reserved_asid(u64 asid, u64 newasid)
     166                 :            : {
     167                 :            :         int cpu;
     168                 :            :         bool hit = false;
     169                 :            : 
     170                 :            :         /*
     171                 :            :          * Iterate over the set of reserved ASIDs looking for a match.
     172                 :            :          * If we find one, then we can update our mm to use newasid
     173                 :            :          * (i.e. the same ASID in the current generation) but we can't
     174                 :            :          * exit the loop early, since we need to ensure that all copies
     175                 :            :          * of the old ASID are updated to reflect the mm. Failure to do
     176                 :            :          * so could result in us missing the reserved ASID in a future
     177                 :            :          * generation.
     178                 :            :          */
     179         [ +  + ]:     268176 :         for_each_possible_cpu(cpu) {
     180         [ +  + ]:     178784 :                 if (per_cpu(reserved_asids, cpu) == asid) {
     181                 :            :                         hit = true;
     182                 :       8855 :                         per_cpu(reserved_asids, cpu) = newasid;
     183                 :            :                 }
     184                 :            :         }
     185                 :            : 
     186                 :      44696 :         return hit;
     187                 :            : }
     188                 :            : 
     189                 :    1706602 : static u64 new_context(struct mm_struct *mm, unsigned int cpu)
     190                 :            : {
     191                 :            :         static u32 cur_idx = 1;
     192                 :    3413204 :         u64 asid = atomic64_read(&mm->context.id);
     193                 :    1706602 :         u64 generation = atomic64_read(&asid_generation);
     194                 :            : 
     195         [ +  + ]:    1706602 :         if (asid != 0) {
     196                 :      44696 :                 u64 newasid = generation | (asid & ~ASID_MASK);
     197                 :            : 
     198                 :            :                 /*
     199                 :            :                  * If our current ASID was active during a rollover, we
     200                 :            :                  * can continue to use it and this was just a false alarm.
     201                 :            :                  */
     202         [ +  + ]:      44696 :                 if (check_update_reserved_asid(asid, newasid))
     203                 :            :                         return newasid;
     204                 :            : 
     205                 :            :                 /*
     206                 :            :                  * We had a valid ASID in a previous life, so try to re-use
      207                 :            :          * it if possible.
     208                 :            :                  */
     209                 :            :                 asid &= ~ASID_MASK;
     210         [ +  + ]:      71842 :                 if (!__test_and_set_bit(asid, asid_map))
     211                 :            :                         return newasid;
     212                 :            :         }
     213                 :            : 
     214                 :            :         /*
     215                 :            :          * Allocate a free ASID. If we can't find one, take a note of the
     216                 :            :          * currently active ASIDs and mark the TLBs as requiring flushes.
     217                 :            :          * We always count from ASID #1, as we reserve ASID #0 to switch
     218                 :            :          * via TTBR0 and to avoid speculative page table walks from hitting
     219                 :            :          * in any partial walk caches, which could be populated from
     220                 :            :          * overlapping level-1 descriptors used to map both the module
     221                 :            :          * area and the userspace stack.
     222                 :            :          */
     223                 :    1667951 :         asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
     224         [ +  + ]:    1667951 :         if (asid == NUM_USER_ASIDS) {
     225                 :       6672 :                 generation = atomic64_add_return(ASID_FIRST_VERSION,
     226                 :            :                                                  &asid_generation);
     227                 :       6672 :                 flush_context(cpu);
     228                 :       6672 :                 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
     229                 :            :         }
     230                 :            : 
     231                 :    1667951 :         __set_bit(asid, asid_map);
     232                 :    1667951 :         cur_idx = asid;
     233                 :            :         cpumask_clear(mm_cpumask(mm));
     234                 :    1667951 :         return asid | generation;
     235                 :            : }
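
The allocation loop in new_context() can be modelled in isolation. The sketch below
is a simplified user-space model (names are hypothetical, not kernel code): search the
bitmap from cur_idx, and on exhaustion bump the generation, clear the map, and retry
from ASID #1. It deliberately elides the reserved-ASID bookkeeping done by
flush_context()/check_update_reserved_asid(), the cpu_asid_lock locking, and the
queued TLB flush.

    /* ex_asid_alloc.c - simplified model of the new_context() allocation loop. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EX_NUM_ASIDS 256                  /* 1 << ASID_BITS            */

    static bool     ex_map[EX_NUM_ASIDS];     /* stand-in for asid_map     */
    static uint64_t ex_generation = EX_NUM_ASIDS;
    static unsigned ex_cur_idx = 1;

    static uint64_t ex_new_context(void)
    {
            unsigned asid;

            /* Search from the last allocation point; ASID #0 is reserved. */
            for (asid = ex_cur_idx; asid < EX_NUM_ASIDS; asid++)
                    if (!ex_map[asid])
                            goto found;

            /* Rollover: new generation, fresh bitmap, retry from #1.
             * (The kernel additionally re-marks reserved ASIDs here and
             * queues a TLB flush on every CPU.) */
            ex_generation += EX_NUM_ASIDS;
            memset(ex_map, 0, sizeof(ex_map));
            for (asid = 1; asid < EX_NUM_ASIDS; asid++)
                    if (!ex_map[asid])
                            break;
    found:
            ex_map[asid] = true;
            ex_cur_idx = asid;
            return ex_generation | asid;
    }

    int main(void)
    {
            printf("first id:  %#llx\n", (unsigned long long)ex_new_context());
            printf("second id: %#llx\n", (unsigned long long)ex_new_context());
            return 0;
    }
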
     236                 :            : 
     237                 :    4018533 : void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
     238                 :            : {
     239                 :            :         unsigned long flags;
     240                 :    4018533 :         unsigned int cpu = smp_processor_id();
     241                 :            :         u64 asid;
     242                 :            : 
     243         [ -  + ]:    4018533 :         if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
     244                 :          0 :                 __check_vmalloc_seq(mm);
     245                 :            : 
     246                 :            :         /*
      247                 :            :          * We cannot update the pgd and the ASID atomically with classic
     248                 :            :          * MMU, so switch exclusively to global mappings to avoid
     249                 :            :          * speculative page table walking with the wrong TTBR.
     250                 :            :          */
     251                 :            :         cpu_set_reserved_ttbr0();
     252                 :            : 
     253                 :    8078346 :         asid = atomic64_read(&mm->context.id);
     254         [ +  + ]:    4029732 :         if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
     255         [ +  + ]:    4654058 :             && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
     256                 :            :                 goto switch_mm_fastpath;
     257                 :            : 
     258                 :    1709308 :         raw_spin_lock_irqsave(&cpu_asid_lock, flags);
     259                 :            :         /* Check that our ASID belongs to the current generation. */
     260                 :    1707576 :         asid = atomic64_read(&mm->context.id);
     261         [ +  + ]:    1707576 :         if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
     262                 :    1706602 :                 asid = new_context(mm, cpu);
     263                 :    1706602 :                 atomic64_set(&mm->context.id, asid);
     264                 :            :         }
     265                 :            : 
     266         [ +  + ]:    3415152 :         if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
     267                 :            :                 local_flush_bp_all();
     268                 :            :                 local_flush_tlb_all();
     269                 :            :         }
     270                 :            : 
     271                 :    1707576 :         atomic64_set(&per_cpu(active_asids, cpu), asid);
     272                 :            :         cpumask_set_cpu(cpu, mm_cpumask(mm));
     273                 :    1707576 :         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
     274                 :            : 
     275                 :            : switch_mm_fastpath:
     276                 :    8078540 :         cpu_switch_mm(mm->pgd, mm);
     277                 :    4042545 : }
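
The fast-path test above, !((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS),
works because XORing the mm's context ID with the current generation cancels the
generation bits exactly when both belong to the same generation, so shifting out the
ASID bits leaves zero only in that case. A minimal standalone sketch, again assuming
ASID_BITS == 8 (the ex_* names are hypothetical):

    /* ex_fastpath.c - illustration of the generation check. */
    #include <stdint.h>
    #include <stdio.h>

    #define EX_ASID_BITS 8

    static int ex_same_generation(uint64_t id, uint64_t generation)
    {
            /* XOR cancels the generation bits iff they are equal; shifting
             * out the ASID then leaves zero only in that case. */
            return !((id ^ generation) >> EX_ASID_BITS);
    }

    int main(void)
    {
            uint64_t generation = 0x300;            /* current generation */

            printf("%d\n", ex_same_generation(0x32a, generation)); /* 1: current */
            printf("%d\n", ex_same_generation(0x22a, generation)); /* 0: stale   */
            return 0;
    }
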

Generated by: LCOV version 1.14