LCOV - code coverage report
Current view: top level - mm - memory.c (source / functions)
Test: combined.info
Date: 2022-04-01 14:17:54

                  Hit     Total   Coverage
Lines:            900      1742     51.7 %
Functions:         58       106     54.7 %
Branches:         450      1243     36.2 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0-only
       2                 :            : /*
       3                 :            :  *  linux/mm/memory.c
       4                 :            :  *
       5                 :            :  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
       6                 :            :  */
       7                 :            : 
       8                 :            : /*
       9                 :            :  * demand-loading started 01.12.91 - seems it is high on the list of
      10                 :            :  * things wanted, and it should be easy to implement. - Linus
      11                 :            :  */
      12                 :            : 
      13                 :            : /*
       14                 :            :  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
      15                 :            :  * pages started 02.12.91, seems to work. - Linus.
      16                 :            :  *
      17                 :            :  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
      18                 :            :  * would have taken more than the 6M I have free, but it worked well as
      19                 :            :  * far as I could see.
      20                 :            :  *
      21                 :            :  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
      22                 :            :  */
      23                 :            : 
      24                 :            : /*
      25                 :            :  * Real VM (paging to/from disk) started 18.12.91. Much more work and
      26                 :            :  * thought has to go into this. Oh, well..
      27                 :            :  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
      28                 :            :  *              Found it. Everything seems to work now.
      29                 :            :  * 20.12.91  -  Ok, making the swap-device changeable like the root.
      30                 :            :  */
      31                 :            : 
      32                 :            : /*
      33                 :            :  * 05.04.94  -  Multi-page memory management added for v1.1.
      34                 :            :  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
      35                 :            :  *
      36                 :            :  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
      37                 :            :  *              (Gerhard.Wichert@pdb.siemens.de)
      38                 :            :  *
      39                 :            :  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
      40                 :            :  */
      41                 :            : 
      42                 :            : #include <linux/kernel_stat.h>
      43                 :            : #include <linux/mm.h>
      44                 :            : #include <linux/sched/mm.h>
      45                 :            : #include <linux/sched/coredump.h>
      46                 :            : #include <linux/sched/numa_balancing.h>
      47                 :            : #include <linux/sched/task.h>
      48                 :            : #include <linux/hugetlb.h>
      49                 :            : #include <linux/mman.h>
      50                 :            : #include <linux/swap.h>
      51                 :            : #include <linux/highmem.h>
      52                 :            : #include <linux/pagemap.h>
      53                 :            : #include <linux/memremap.h>
      54                 :            : #include <linux/ksm.h>
      55                 :            : #include <linux/rmap.h>
      56                 :            : #include <linux/export.h>
      57                 :            : #include <linux/delayacct.h>
      58                 :            : #include <linux/init.h>
      59                 :            : #include <linux/pfn_t.h>
      60                 :            : #include <linux/writeback.h>
      61                 :            : #include <linux/memcontrol.h>
      62                 :            : #include <linux/mmu_notifier.h>
      63                 :            : #include <linux/swapops.h>
      64                 :            : #include <linux/elf.h>
      65                 :            : #include <linux/gfp.h>
      66                 :            : #include <linux/migrate.h>
      67                 :            : #include <linux/string.h>
      68                 :            : #include <linux/dma-debug.h>
      69                 :            : #include <linux/debugfs.h>
      70                 :            : #include <linux/userfaultfd_k.h>
      71                 :            : #include <linux/dax.h>
      72                 :            : #include <linux/oom.h>
      73                 :            : #include <linux/numa.h>
      74                 :            : 
      75                 :            : #include <trace/events/kmem.h>
      76                 :            : 
      77                 :            : #include <asm/io.h>
      78                 :            : #include <asm/mmu_context.h>
      79                 :            : #include <asm/pgalloc.h>
      80                 :            : #include <linux/uaccess.h>
      81                 :            : #include <asm/tlb.h>
      82                 :            : #include <asm/tlbflush.h>
      83                 :            : #include <asm/pgtable.h>
      84                 :            : 
      85                 :            : #include "internal.h"
      86                 :            : 
      87                 :            : #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
      88                 :            : #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
      89                 :            : #endif
      90                 :            : 
      91                 :            : #ifndef CONFIG_NEED_MULTIPLE_NODES
      92                 :            : /* use the per-pgdat data instead for discontigmem - mbligh */
      93                 :            : unsigned long max_mapnr;
      94                 :            : EXPORT_SYMBOL(max_mapnr);
      95                 :            : 
      96                 :            : struct page *mem_map;
      97                 :            : EXPORT_SYMBOL(mem_map);
      98                 :            : #endif
      99                 :            : 
     100                 :            : /*
     101                 :            :  * A number of key systems in x86 including ioremap() rely on the assumption
      102                 :            :  * that high_memory defines the upper bound on direct map memory, the end
     103                 :            :  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
     104                 :            :  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
     105                 :            :  * and ZONE_HIGHMEM.
     106                 :            :  */
     107                 :            : void *high_memory;
     108                 :            : EXPORT_SYMBOL(high_memory);
     109                 :            : 
     110                 :            : /*
     111                 :            :  * Randomize the address space (stacks, mmaps, brk, etc.).
     112                 :            :  *
     113                 :            :  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
     114                 :            :  *   as ancient (libc5 based) binaries can segfault. )
     115                 :            :  */
     116                 :            : int randomize_va_space __read_mostly =
     117                 :            : #ifdef CONFIG_COMPAT_BRK
     118                 :            :                                         1;
     119                 :            : #else
     120                 :            :                                         2;
     121                 :            : #endif
     122                 :            : 
     123                 :            : #ifndef arch_faults_on_old_pte
     124                 :            : static inline bool arch_faults_on_old_pte(void)
     125                 :            : {
     126                 :            :         /*
      127                 :            :          * Architectures that lack a hardware access-flag feature need
      128                 :            :          * to implement their own helper. By default, "true" means a
      129                 :            :          * page fault will be taken on an old pte.
     130                 :            :          */
     131                 :            :         return true;
     132                 :            : }
     133                 :            : #endif
     134                 :            : 
     135                 :          0 : static int __init disable_randmaps(char *s)
     136                 :            : {
     137                 :          0 :         randomize_va_space = 0;
     138                 :          0 :         return 1;
     139                 :            : }
     140                 :            : __setup("norandmaps", disable_randmaps);
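                          :            : 
                          :            : /*
                          :            :  * Editor's sketch (not part of memory.c): randomize_va_space is also
                          :            :  * writable at runtime via /proc/sys/kernel/randomize_va_space, and
                          :            :  * booting with "norandmaps" (handled above) is equivalent to writing
                          :            :  * 0 there.  A minimal userspace reader, assuming a Linux procfs; the
                          :            :  * function name is made up:
                          :            :  */
                          :            : #include <stdio.h>
                          :            : 
                          :            : int randomize_va_space_read(void)
                          :            : {
                          :            :         FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");
                          :            :         int v = -1;
                          :            : 
                          :            :         if (f) {
                          :            :                 if (fscanf(f, "%d", &v) != 1)   /* expect 0, 1 or 2 */
                          :            :                         v = -1;
                          :            :                 fclose(f);
                          :            :         }
                          :            :         return v;
                          :            : }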
     141                 :            : 
     142                 :            : unsigned long zero_pfn __read_mostly;
     143                 :            : EXPORT_SYMBOL(zero_pfn);
     144                 :            : 
     145                 :            : unsigned long highest_memmap_pfn __read_mostly;
     146                 :            : 
     147                 :            : /*
     148                 :            :  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
     149                 :            :  */
     150                 :         11 : static int __init init_zero_pfn(void)
     151                 :            : {
     152         [ -  + ]:         11 :         zero_pfn = page_to_pfn(ZERO_PAGE(0));
     153                 :         11 :         return 0;
     154                 :            : }
     155                 :            : core_initcall(init_zero_pfn);
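                          :            : 
                          :            : /*
                          :            :  * Editor's sketch (not part of memory.c): how the pfn cached by
                          :            :  * init_zero_pfn() above is typically consumed -- a pte's pfn is
                          :            :  * compared against it to recognise the shared zero page (cf.
                          :            :  * is_zero_pfn() in <linux/mm.h>; the helper name here is made up):
                          :            :  */
                          :            : static inline bool pfn_is_zero_page(unsigned long pfn)
                          :            : {
                          :            :         return pfn == zero_pfn;
                          :            : }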
     156                 :            : 
     157                 :    1003016 : void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
     158                 :            : {
     159                 :    1003016 :         trace_rss_stat(mm, member, count);
     160                 :      17798 : }
     161                 :            : 
     162                 :            : #if defined(SPLIT_RSS_COUNTING)
     163                 :            : 
     164                 :     142382 : void sync_mm_rss(struct mm_struct *mm)
     165                 :            : {
     166                 :     142382 :         int i;
     167                 :            : 
     168         [ +  + ]:     711910 :         for (i = 0; i < NR_MM_COUNTERS; i++) {
     169         [ +  + ]:     569528 :                 if (current->rss_stat.count[i]) {
     170                 :     130157 :                         add_mm_counter(mm, i, current->rss_stat.count[i]);
     171                 :     130157 :                         current->rss_stat.count[i] = 0;
     172                 :            :                 }
     173                 :            :         }
     174                 :     142382 :         current->rss_stat.events = 0;
     175                 :     142382 : }
     176                 :            : 
     177                 :    6445540 : static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
     178                 :            : {
     179         [ +  + ]:    6445540 :         struct task_struct *task = current;
     180                 :            : 
     181         [ +  + ]:    6445540 :         if (likely(task->mm == mm))
     182                 :    6436586 :                 task->rss_stat.count[member] += val;
     183                 :            :         else
     184                 :       8954 :                 add_mm_counter(mm, member, val);
     185                 :    6445540 : }
     186                 :            : #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
     187                 :            : #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
     188                 :            : 
     189                 :            : /* sync counter once per 64 page faults */
     190                 :            : #define TASK_RSS_EVENTS_THRESH  (64)
     191                 :    1155182 : static void check_sync_rss_stat(struct task_struct *task)
     192                 :            : {
     193         [ +  - ]:    1155182 :         if (unlikely(task != current))
     194                 :            :                 return;
     195         [ +  + ]:    1155182 :         if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
     196                 :       3798 :                 sync_mm_rss(task->mm);
     197                 :            : }
     198                 :            : #else /* SPLIT_RSS_COUNTING */
     199                 :            : 
     200                 :            : #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
     201                 :            : #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
     202                 :            : 
     203                 :            : static void check_sync_rss_stat(struct task_struct *task)
     204                 :            : {
     205                 :            : }
     206                 :            : 
     207                 :            : #endif /* SPLIT_RSS_COUNTING */
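                          :            : 
                          :            : /*
                          :            :  * Editor's sketch (not part of memory.c): the split-counting scheme
                          :            :  * above in miniature, recast with C11 atomics -- batch updates in
                          :            :  * thread-local storage and fold them into the shared counter only
                          :            :  * once per threshold, trading momentary accuracy for far fewer
                          :            :  * atomic read-modify-writes.  All names are hypothetical.
                          :            :  */
                          :            : #include <stdatomic.h>
                          :            : 
                          :            : #define RSS_EVENTS_THRESH 64
                          :            : 
                          :            : static atomic_long shared_rss;                  /* mm-wide counter */
                          :            : static _Thread_local long pending_rss;          /* this thread's batch */
                          :            : static _Thread_local int rss_events;
                          :            : 
                          :            : static void rss_add(long delta)
                          :            : {
                          :            :         pending_rss += delta;
                          :            :         if (++rss_events > RSS_EVENTS_THRESH) { /* flush once per 64 updates */
                          :            :                 atomic_fetch_add(&shared_rss, pending_rss);
                          :            :                 pending_rss = 0;
                          :            :                 rss_events = 0;
                          :            :         }
                          :            : }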
     208                 :            : 
     209                 :            : /*
     210                 :            :  * Note: this doesn't free the actual pages themselves. That
     211                 :            :  * has been handled earlier when unmapping all the memory regions.
     212                 :            :  */
     213                 :     128805 : static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
     214                 :            :                            unsigned long addr)
     215                 :            : {
     216         [ +  - ]:     128805 :         pgtable_t token = pmd_pgtable(*pmd);
     217                 :     128805 :         pmd_clear(pmd);
     218                 :     128805 :         pte_free_tlb(tlb, token, addr);
     219                 :     128805 :         mm_dec_nr_ptes(tlb->mm);
     220                 :     128805 : }
     221                 :            : 
     222                 :      78926 : static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
     223                 :            :                                 unsigned long addr, unsigned long end,
     224                 :            :                                 unsigned long floor, unsigned long ceiling)
     225                 :            : {
     226                 :      78926 :         pmd_t *pmd;
     227                 :      78926 :         unsigned long next;
     228                 :      78926 :         unsigned long start;
     229                 :            : 
     230                 :      78926 :         start = addr;
     231         [ +  - ]:      78926 :         pmd = pmd_offset(pud, addr);
     232                 :     134361 :         do {
     233         [ +  + ]:     134361 :                 next = pmd_addr_end(addr, end);
     234         [ +  + ]:     134361 :                 if (pmd_none_or_clear_bad(pmd))
     235                 :       5556 :                         continue;
     236                 :     128805 :                 free_pte_range(tlb, pmd, addr);
     237         [ +  + ]:     134361 :         } while (pmd++, addr = next, addr != end);
     238                 :            : 
     239                 :      78926 :         start &= PUD_MASK;
     240         [ +  + ]:      78926 :         if (start < floor)
     241                 :            :                 return;
     242         [ +  + ]:      78338 :         if (ceiling) {
     243                 :      52190 :                 ceiling &= PUD_MASK;
     244         [ +  - ]:      52190 :                 if (!ceiling)
     245                 :            :                         return;
     246                 :            :         }
     247         [ +  + ]:      78338 :         if (end - 1 > ceiling - 1)
     248                 :            :                 return;
     249                 :            : 
     250         [ +  - ]:      61857 :         pmd = pmd_offset(pud, start);
     251                 :      61857 :         pud_clear(pud);
     252                 :      61857 :         pmd_free_tlb(tlb, pmd, start);
     253                 :      61857 :         mm_dec_nr_pmds(tlb->mm);
     254                 :            : }
     255                 :            : 
     256                 :      79246 : static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
     257                 :            :                                 unsigned long addr, unsigned long end,
     258                 :            :                                 unsigned long floor, unsigned long ceiling)
     259                 :            : {
     260                 :      79246 :         pud_t *pud;
     261                 :      79246 :         unsigned long next;
     262                 :      79246 :         unsigned long start;
     263                 :            : 
     264                 :      79246 :         start = addr;
     265                 :      79246 :         pud = pud_offset(p4d, addr);
     266                 :      79284 :         do {
     267         [ +  + ]:      79284 :                 next = pud_addr_end(addr, end);
     268         [ +  + ]:      79284 :                 if (pud_none_or_clear_bad(pud))
     269                 :        358 :                         continue;
     270                 :      78926 :                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
     271         [ +  + ]:      79284 :         } while (pud++, addr = next, addr != end);
     272                 :            : 
     273                 :      79246 :         start &= P4D_MASK;
     274         [ +  + ]:      79246 :         if (start < floor)
     275                 :            :                 return;
     276         [ +  + ]:      69888 :         if (ceiling) {
     277                 :      52156 :                 ceiling &= P4D_MASK;
     278         [ +  - ]:      52156 :                 if (!ceiling)
     279                 :            :                         return;
     280                 :            :         }
     281         [ +  + ]:      69888 :         if (end - 1 > ceiling - 1)
     282                 :            :                 return;
     283                 :            : 
     284                 :      44755 :         pud = pud_offset(p4d, start);
     285                 :      44755 :         p4d_clear(p4d);
     286                 :      44755 :         pud_free_tlb(tlb, pud, start);
     287                 :      44755 :         mm_dec_nr_puds(tlb->mm);
     288                 :            : }
     289                 :            : 
     290                 :      80045 : static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
     291                 :            :                                 unsigned long addr, unsigned long end,
     292                 :            :                                 unsigned long floor, unsigned long ceiling)
     293                 :            : {
     294                 :      80045 :         p4d_t *p4d;
     295                 :      80045 :         unsigned long next;
     296                 :      80045 :         unsigned long start;
     297                 :            : 
     298                 :      80045 :         start = addr;
     299                 :      80045 :         p4d = p4d_offset(pgd, addr);
     300                 :      80045 :         do {
     301         [ -  + ]:      80045 :                 next = p4d_addr_end(addr, end);
     302         [ +  + ]:      80045 :                 if (p4d_none_or_clear_bad(p4d))
     303                 :        799 :                         continue;
     304                 :      79246 :                 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
     305         [ -  + ]:      80045 :         } while (p4d++, addr = next, addr != end);
     306                 :            : 
     307                 :      80045 :         start &= PGDIR_MASK;
     308         [ +  + ]:      80045 :         if (start < floor)
     309                 :            :                 return;
     310         [ +  + ]:      69888 :         if (ceiling) {
     311                 :      52156 :                 ceiling &= PGDIR_MASK;
     312         [ +  - ]:      52156 :                 if (!ceiling)
     313                 :            :                         return;
     314                 :            :         }
     315         [ +  + ]:      69888 :         if (end - 1 > ceiling - 1)
     316                 :            :                 return;
     317                 :            : 
     318                 :      44755 :         p4d = p4d_offset(pgd, start);
     319      [ -  -  + ]:      44755 :         pgd_clear(pgd);
     320                 :      44755 :         p4d_free_tlb(tlb, p4d, start);
     321                 :            : }
     322                 :            : 
     323                 :            : /*
     324                 :            :  * This function frees user-level page tables of a process.
     325                 :            :  */
     326                 :     211384 : void free_pgd_range(struct mmu_gather *tlb,
     327                 :            :                         unsigned long addr, unsigned long end,
     328                 :            :                         unsigned long floor, unsigned long ceiling)
     329                 :            : {
     330                 :     211384 :         pgd_t *pgd;
     331                 :     211384 :         unsigned long next;
     332                 :            : 
     333                 :            :         /*
     334                 :            :          * The next few lines have given us lots of grief...
     335                 :            :          *
     336                 :            :          * Why are we testing PMD* at this top level?  Because often
     337                 :            :          * there will be no work to do at all, and we'd prefer not to
     338                 :            :          * go all the way down to the bottom just to discover that.
     339                 :            :          *
     340                 :            :          * Why all these "- 1"s?  Because 0 represents both the bottom
     341                 :            :          * of the address space and the top of it (using -1 for the
     342                 :            :          * top wouldn't help much: the masks would do the wrong thing).
     343                 :            :          * The rule is that addr 0 and floor 0 refer to the bottom of
      344                 :            :          * the address space, but end 0 and ceiling 0 refer to the top.
     345                 :            :          * Comparisons need to use "end - 1" and "ceiling - 1" (though
     346                 :            :          * that end 0 case should be mythical).
     347                 :            :          *
     348                 :            :          * Wherever addr is brought up or ceiling brought down, we must
     349                 :            :          * be careful to reject "the opposite 0" before it confuses the
     350                 :            :          * subsequent tests.  But what about where end is brought down
      351                 :            :          * by PMD_SIZE below? No, end can't go down to 0 there.
     352                 :            :          *
     353                 :            :          * Whereas we round start (addr) and ceiling down, by different
     354                 :            :          * masks at different levels, in order to test whether a table
      355                 :            :          * now has no other vmas using it and so can be freed, we don't
     356                 :            :          * bother to round floor or end up - the tests don't need that.
     357                 :            :          */
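                          :            : 
                          :            :         /*
                          :            :          * Editor's worked example (not part of memory.c): why
                          :            :          * "end - 1 > ceiling - 1" rather than "end > ceiling".
                          :            :          * ceiling == 0 encodes the very top of the address space,
                          :            :          * and in unsigned arithmetic ceiling - 1 is then ULONG_MAX,
                          :            :          * so the test is never true: end is never clipped against a
                          :            :          * ceiling that means "nothing above us".  For any non-zero
                          :            :          * ceiling, say end == 0x9000000 and ceiling == 0x8000000, it
                          :            :          * reduces to the ordinary end > ceiling.
                          :            :          */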
     358                 :            : 
     359                 :     211384 :         addr &= PMD_MASK;
     360         [ +  + ]:     211384 :         if (addr < floor) {
     361                 :     129676 :                 addr += PMD_SIZE;
     362         [ +  - ]:     129676 :                 if (!addr)
     363                 :            :                         return;
     364                 :            :         }
     365         [ +  + ]:     211384 :         if (ceiling) {
     366                 :     184698 :                 ceiling &= PMD_MASK;
     367         [ +  - ]:     184698 :                 if (!ceiling)
     368                 :            :                         return;
     369                 :            :         }
     370         [ +  + ]:     211384 :         if (end - 1 > ceiling - 1)
     371                 :     114312 :                 end -= PMD_SIZE;
     372         [ +  + ]:     211384 :         if (addr > end - 1)
     373                 :            :                 return;
     374                 :            :         /*
      375                 :            :          * We add page table cache pages with PAGE_SIZE
      376                 :            :          * (see pte_free_tlb()); flush the tlb if we need to.
     377                 :            :          */
     378                 :      80045 :         tlb_change_page_size(tlb, PAGE_SIZE);
     379                 :      80045 :         pgd = pgd_offset(tlb->mm, addr);
     380                 :      80045 :         do {
     381         [ +  - ]:      80045 :                 next = pgd_addr_end(addr, end);
     382         [ -  + ]:      80045 :                 if (pgd_none_or_clear_bad(pgd))
     383                 :          0 :                         continue;
     384                 :      80045 :                 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
     385         [ -  + ]:      80045 :         } while (pgd++, addr = next, addr != end);
     386                 :            : }
     387                 :            : 
     388                 :     150505 : void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
     389                 :            :                 unsigned long floor, unsigned long ceiling)
     390                 :            : {
     391         [ +  + ]:     352935 :         while (vma) {
     392                 :     202430 :                 struct vm_area_struct *next = vma->vm_next;
     393                 :     202430 :                 unsigned long addr = vma->vm_start;
     394                 :            : 
     395                 :            :                 /*
     396                 :            :                  * Hide vma from rmap and truncate_pagecache before freeing
     397                 :            :                  * pgtables
     398                 :            :                  */
     399                 :     202430 :                 unlink_anon_vmas(vma);
     400                 :     202430 :                 unlink_file_vma(vma);
     401                 :            : 
     402         [ +  - ]:     202430 :                 if (is_vm_hugetlb_page(vma)) {
     403         [ #  # ]:          0 :                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
     404                 :            :                                 floor, next ? next->vm_start : ceiling);
     405                 :            :                 } else {
     406                 :            :                         /*
     407                 :            :                          * Optimization: gather nearby vmas into one call down
     408                 :            :                          */
     409   [ +  +  +  + ]:    1029477 :                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
     410         [ +  - ]:     827047 :                                && !is_vm_hugetlb_page(next)) {
     411                 :     827047 :                                 vma = next;
     412                 :     827047 :                                 next = vma->vm_next;
     413                 :     827047 :                                 unlink_anon_vmas(vma);
     414                 :     827047 :                                 unlink_file_vma(vma);
     415                 :            :                         }
     416         [ +  + ]:     202430 :                         free_pgd_range(tlb, addr, vma->vm_end,
     417                 :            :                                 floor, next ? next->vm_start : ceiling);
     418                 :            :                 }
     419                 :            :                 vma = next;
     420                 :            :         }
     421                 :     150505 : }
     422                 :            : 
     423                 :      90551 : int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
     424                 :            : {
     425                 :      90551 :         spinlock_t *ptl;
     426                 :      90551 :         pgtable_t new = pte_alloc_one(mm);
     427         [ +  - ]:      90551 :         if (!new)
     428                 :            :                 return -ENOMEM;
     429                 :            : 
     430                 :            :         /*
      431                 :            :          * Ensure all pte setup (eg. pte page lock and page clearing) is
     432                 :            :          * visible before the pte is made visible to other CPUs by being
     433                 :            :          * put into page tables.
     434                 :            :          *
     435                 :            :          * The other side of the story is the pointer chasing in the page
     436                 :            :          * table walking code (when walking the page table without locking;
     437                 :            :          * ie. most of the time). Fortunately, these data accesses consist
     438                 :            :          * of a chain of data-dependent loads, meaning most CPUs (alpha
     439                 :            :          * being the notable exception) will already guarantee loads are
     440                 :            :          * seen in-order. See the alpha page table accessors for the
     441                 :            :          * smp_read_barrier_depends() barriers in page table walking code.
     442                 :            :          */
     443                 :      90551 :         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
     444                 :            : 
     445                 :      90551 :         ptl = pmd_lock(mm, pmd);
     446         [ +  - ]:      90551 :         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
     447                 :      90551 :                 mm_inc_nr_ptes(mm);
     448                 :      90551 :                 pmd_populate(mm, pmd, new);
     449                 :      90551 :                 new = NULL;
     450                 :            :         }
     451                 :      90551 :         spin_unlock(ptl);
     452         [ -  + ]:      90551 :         if (new)
     453                 :          0 :                 pte_free(mm, new);
     454                 :            :         return 0;
     455                 :            : }
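                          :            : 
                          :            : /*
                          :            :  * Editor's sketch (not part of memory.c): the initialise-then-publish
                          :            :  * pattern that the smp_wmb() in __pte_alloc() implements, recast with
                          :            :  * C11 atomics.  struct obj and shared_ptr are hypothetical.
                          :            :  */
                          :            : #include <stdatomic.h>
                          :            : 
                          :            : struct obj { int ready; };
                          :            : static struct obj *_Atomic shared_ptr;
                          :            : 
                          :            : static void publish(struct obj *o)
                          :            : {
                          :            :         o->ready = 1;           /* initialising stores come first... */
                          :            :         atomic_store_explicit(&shared_ptr, o,
                          :            :                               memory_order_release);
                          :            :                                 /* ...ordered before the publishing store */
                          :            : }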
     456                 :            : 
     457                 :        220 : int __pte_alloc_kernel(pmd_t *pmd)
     458                 :            : {
     459                 :        220 :         pte_t *new = pte_alloc_one_kernel(&init_mm);
     460         [ +  - ]:        220 :         if (!new)
     461                 :            :                 return -ENOMEM;
     462                 :            : 
     463                 :        220 :         smp_wmb(); /* See comment in __pte_alloc */
     464                 :            : 
     465                 :        220 :         spin_lock(&init_mm.page_table_lock);
     466         [ +  - ]:        220 :         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
     467         [ +  - ]:        220 :                 pmd_populate_kernel(&init_mm, pmd, new);
     468                 :        220 :                 new = NULL;
     469                 :            :         }
     470                 :        220 :         spin_unlock(&init_mm.page_table_lock);
     471         [ -  + ]:        220 :         if (new)
     472                 :          0 :                 pte_free_kernel(&init_mm, new);
     473                 :            :         return 0;
     474                 :            : }
     475                 :            : 
     476                 :    1280569 : static inline void init_rss_vec(int *rss)
     477                 :            : {
     478                 :    1280569 :         memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
     479                 :            : }
     480                 :            : 
     481                 :    1280569 : static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
     482                 :            : {
     483                 :    1280569 :         int i;
     484                 :            : 
     485         [ +  + ]:    1280569 :         if (current->mm == mm)
     486                 :     112019 :                 sync_mm_rss(mm);
     487         [ +  + ]:    6402845 :         for (i = 0; i < NR_MM_COUNTERS; i++)
     488         [ +  + ]:    5122276 :                 if (rss[i])
     489                 :     846107 :                         add_mm_counter(mm, i, rss[i]);
     490                 :    1280569 : }
     491                 :            : 
     492                 :            : /*
     493                 :            :  * This function is called to print an error when a bad pte
     494                 :            :  * is found. For example, we might have a PFN-mapped pte in
     495                 :            :  * a region that doesn't allow it.
     496                 :            :  *
     497                 :            :  * The calling function must still handle the error.
     498                 :            :  */
     499                 :          0 : static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
     500                 :            :                           pte_t pte, struct page *page)
     501                 :            : {
     502                 :          0 :         pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
     503                 :          0 :         p4d_t *p4d = p4d_offset(pgd, addr);
     504         [ #  # ]:          0 :         pud_t *pud = pud_offset(p4d, addr);
     505         [ #  # ]:          0 :         pmd_t *pmd = pmd_offset(pud, addr);
     506                 :          0 :         struct address_space *mapping;
     507                 :          0 :         pgoff_t index;
     508                 :          0 :         static unsigned long resume;
     509                 :          0 :         static unsigned long nr_shown;
     510                 :          0 :         static unsigned long nr_unshown;
     511                 :            : 
     512                 :            :         /*
     513                 :            :          * Allow a burst of 60 reports, then keep quiet for that minute;
     514                 :            :          * or allow a steady drip of one report per second.
     515                 :            :          */
     516         [ #  # ]:          0 :         if (nr_shown == 60) {
     517         [ #  # ]:          0 :                 if (time_before(jiffies, resume)) {
     518                 :          0 :                         nr_unshown++;
     519                 :          0 :                         return;
     520                 :            :                 }
     521         [ #  # ]:          0 :                 if (nr_unshown) {
     522                 :          0 :                         pr_alert("BUG: Bad page map: %lu messages suppressed\n",
     523                 :            :                                  nr_unshown);
     524                 :          0 :                         nr_unshown = 0;
     525                 :            :                 }
     526                 :          0 :                 nr_shown = 0;
     527                 :            :         }
     528         [ #  # ]:          0 :         if (nr_shown++ == 0)
     529                 :          0 :                 resume = jiffies + 60 * HZ;
     530                 :            : 
     531         [ #  # ]:          0 :         mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
     532                 :          0 :         index = linear_page_index(vma, addr);
     533                 :            : 
     534                 :          0 :         pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
     535                 :            :                  current->comm,
     536                 :            :                  (long long)pte_val(pte), (long long)pmd_val(*pmd));
     537         [ #  # ]:          0 :         if (page)
     538                 :          0 :                 dump_page(page, "bad pte");
     539                 :          0 :         pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
     540                 :            :                  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
     541   [ #  #  #  #  :          0 :         pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
                   #  # ]
     542                 :            :                  vma->vm_file,
     543                 :            :                  vma->vm_ops ? vma->vm_ops->fault : NULL,
     544                 :            :                  vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
     545                 :            :                  mapping ? mapping->a_ops->readpage : NULL);
     546                 :          0 :         dump_stack();
     547                 :          0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     548                 :            : }
     549                 :            : 
     550                 :            : /*
     551                 :            :  * vm_normal_page -- This function gets the "struct page" associated with a pte.
     552                 :            :  *
     553                 :            :  * "Special" mappings do not wish to be associated with a "struct page" (either
     554                 :            :  * it doesn't exist, or it exists but they don't want to touch it). In this
     555                 :            :  * case, NULL is returned here. "Normal" mappings do have a struct page.
     556                 :            :  *
     557                 :            :  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
     558                 :            :  * pte bit, in which case this function is trivial. Secondly, an architecture
     559                 :            :  * may not have a spare pte bit, which requires a more complicated scheme,
     560                 :            :  * described below.
     561                 :            :  *
     562                 :            :  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
     563                 :            :  * special mapping (even if there are underlying and valid "struct pages").
     564                 :            :  * COWed pages of a VM_PFNMAP are always normal.
     565                 :            :  *
     566                 :            :  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
     567                 :            :  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
     568                 :            :  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
     569                 :            :  * mapping will always honor the rule
     570                 :            :  *
     571                 :            :  *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
     572                 :            :  *
     573                 :            :  * And for normal mappings this is false.
     574                 :            :  *
     575                 :            :  * This restricts such mappings to be a linear translation from virtual address
     576                 :            :  * to pfn. To get around this restriction, we allow arbitrary mappings so long
     577                 :            :  * as the vma is not a COW mapping; in that case, we know that all ptes are
     578                 :            :  * special (because none can have been COWed).
     579                 :            :  *
     580                 :            :  *
     581                 :            :  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
     582                 :            :  *
     583                 :            :  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
      584                 :            :  * page" backing; the difference, however, is that _all_ pages with a struct
     585                 :            :  * page (that is, those where pfn_valid is true) are refcounted and considered
     586                 :            :  * normal pages by the VM. The disadvantage is that pages are refcounted
     587                 :            :  * (which can be slower and simply not an option for some PFNMAP users). The
     588                 :            :  * advantage is that we don't have to follow the strict linearity rule of
     589                 :            :  * PFNMAP mappings in order to support COWable mappings.
     590                 :            :  *
     591                 :            :  */
     592                 :    7641846 : struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
     593                 :            :                             pte_t pte)
     594                 :            : {
     595         [ +  - ]:    7641846 :         unsigned long pfn = pte_pfn(pte);
     596                 :            : 
     597                 :    7641846 :         if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
     598         [ +  + ]:    7641846 :                 if (likely(!pte_special(pte)))
     599                 :    7606927 :                         goto check_pfn;
     600   [ +  +  -  + ]:      34919 :                 if (vma->vm_ops && vma->vm_ops->find_special_page)
     601                 :          0 :                         return vma->vm_ops->find_special_page(vma, addr);
     602         [ +  + ]:      34919 :                 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
     603                 :            :                         return NULL;
     604         [ -  + ]:      10153 :                 if (is_zero_pfn(pfn))
     605                 :            :                         return NULL;
     606         [ #  # ]:          0 :                 if (pte_devmap(pte))
     607                 :            :                         return NULL;
     608                 :            : 
     609                 :          0 :                 print_bad_pte(vma, addr, pte, NULL);
     610                 :          0 :                 return NULL;
     611                 :            :         }
     612                 :            : 
     613                 :            :         /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
     614                 :            : 
     615                 :            :         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
     616                 :            :                 if (vma->vm_flags & VM_MIXEDMAP) {
     617                 :            :                         if (!pfn_valid(pfn))
     618                 :            :                                 return NULL;
     619                 :            :                         goto out;
     620                 :            :                 } else {
     621                 :            :                         unsigned long off;
     622                 :            :                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
     623                 :            :                         if (pfn == vma->vm_pgoff + off)
     624                 :            :                                 return NULL;
     625                 :            :                         if (!is_cow_mapping(vma->vm_flags))
     626                 :            :                                 return NULL;
     627                 :            :                 }
     628                 :            :         }
     629                 :            : 
     630                 :            :         if (is_zero_pfn(pfn))
     631                 :            :                 return NULL;
     632                 :            : 
     633                 :            : check_pfn:
     634         [ -  + ]:    7606927 :         if (unlikely(pfn > highest_memmap_pfn)) {
     635                 :          0 :                 print_bad_pte(vma, addr, pte, NULL);
     636                 :          0 :                 return NULL;
     637                 :            :         }
     638                 :            : 
     639                 :            :         /*
     640                 :            :          * NOTE! We still have PageReserved() pages in the page tables.
     641                 :            :          * eg. VDSO mappings can cause them to exist.
     642                 :            :          */
     643                 :    7606927 : out:
     644                 :    7606927 :         return pfn_to_page(pfn);
     645                 :            : }
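                          :            : 
                          :            : /*
                          :            :  * Editor's worked example (not part of memory.c): the
                          :            :  * remap_pfn_range() linearity rule documented above, with made-up
                          :            :  * numbers and a 4K PAGE_SIZE.  A VM_PFNMAP vma with
                          :            :  * vm_start == 0x7f0000000000 and vm_pgoff == 0x100 maps pfn 0x100 at
                          :            :  * vm_start, pfn 0x101 one page higher, and so on; any pte satisfying
                          :            :  *
                          :            :  *      pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
                          :            :  *
                          :            :  * is special.  A COWed replacement page is allocated from anywhere,
                          :            :  * so it almost certainly breaks that equality and is treated as a
                          :            :  * normal page.
                          :            :  */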
     646                 :            : 
     647                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     648                 :            : struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
     649                 :            :                                 pmd_t pmd)
     650                 :            : {
     651                 :            :         unsigned long pfn = pmd_pfn(pmd);
     652                 :            : 
     653                 :            :         /*
     654                 :            :          * There is no pmd_special() but there may be special pmds, e.g.
     655                 :            :          * in a direct-access (dax) mapping, so let's just replicate the
     656                 :            :          * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
     657                 :            :          */
     658                 :            :         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
     659                 :            :                 if (vma->vm_flags & VM_MIXEDMAP) {
     660                 :            :                         if (!pfn_valid(pfn))
     661                 :            :                                 return NULL;
     662                 :            :                         goto out;
     663                 :            :                 } else {
     664                 :            :                         unsigned long off;
     665                 :            :                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
     666                 :            :                         if (pfn == vma->vm_pgoff + off)
     667                 :            :                                 return NULL;
     668                 :            :                         if (!is_cow_mapping(vma->vm_flags))
     669                 :            :                                 return NULL;
     670                 :            :                 }
     671                 :            :         }
     672                 :            : 
     673                 :            :         if (pmd_devmap(pmd))
     674                 :            :                 return NULL;
     675                 :            :         if (is_huge_zero_pmd(pmd))
     676                 :            :                 return NULL;
     677                 :            :         if (unlikely(pfn > highest_memmap_pfn))
     678                 :            :                 return NULL;
     679                 :            : 
     680                 :            :         /*
     681                 :            :          * NOTE! We still have PageReserved() pages in the page tables.
     682                 :            :          * eg. VDSO mappings can cause them to exist.
     683                 :            :          */
     684                 :            : out:
     685                 :            :         return pfn_to_page(pfn);
     686                 :            : }
     687                 :            : #endif
     688                 :            : 
     689                 :            : /*
      690                 :            :  * Copy one vm_area from one task to the other. Assumes the page tables
      691                 :            :  * in the new task are already cleared across the whole range covered
      692                 :            :  * by this vma.
     693                 :            :  */
     694                 :            : 
     695                 :            : static inline unsigned long
     696                 :     648069 : copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     697                 :            :                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
     698                 :            :                 unsigned long addr, int *rss)
     699                 :            : {
     700                 :     648069 :         unsigned long vm_flags = vma->vm_flags;
     701                 :     648069 :         pte_t pte = *src_pte;
     702                 :     648069 :         struct page *page;
     703                 :            : 
     704                 :            :         /* pte contains position in swap or file, so copy. */
     705         [ -  + ]:     648069 :         if (unlikely(!pte_present(pte))) {
     706         [ #  # ]:          0 :                 swp_entry_t entry = pte_to_swp_entry(pte);
     707                 :            : 
     708         [ #  # ]:          0 :                 if (likely(!non_swap_entry(entry))) {
     709         [ #  # ]:          0 :                         if (swap_duplicate(entry) < 0)
     710                 :            :                                 return entry.val;
     711                 :            : 
     712                 :            :                         /* make sure dst_mm is on swapoff's mmlist. */
     713         [ #  # ]:          0 :                         if (unlikely(list_empty(&dst_mm->mmlist))) {
     714                 :          0 :                                 spin_lock(&mmlist_lock);
     715         [ #  # ]:          0 :                                 if (list_empty(&dst_mm->mmlist))
     716                 :          0 :                                         list_add(&dst_mm->mmlist,
     717                 :            :                                                         &src_mm->mmlist);
     718                 :          0 :                                 spin_unlock(&mmlist_lock);
     719                 :            :                         }
     720                 :          0 :                         rss[MM_SWAPENTS]++;
     721         [ #  # ]:          0 :                 } else if (is_migration_entry(entry)) {
     722                 :          0 :                         page = migration_entry_to_page(entry);
     723                 :            : 
     724                 :          0 :                         rss[mm_counter(page)]++;
     725                 :            : 
     726   [ #  #  #  # ]:          0 :                         if (is_write_migration_entry(entry) &&
     727                 :            :                                         is_cow_mapping(vm_flags)) {
     728                 :            :                                 /*
     729                 :            :                                  * COW mappings require pages in both
     730                 :            :                                  * parent and child to be set to read.
     731                 :            :                                  */
     732                 :          0 :                                 make_migration_entry_read(&entry);
     733                 :          0 :                                 pte = swp_entry_to_pte(entry);
     734                 :          0 :                                 if (pte_swp_soft_dirty(*src_pte))
     735                 :            :                                         pte = pte_swp_mksoft_dirty(pte);
     736                 :          0 :                                 set_pte_at(src_mm, addr, src_pte, pte);
     737                 :            :                         }
     738                 :            :                 } else if (is_device_private_entry(entry)) {
     739                 :            :                         page = device_private_entry_to_page(entry);
     740                 :            : 
     741                 :            :                         /*
     742                 :            :                          * Update rss count even for unaddressable pages, as
      743                 :            :  * they should be treated just like normal pages in this
     744                 :            :                          * respect.
     745                 :            :                          *
     746                 :            :                          * We will likely want to have some new rss counters
     747                 :            :                          * for unaddressable pages, at some point. But for now
     748                 :            :                          * keep things as they are.
     749                 :            :                          */
     750                 :            :                         get_page(page);
     751                 :            :                         rss[mm_counter(page)]++;
     752                 :            :                         page_dup_rmap(page, false);
     753                 :            : 
     754                 :            :                         /*
     755                 :            :                          * We do not preserve soft-dirty information, because so
     756                 :            :                          * far, checkpoint/restore is the only feature that
     757                 :            :                          * requires that. And checkpoint/restore does not work
     758                 :            :                          * when a device driver is involved (you cannot easily
     759                 :            :                          * save and restore device driver state).
     760                 :            :                          */
     761                 :            :                         if (is_write_device_private_entry(entry) &&
     762                 :            :                             is_cow_mapping(vm_flags)) {
     763                 :            :                                 make_device_private_entry_read(&entry);
     764                 :            :                                 pte = swp_entry_to_pte(entry);
     765                 :            :                                 set_pte_at(src_mm, addr, src_pte, pte);
     766                 :            :                         }
     767                 :            :                 }
     768                 :          0 :                 goto out_set_pte;
     769                 :            :         }
     770                 :            : 
     771                 :            :         /*
     772                 :            :          * If it's a COW mapping, write protect it both
     773                 :            :          * in the parent and the child
     774                 :            :          */
     775   [ +  +  +  + ]:     648069 :         if (is_cow_mapping(vm_flags) && pte_write(pte)) {
     776                 :     179459 :                 ptep_set_wrprotect(src_mm, addr, src_pte);
     777                 :     179459 :                 pte = pte_wrprotect(pte);
     778                 :            :         }
     779                 :            : 
     780                 :            :         /*
     781                 :            :          * If it's a shared mapping, mark it clean in
     782                 :            :          * the child
     783                 :            :          */
     784         [ +  + ]:     648069 :         if (vm_flags & VM_SHARED)
     785                 :        880 :                 pte = pte_mkclean(pte);
     786                 :     648069 :         pte = pte_mkold(pte);
     787                 :            : 
     788                 :     648069 :         page = vm_normal_page(vma, addr, pte);
     789         [ +  + ]:     648069 :         if (page) {
     790         [ -  + ]:     635859 :                 get_page(page);
     791                 :     635859 :                 page_dup_rmap(page, false);
     792                 :     635859 :                 rss[mm_counter(page)]++;
     793                 :      12210 :         } else if (pte_devmap(pte)) {
     794                 :            :                 page = pte_page(pte);
     795                 :            :         }
     796                 :            : 
     797                 :      12210 : out_set_pte:
     798                 :     648069 :         set_pte_at(dst_mm, addr, dst_pte, pte);
     799                 :     648069 :         return 0;
     800                 :            : }
     801                 :            : 
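The write-protect decisions in copy_one_pte() hinge on is_cow_mapping(). As an
illustrative sketch (the helper itself lives in mm/internal.h; the exact form
here is recalled, not quoted, so treat it as an assumption): a mapping counts
as copy-on-write exactly when it may be written but is not shared.

        static inline bool is_cow_mapping(vm_flags_t flags)
        {
                /* VM_MAYWRITE set, VM_SHARED clear: private and writable. */
                return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
        }
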
     802                 :     225777 : static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     803                 :            :                    pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
     804                 :            :                    unsigned long addr, unsigned long end)
     805                 :            : {
     806                 :     225777 :         pte_t *orig_src_pte, *orig_dst_pte;
     807                 :     225777 :         pte_t *src_pte, *dst_pte;
     808                 :     225777 :         spinlock_t *src_ptl, *dst_ptl;
     809                 :     225777 :         int progress = 0;
     810                 :     225777 :         int rss[NR_MM_COUNTERS];
     811                 :     225777 :         swp_entry_t entry = (swp_entry_t){0};
     812                 :            : 
     813                 :     225790 : again:
     814                 :     225790 :         init_rss_vec(rss);
     815                 :            : 
      816   [ +  +  +  -  +  - ]:     451580 :         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
     817         [ -  + ]:     225790 :         if (!dst_pte)
     818                 :          0 :                 return -ENOMEM;
     819         [ +  - ]:     225790 :         src_pte = pte_offset_map(src_pmd, addr);
     820         [ +  - ]:     225790 :         src_ptl = pte_lockptr(src_mm, src_pmd);
     821                 :     225790 :         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
     822                 :     225790 :         orig_src_pte = src_pte;
     823                 :     225790 :         orig_dst_pte = dst_pte;
     824                 :    1545758 :         arch_enter_lazy_mmu_mode();
     825                 :            : 
     826                 :    1545758 :         do {
     827                 :            :                 /*
     828                 :            :                  * We are holding two locks at this point - either of them
     829                 :            :                  * could generate latencies in another task on another CPU.
     830                 :            :                  */
     831         [ +  + ]:    1545758 :                 if (progress >= 32) {
     832                 :      99944 :                         progress = 0;
     833         [ +  + ]:      99944 :                         if (need_resched() ||
     834                 :            :                             spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
     835                 :            :                                 break;
     836                 :            :                 }
     837         [ +  + ]:    1545745 :                 if (pte_none(*src_pte)) {
     838                 :     897676 :                         progress++;
     839                 :     897676 :                         continue;
     840                 :            :                 }
     841                 :     648069 :                 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
     842                 :            :                                                         vma, addr, rss);
     843         [ +  - ]:     648069 :                 if (entry.val)
     844                 :            :                         break;
     845                 :     648069 :                 progress += 8;
     846         [ +  + ]:    1545745 :         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
     847                 :            : 
     848                 :     225790 :         arch_leave_lazy_mmu_mode();
     849                 :     225790 :         spin_unlock(src_ptl);
     850                 :     225790 :         pte_unmap(orig_src_pte);
     851                 :     225790 :         add_mm_rss_vec(dst_mm, rss);
     852                 :     225790 :         pte_unmap_unlock(orig_dst_pte, dst_ptl);
     853                 :     225790 :         cond_resched();
     854                 :            : 
     855         [ -  + ]:     225790 :         if (entry.val) {
     856         [ #  # ]:          0 :                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
     857                 :            :                         return -ENOMEM;
     858                 :            :                 progress = 0;
     859                 :            :         }
     860         [ +  + ]:     225790 :         if (addr != end)
     861                 :         13 :                 goto again;
     862                 :            :         return 0;
     863                 :            : }
     864                 :            : 
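copy_pte_range() batches RSS accounting in the local rss[] vector so the inner
loop avoids per-counter atomic updates, publishing the totals once per locked
stretch via add_mm_rss_vec(). A minimal sketch of those helpers, assuming the
shape they have earlier in this file (illustrative, not a verbatim quote):

        static void init_rss_vec(int *rss)
        {
                memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
        }

        static void add_mm_rss_vec(struct mm_struct *mm, int *rss)
        {
                int i;

                /* The in-tree version also folds the per-task cached
                 * counters (sync_mm_rss()) when current->mm == mm. */
                for (i = 0; i < NR_MM_COUNTERS; i++)
                        if (rss[i])
                                add_mm_counter(mm, i, rss[i]);
        }
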
     865                 :     224024 : static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     866                 :            :                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
     867                 :            :                 unsigned long addr, unsigned long end)
     868                 :            : {
     869                 :     224024 :         pmd_t *src_pmd, *dst_pmd;
     870                 :     224024 :         unsigned long next;
     871                 :            : 
     872                 :     224024 :         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
     873         [ +  - ]:     224024 :         if (!dst_pmd)
     874                 :            :                 return -ENOMEM;
     875         [ +  - ]:     224024 :         src_pmd = pmd_offset(src_pud, addr);
     876                 :     226475 :         do {
     877         [ +  + ]:     226475 :                 next = pmd_addr_end(addr, end);
     878                 :     226475 :                 if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
     879                 :            :                         || pmd_devmap(*src_pmd)) {
     880                 :            :                         int err;
     881                 :            :                         VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
     882                 :            :                         err = copy_huge_pmd(dst_mm, src_mm,
     883                 :            :                                             dst_pmd, src_pmd, addr, vma);
     884                 :            :                         if (err == -ENOMEM)
     885                 :            :                                 return -ENOMEM;
     886                 :            :                         if (!err)
     887                 :            :                                 continue;
     888                 :            :                         /* fall through */
     889                 :            :                 }
     890         [ +  + ]:     226475 :                 if (pmd_none_or_clear_bad(src_pmd))
     891                 :        698 :                         continue;
     892         [ +  - ]:     225777 :                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
     893                 :            :                                                 vma, addr, next))
     894                 :            :                         return -ENOMEM;
     895         [ +  + ]:     226475 :         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
     896                 :            :         return 0;
     897                 :            : }
     898                 :            : 
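Every level of the walk strides with pXX_addr_end(), which returns the next
level-sized boundary or 'end', whichever comes first, so one copy_pte_range()
call never crosses a pmd. A self-contained user-space model of the boundary
arithmetic (assuming a 2 MiB PMD, as on x86-64 with 4 KiB pages):

        #include <stdio.h>

        #define PMD_SIZE (1UL << 21)
        #define PMD_MASK (~(PMD_SIZE - 1))

        /* Next 2 MiB boundary, clamped to 'end'. The "- 1" on both sides
         * keeps the comparison correct if the boundary wraps to zero. */
        static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
        {
                unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

                return boundary - 1 < end - 1 ? boundary : end;
        }

        int main(void)
        {
                unsigned long addr = 0x7f0000001000UL;
                unsigned long end  = 0x7f0000402000UL;
                unsigned long next;

                do {    /* same loop shape as the copy/zap walkers */
                        next = pmd_addr_end(addr, end);
                        printf("chunk %#lx..%#lx\n", addr, next);
                } while (addr = next, addr != end);
                return 0;
        }
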
     899                 :     224024 : static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     900                 :            :                 p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
     901                 :            :                 unsigned long addr, unsigned long end)
     902                 :            : {
     903                 :     224024 :         pud_t *src_pud, *dst_pud;
     904                 :     224024 :         unsigned long next;
     905                 :            : 
     906                 :     224024 :         dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
     907         [ +  - ]:     224024 :         if (!dst_pud)
     908                 :            :                 return -ENOMEM;
     909                 :     224024 :         src_pud = pud_offset(src_p4d, addr);
     910                 :     224024 :         do {
     911         [ -  + ]:     224024 :                 next = pud_addr_end(addr, end);
     912                 :     224024 :                 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
     913                 :            :                         int err;
     914                 :            : 
     915                 :            :                         VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
     916                 :            :                         err = copy_huge_pud(dst_mm, src_mm,
     917                 :            :                                             dst_pud, src_pud, addr, vma);
     918                 :            :                         if (err == -ENOMEM)
     919                 :            :                                 return -ENOMEM;
     920                 :            :                         if (!err)
     921                 :            :                                 continue;
     922                 :            :                         /* fall through */
     923                 :            :                 }
     924         [ -  + ]:     224024 :                 if (pud_none_or_clear_bad(src_pud))
     925                 :          0 :                         continue;
     926         [ +  - ]:     224024 :                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
     927                 :            :                                                 vma, addr, next))
     928                 :            :                         return -ENOMEM;
     929         [ -  + ]:     224024 :         } while (dst_pud++, src_pud++, addr = next, addr != end);
     930                 :            :         return 0;
     931                 :            : }
     932                 :            : 
     933                 :     224024 : static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     934                 :            :                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
     935                 :            :                 unsigned long addr, unsigned long end)
     936                 :            : {
     937                 :     224024 :         p4d_t *src_p4d, *dst_p4d;
     938                 :     224024 :         unsigned long next;
     939                 :            : 
     940                 :     224024 :         dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
     941         [ +  - ]:     224024 :         if (!dst_p4d)
     942                 :            :                 return -ENOMEM;
     943                 :     224024 :         src_p4d = p4d_offset(src_pgd, addr);
     944                 :     224024 :         do {
     945         [ -  + ]:     224024 :                 next = p4d_addr_end(addr, end);
     946         [ +  - ]:     224024 :                 if (p4d_none_or_clear_bad(src_p4d))
     947                 :          0 :                         continue;
     948         [ +  - ]:     224024 :                 if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
     949                 :            :                                                 vma, addr, next))
     950                 :            :                         return -ENOMEM;
     951         [ -  + ]:     224024 :         } while (dst_p4d++, src_p4d++, addr = next, addr != end);
     952                 :            :         return 0;
     953                 :            : }
     954                 :            : 
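On configurations with fewer than five page-table levels the p4d level is
folded away, so this loop runs exactly once per pgd entry. The folded helper
simply reinterprets the pgd pointer; a sketch along the lines of
include/asm-generic/pgtable-nop4d.h (abridged from memory, so treat the
details as an assumption):

        static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
        {
                return (p4d_t *)pgd;    /* one folded "entry" per pgd */
        }
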
     955                 :     528064 : int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     956                 :            :                 struct vm_area_struct *vma)
     957                 :            : {
     958                 :     528064 :         pgd_t *src_pgd, *dst_pgd;
     959                 :     528064 :         unsigned long next;
     960                 :     528064 :         unsigned long addr = vma->vm_start;
     961                 :     528064 :         unsigned long end = vma->vm_end;
     962                 :     528064 :         struct mmu_notifier_range range;
     963                 :     528064 :         bool is_cow;
     964                 :     528064 :         int ret;
     965                 :            : 
     966                 :            :         /*
     967                 :            :          * Don't copy ptes where a page fault will fill them correctly.
     968                 :            :          * Fork becomes much lighter when there are big shared or private
      969                 :            :          * readonly mappings. The tradeoff is that, for ptes touched again
      970                 :            :          * later, copying here in bulk beats faulting them back in singly.
     971                 :            :          */
     972         [ +  + ]:     528064 :         if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
     973         [ +  + ]:     518978 :                         !vma->anon_vma)
     974                 :            :                 return 0;
     975                 :            : 
     976         [ -  + ]:     224024 :         if (is_vm_hugetlb_page(vma))
     977                 :          0 :                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
     978                 :            : 
     979         [ +  + ]:     224024 :         if (unlikely(vma->vm_flags & VM_PFNMAP)) {
     980                 :            :                 /*
     981                 :            :                  * We do not free on error cases below as remove_vma
     982                 :            :                  * gets called on error from higher level routine
     983                 :            :                  */
     984                 :       9031 :                 ret = track_pfn_copy(vma);
     985         [ +  - ]:       9031 :                 if (ret)
     986                 :            :                         return ret;
     987                 :            :         }
     988                 :            : 
     989                 :            :         /*
     990                 :            :          * We need to invalidate the secondary MMU mappings only when
     991                 :            :          * there could be a permission downgrade on the ptes of the
     992                 :            :          * parent mm. And a permission downgrade will only happen if
     993                 :            :          * is_cow_mapping() returns true.
     994                 :            :          */
     995         [ +  + ]:     224024 :         is_cow = is_cow_mapping(vma->vm_flags);
     996                 :            : 
     997         [ +  + ]:     224024 :         if (is_cow) {
     998                 :     214938 :                 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
     999                 :            :                                         0, vma, src_mm, addr, end);
    1000                 :     214938 :                 mmu_notifier_invalidate_range_start(&range);
    1001                 :            :         }
    1002                 :            : 
    1003                 :     224024 :         ret = 0;
    1004                 :     224024 :         dst_pgd = pgd_offset(dst_mm, addr);
    1005                 :     224024 :         src_pgd = pgd_offset(src_mm, addr);
    1006                 :     224024 :         do {
    1007         [ +  - ]:     224024 :                 next = pgd_addr_end(addr, end);
    1008         [ -  + ]:     224024 :                 if (pgd_none_or_clear_bad(src_pgd))
    1009                 :          0 :                         continue;
    1010         [ +  - ]:     224024 :                 if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
    1011                 :            :                                             vma, addr, next))) {
    1012                 :            :                         ret = -ENOMEM;
    1013                 :            :                         break;
    1014                 :            :                 }
    1015         [ -  + ]:     224024 :         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
    1016                 :            : 
    1017         [ +  + ]:     224024 :         if (is_cow)
    1018                 :     214938 :                 mmu_notifier_invalidate_range_end(&range);
    1019                 :            :         return ret;
    1020                 :            : }
    1021                 :            : 
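The net effect of the COW setup performed by copy_page_range() is observable
from user space: after fork(), parent and child share write-protected ptes,
and the first writer faults in a private copy. A self-contained demonstration
program (plain POSIX, written for illustration):

        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <sys/wait.h>
        #include <unistd.h>

        int main(void)
        {
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return 1;
                strcpy(p, "parent");

                if (fork() == 0) {
                        strcpy(p, "child");     /* write fault copies the page */
                        _exit(0);
                }
                wait(NULL);
                printf("%s\n", p);      /* prints "parent": child wrote a copy */
                return 0;
        }
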
    1022                 :    1054261 : static unsigned long zap_pte_range(struct mmu_gather *tlb,
    1023                 :            :                                 struct vm_area_struct *vma, pmd_t *pmd,
    1024                 :            :                                 unsigned long addr, unsigned long end,
    1025                 :            :                                 struct zap_details *details)
    1026                 :            : {
    1027                 :    1054261 :         struct mm_struct *mm = tlb->mm;
    1028                 :    1054261 :         int force_flush = 0;
    1029                 :    1054261 :         int rss[NR_MM_COUNTERS];
    1030                 :    1054261 :         spinlock_t *ptl;
    1031                 :    1054261 :         pte_t *start_pte;
    1032                 :    1054261 :         pte_t *pte;
    1033                 :    1054261 :         swp_entry_t entry;
    1034                 :            : 
    1035                 :    1054261 :         tlb_change_page_size(tlb, PAGE_SIZE);
    1036                 :    1054779 : again:
    1037                 :    1054779 :         init_rss_vec(rss);
    1038         [ +  - ]:    2109558 :         start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    1039                 :    1054779 :         pte = start_pte;
    1040                 :    1054779 :         flush_tlb_batched_pending(mm);
    1041                 :   30263820 :         arch_enter_lazy_mmu_mode();
    1042                 :   30263820 :         do {
    1043                 :   30263820 :                 pte_t ptent = *pte;
    1044         [ +  + ]:   30263820 :                 if (pte_none(ptent))
    1045                 :   23624150 :                         continue;
    1046                 :            : 
    1047         [ +  + ]:    6639666 :                 if (need_resched())
    1048                 :            :                         break;
    1049                 :            : 
    1050         [ +  - ]:    6639148 :                 if (pte_present(ptent)) {
    1051                 :    6639148 :                         struct page *page;
    1052                 :            : 
    1053                 :    6639148 :                         page = vm_normal_page(vma, addr, ptent);
    1054   [ -  +  -  - ]:    6639148 :                         if (unlikely(details) && page) {
    1055                 :            :                                 /*
    1056                 :            :                                  * unmap_shared_mapping_pages() wants to
    1057                 :            :                                  * invalidate cache without truncating:
    1058                 :            :                                  * unmap shared but keep private pages.
    1059                 :            :                                  */
    1060   [ #  #  #  # ]:          0 :                                 if (details->check_mapping &&
    1061                 :          0 :                                     details->check_mapping != page_rmapping(page))
    1062                 :          0 :                                         continue;
    1063                 :            :                         }
    1064                 :    6639148 :                         ptent = ptep_get_and_clear_full(mm, addr, pte,
    1065         [ +  + ]:    6639148 :                                                         tlb->fullmm);
    1066         [ +  + ]:    6639148 :                         tlb_remove_tlb_entry(tlb, pte, addr);
    1067         [ +  + ]:    6639148 :                         if (unlikely(!page))
    1068                 :      12743 :                                 continue;
    1069                 :            : 
    1070   [ -  +  +  + ]:    6626405 :                         if (!PageAnon(page)) {
    1071         [ -  + ]:    5708098 :                                 if (pte_dirty(ptent)) {
    1072                 :          0 :                                         force_flush = 1;
    1073                 :          0 :                                         set_page_dirty(page);
    1074                 :            :                                 }
    1075         [ +  + ]:    5708098 :                                 if (pte_young(ptent) &&
    1076         [ +  - ]:    5703397 :                                     likely(!(vma->vm_flags & VM_SEQ_READ)))
    1077                 :    5703397 :                                         mark_page_accessed(page);
    1078                 :            :                         }
    1079                 :    6626405 :                         rss[mm_counter(page)]--;
    1080                 :    6626405 :                         page_remove_rmap(page, false);
    1081         [ -  + ]:    6626405 :                         if (unlikely(page_mapcount(page) < 0))
    1082                 :          0 :                                 print_bad_pte(vma, addr, ptent, page);
    1083         [ +  - ]:    6626405 :                         if (unlikely(__tlb_remove_page(tlb, page))) {
    1084                 :            :                                 force_flush = 1;
    1085                 :            :                                 addr += PAGE_SIZE;
    1086                 :            :                                 break;
    1087                 :            :                         }
    1088                 :    6626405 :                         continue;
    1089                 :            :                 }
    1090                 :            : 
    1091         [ #  # ]:          0 :                 entry = pte_to_swp_entry(ptent);
    1092         [ #  # ]:          0 :                 if (non_swap_entry(entry) && is_device_private_entry(entry)) {
    1093                 :            :                         struct page *page = device_private_entry_to_page(entry);
    1094                 :            : 
    1095                 :            :                         if (unlikely(details && details->check_mapping)) {
    1096                 :            :                                 /*
    1097                 :            :                                  * unmap_shared_mapping_pages() wants to
    1098                 :            :                                  * invalidate cache without truncating:
    1099                 :            :                                  * unmap shared but keep private pages.
    1100                 :            :                                  */
    1101                 :            :                                 if (details->check_mapping !=
    1102                 :            :                                     page_rmapping(page))
    1103                 :            :                                         continue;
    1104                 :            :                         }
    1105                 :            : 
    1106                 :            :                         pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
    1107                 :            :                         rss[mm_counter(page)]--;
    1108                 :            :                         page_remove_rmap(page, false);
    1109                 :            :                         put_page(page);
    1110                 :            :                         continue;
    1111                 :            :                 }
    1112                 :            : 
    1113                 :            :                 /* If details->check_mapping, we leave swap entries. */
    1114         [ #  # ]:          0 :                 if (unlikely(details))
    1115                 :          0 :                         continue;
    1116                 :            : 
    1117         [ #  # ]:          0 :                 if (!non_swap_entry(entry))
    1118                 :          0 :                         rss[MM_SWAPENTS]--;
    1119         [ #  # ]:          0 :                 else if (is_migration_entry(entry)) {
    1120                 :          0 :                         struct page *page;
    1121                 :            : 
    1122                 :          0 :                         page = migration_entry_to_page(entry);
    1123                 :          0 :                         rss[mm_counter(page)]--;
    1124                 :            :                 }
    1125         [ #  # ]:          0 :                 if (unlikely(!free_swap_and_cache(entry)))
    1126                 :          0 :                         print_bad_pte(vma, addr, ptent, NULL);
    1127                 :          0 :                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
    1128         [ +  + ]:   30263300 :         } while (pte++, addr += PAGE_SIZE, addr != end);
    1129                 :            : 
    1130                 :    1054779 :         add_mm_rss_vec(mm, rss);
    1131                 :    1054779 :         arch_leave_lazy_mmu_mode();
    1132                 :            : 
    1133                 :            :         /* Do the actual TLB flush before dropping ptl */
    1134         [ -  + ]:    1054779 :         if (force_flush)
    1135                 :          0 :                 tlb_flush_mmu_tlbonly(tlb);
    1136                 :    1054779 :         pte_unmap_unlock(start_pte, ptl);
    1137                 :            : 
    1138                 :            :         /*
    1139                 :            :          * If we forced a TLB flush (either due to running out of
    1140                 :            :          * batch buffers or because we needed to flush dirty TLB
    1141                 :            :          * entries before releasing the ptl), free the batched
    1142                 :            :          * memory too. Restart if we didn't do everything.
    1143                 :            :          */
    1144         [ -  + ]:    1054779 :         if (force_flush) {
    1145                 :          0 :                 force_flush = 0;
    1146                 :          0 :                 tlb_flush_mmu(tlb);
    1147                 :            :         }
    1148                 :            : 
    1149         [ +  + ]:    1054779 :         if (addr != end) {
    1150                 :        518 :                 cond_resched();
    1151                 :        518 :                 goto again;
    1152                 :            :         }
    1153                 :            : 
    1154                 :    1054261 :         return addr;
    1155                 :            : }
    1156                 :            : 
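The force_flush/__tlb_remove_page() logic above batches the pages being freed
in the mmu_gather: when the batch fills, the loop breaks out, flushes the TLB,
frees the batch, and restarts at the 'again' label. A toy user-space model of
that control flow (the BATCH size and names are invented for illustration):

        #include <stdbool.h>
        #include <stdio.h>

        #define BATCH 8                 /* real batches are page-sized arrays */

        struct gather { int nr; };

        /* Queue one page; report true when the batch just became full. */
        static bool gather_remove_page(struct gather *g)
        {
                return ++g->nr == BATCH;
        }

        static void gather_flush(struct gather *g)
        {
                printf("flush TLB + free %d pages\n", g->nr);
                g->nr = 0;
        }

        int main(void)
        {
                struct gather g = { 0 };
                int pages = 20;

                while (pages--)
                        if (gather_remove_page(&g))
                                gather_flush(&g);       /* force_flush restart */
                if (g.nr)
                        gather_flush(&g);               /* final teardown flush */
                return 0;
        }
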
    1157                 :    1011598 : static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
    1158                 :            :                                 struct vm_area_struct *vma, pud_t *pud,
    1159                 :            :                                 unsigned long addr, unsigned long end,
    1160                 :            :                                 struct zap_details *details)
    1161                 :            : {
    1162                 :    1011598 :         pmd_t *pmd;
    1163                 :    1011598 :         unsigned long next;
    1164                 :            : 
    1165         [ +  - ]:    1011598 :         pmd = pmd_offset(pud, addr);
    1166                 :    1074344 :         do {
    1167         [ +  + ]:    1074344 :                 next = pmd_addr_end(addr, end);
    1168                 :    1074344 :                 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
    1169                 :            :                         if (next - addr != HPAGE_PMD_SIZE)
    1170                 :            :                                 __split_huge_pmd(vma, pmd, addr, false, NULL);
    1171                 :            :                         else if (zap_huge_pmd(tlb, vma, pmd, addr))
    1172                 :            :                                 goto next;
    1173                 :            :                         /* fall through */
    1174                 :            :                 }
    1175                 :            :                 /*
    1176                 :            :                  * Here there can be other concurrent MADV_DONTNEED or
    1177                 :            :                  * trans huge page faults running, and if the pmd is
    1178                 :            :                  * none or trans huge it can change under us. This is
    1179                 :            :                  * because MADV_DONTNEED holds the mmap_sem in read
    1180                 :            :                  * mode.
    1181                 :            :                  */
    1182         [ +  + ]:    1074344 :                 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
    1183                 :      20083 :                         goto next;
    1184                 :    1054261 :                 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
    1185                 :    1074344 : next:
    1186                 :    1074344 :                 cond_resched();
    1187         [ +  + ]:    1074344 :         } while (pmd++, addr = next, addr != end);
    1188                 :            : 
    1189                 :    1011598 :         return addr;
    1190                 :            : }
    1191                 :            : 
    1192                 :    1015867 : static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
    1193                 :            :                                 struct vm_area_struct *vma, p4d_t *p4d,
    1194                 :            :                                 unsigned long addr, unsigned long end,
    1195                 :            :                                 struct zap_details *details)
    1196                 :            : {
    1197                 :    1015867 :         pud_t *pud;
    1198                 :    1015867 :         unsigned long next;
    1199                 :            : 
    1200                 :    1015867 :         pud = pud_offset(p4d, addr);
    1201                 :    1015917 :         do {
    1202         [ +  + ]:    1015917 :                 next = pud_addr_end(addr, end);
    1203                 :    1015917 :                 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
    1204                 :            :                         if (next - addr != HPAGE_PUD_SIZE) {
    1205                 :            :                                 VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
    1206                 :            :                                 split_huge_pud(vma, pud, addr);
    1207                 :            :                         } else if (zap_huge_pud(tlb, vma, pud, addr))
    1208                 :            :                                 goto next;
    1209                 :            :                         /* fall through */
    1210                 :            :                 }
    1211         [ +  + ]:    1015917 :                 if (pud_none_or_clear_bad(pud))
    1212                 :       4319 :                         continue;
    1213                 :    1011598 :                 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
    1214                 :            : next:
    1215                 :    1011598 :                 cond_resched();
    1216         [ +  + ]:    1015917 :         } while (pud++, addr = next, addr != end);
    1217                 :            : 
    1218                 :    1015867 :         return addr;
    1219                 :            : }
    1220                 :            : 
    1221                 :    1029477 : static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
    1222                 :            :                                 struct vm_area_struct *vma, pgd_t *pgd,
    1223                 :            :                                 unsigned long addr, unsigned long end,
    1224                 :            :                                 struct zap_details *details)
    1225                 :            : {
    1226                 :    1029477 :         p4d_t *p4d;
    1227                 :    1029477 :         unsigned long next;
    1228                 :            : 
    1229                 :    1029477 :         p4d = p4d_offset(pgd, addr);
    1230                 :    1029477 :         do {
    1231         [ -  + ]:    1029477 :                 next = p4d_addr_end(addr, end);
    1232         [ +  + ]:    1029477 :                 if (p4d_none_or_clear_bad(p4d))
    1233                 :      13610 :                         continue;
    1234                 :    1015867 :                 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
    1235         [ -  + ]:    1029477 :         } while (p4d++, addr = next, addr != end);
    1236                 :            : 
    1237                 :    1029477 :         return addr;
    1238                 :            : }
    1239                 :            : 
    1240                 :    1029477 : void unmap_page_range(struct mmu_gather *tlb,
    1241                 :            :                              struct vm_area_struct *vma,
    1242                 :            :                              unsigned long addr, unsigned long end,
    1243                 :            :                              struct zap_details *details)
    1244                 :            : {
    1245                 :    1029477 :         pgd_t *pgd;
    1246                 :    1029477 :         unsigned long next;
    1247                 :            : 
    1248         [ -  + ]:    1029477 :         BUG_ON(addr >= end);
    1249                 :    1029477 :         tlb_start_vma(tlb, vma);
    1250                 :    1029477 :         pgd = pgd_offset(vma->vm_mm, addr);
    1251                 :    1029477 :         do {
    1252         [ +  - ]:    1029477 :                 next = pgd_addr_end(addr, end);
    1253         [ -  + ]:    1029477 :                 if (pgd_none_or_clear_bad(pgd))
    1254                 :          0 :                         continue;
    1255                 :    1029477 :                 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
    1256         [ -  + ]:    1029477 :         } while (pgd++, addr = next, addr != end);
    1257                 :    1029477 :         tlb_end_vma(tlb, vma);
    1258                 :    1029477 : }
    1259                 :            : 
    1260                 :            : 
    1261                 :    1029477 : static void unmap_single_vma(struct mmu_gather *tlb,
    1262                 :            :                 struct vm_area_struct *vma, unsigned long start_addr,
    1263                 :            :                 unsigned long end_addr,
    1264                 :            :                 struct zap_details *details)
    1265                 :            : {
    1266                 :    1029477 :         unsigned long start = max(vma->vm_start, start_addr);
    1267                 :    1029477 :         unsigned long end;
    1268                 :            : 
    1269         [ +  - ]:    1029477 :         if (start >= vma->vm_end)
    1270                 :            :                 return;
    1271                 :    1029477 :         end = min(vma->vm_end, end_addr);
    1272         [ +  - ]:    1029477 :         if (end <= vma->vm_start)
    1273                 :            :                 return;
    1274                 :            : 
    1275         [ +  + ]:    1029477 :         if (vma->vm_file)
    1276                 :     868351 :                 uprobe_munmap(vma, start, end);
    1277                 :            : 
    1278         [ +  + ]:    1029477 :         if (unlikely(vma->vm_flags & VM_PFNMAP))
    1279                 :      17732 :                 untrack_pfn(vma, 0, 0);
    1280                 :            : 
    1281         [ +  - ]:    1029477 :         if (start != end) {
    1282         [ -  + ]:    1029477 :                 if (unlikely(is_vm_hugetlb_page(vma))) {
    1283                 :            :                         /*
    1284                 :            :                          * It is undesirable to test vma->vm_file as it
    1285                 :            :                          * should be non-null for valid hugetlb area.
    1286                 :            :                          * However, vm_file will be NULL in the error
    1287                 :            :                          * cleanup path of mmap_region. When
    1288                 :            :                          * hugetlbfs ->mmap method fails,
    1289                 :            :                          * mmap_region() nullifies vma->vm_file
    1290                 :            :                          * before calling this function to clean up.
     1291                 :            :                          * Since no pte has actually been set up, it is
    1292                 :            :                          * safe to do nothing in this case.
    1293                 :            :                          */
    1294         [ #  # ]:          0 :                         if (vma->vm_file) {
    1295                 :          0 :                                 i_mmap_lock_write(vma->vm_file->f_mapping);
    1296                 :          0 :                                 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
    1297                 :          0 :                                 i_mmap_unlock_write(vma->vm_file->f_mapping);
    1298                 :            :                         }
    1299                 :            :                 } else
    1300                 :    1029477 :                         unmap_page_range(tlb, vma, start, end, details);
    1301                 :            :         }
    1302                 :            : }
    1303                 :            : 
    1304                 :            : /**
    1305                 :            :  * unmap_vmas - unmap a range of memory covered by a list of vma's
    1306                 :            :  * @tlb: address of the caller's struct mmu_gather
    1307                 :            :  * @vma: the starting vma
    1308                 :            :  * @start_addr: virtual address at which to start unmapping
    1309                 :            :  * @end_addr: virtual address at which to end unmapping
    1310                 :            :  *
    1311                 :            :  * Unmap all pages in the vma list.
    1312                 :            :  *
     1313                 :            :  * Only addresses between @start_addr and @end_addr will be unmapped.
    1314                 :            :  *
    1315                 :            :  * The VMA list must be sorted in ascending virtual address order.
    1316                 :            :  *
    1317                 :            :  * unmap_vmas() assumes that the caller will flush the whole unmapped address
    1318                 :            :  * range after unmap_vmas() returns.  So the only responsibility here is to
    1319                 :            :  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
    1320                 :            :  * drops the lock and schedules.
    1321                 :            :  */
    1322                 :     150505 : void unmap_vmas(struct mmu_gather *tlb,
    1323                 :            :                 struct vm_area_struct *vma, unsigned long start_addr,
    1324                 :            :                 unsigned long end_addr)
    1325                 :            : {
    1326                 :     150505 :         struct mmu_notifier_range range;
    1327                 :            : 
    1328                 :     150505 :         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
    1329                 :            :                                 start_addr, end_addr);
    1330                 :     150505 :         mmu_notifier_invalidate_range_start(&range);
    1331   [ +  +  +  - ]:    1330487 :         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
    1332                 :    1029477 :                 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
    1333                 :     150505 :         mmu_notifier_invalidate_range_end(&range);
    1334                 :     150505 : }
    1335                 :            : 
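unmap_vmas() is driven by teardown paths such as exit_mmap() and
unmap_region(), which bracket it with a mmu_gather. The call shape is roughly
the following sketch of the exit_mmap() pattern (abridged from memory, not a
verbatim quote of this tree):

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, 0, -1);        /* whole address space */
        unmap_vmas(&tlb, vma, 0, -1);           /* drop every pte */
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb, 0, -1);            /* flush and free batches */
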
    1336                 :            : /**
    1337                 :            :  * zap_page_range - remove user pages in a given range
    1338                 :            :  * @vma: vm_area_struct holding the applicable pages
    1339                 :            :  * @start: starting address of pages to zap
    1340                 :            :  * @size: number of bytes to zap
    1341                 :            :  *
    1342                 :            :  * Caller must protect the VMA list
    1343                 :            :  */
    1344                 :          0 : void zap_page_range(struct vm_area_struct *vma, unsigned long start,
    1345                 :            :                 unsigned long size)
    1346                 :            : {
    1347                 :          0 :         struct mmu_notifier_range range;
    1348                 :          0 :         struct mmu_gather tlb;
    1349                 :            : 
    1350                 :          0 :         lru_add_drain();
    1351                 :          0 :         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
    1352                 :            :                                 start, start + size);
    1353                 :          0 :         tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
    1354                 :          0 :         update_hiwater_rss(vma->vm_mm);
    1355                 :          0 :         mmu_notifier_invalidate_range_start(&range);
    1356   [ #  #  #  # ]:          0 :         for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
    1357                 :          0 :                 unmap_single_vma(&tlb, vma, start, range.end, NULL);
    1358                 :          0 :         mmu_notifier_invalidate_range_end(&range);
    1359                 :          0 :         tlb_finish_mmu(&tlb, start, range.end);
    1360                 :          0 : }
    1361                 :            : 
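A common route into zap_page_range() on this kernel is madvise(MADV_DONTNEED),
which zaps the ptes and lets later faults repopulate them (with zero pages for
private anonymous memory). A self-contained demonstration:

        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>

        int main(void)
        {
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return 1;
                strcpy(p, "hello");

                madvise(p, 4096, MADV_DONTNEED);        /* zaps the range */
                printf("byte 0 is now %d\n", p[0]);     /* 0: zero-filled refault */
                return 0;
        }
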
    1362                 :            : /**
    1363                 :            :  * zap_page_range_single - remove user pages in a given range
    1364                 :            :  * @vma: vm_area_struct holding the applicable pages
    1365                 :            :  * @address: starting address of pages to zap
    1366                 :            :  * @size: number of bytes to zap
    1367                 :            :  * @details: details of shared cache invalidation
    1368                 :            :  *
    1369                 :            :  * The range must fit into one VMA.
    1370                 :            :  */
    1371                 :          0 : static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
    1372                 :            :                 unsigned long size, struct zap_details *details)
    1373                 :            : {
    1374                 :          0 :         struct mmu_notifier_range range;
    1375                 :          0 :         struct mmu_gather tlb;
    1376                 :            : 
    1377                 :          0 :         lru_add_drain();
    1378                 :          0 :         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
    1379                 :            :                                 address, address + size);
    1380                 :          0 :         tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
    1381                 :          0 :         update_hiwater_rss(vma->vm_mm);
    1382                 :          0 :         mmu_notifier_invalidate_range_start(&range);
    1383                 :          0 :         unmap_single_vma(&tlb, vma, address, range.end, details);
    1384                 :          0 :         mmu_notifier_invalidate_range_end(&range);
    1385                 :          0 :         tlb_finish_mmu(&tlb, address, range.end);
    1386                 :          0 : }
    1387                 :            : 
    1388                 :            : /**
    1389                 :            :  * zap_vma_ptes - remove ptes mapping the vma
    1390                 :            :  * @vma: vm_area_struct holding ptes to be zapped
    1391                 :            :  * @address: starting address of pages to zap
    1392                 :            :  * @size: number of bytes to zap
    1393                 :            :  *
    1394                 :            :  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
    1395                 :            :  *
    1396                 :            :  * The entire address range must be fully contained within the vma.
    1397                 :            :  *
    1398                 :            :  */
    1399                 :          0 : void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    1400                 :            :                 unsigned long size)
    1401                 :            : {
    1402   [ #  #  #  # ]:          0 :         if (address < vma->vm_start || address + size > vma->vm_end ||
    1403         [ #  # ]:          0 :                         !(vma->vm_flags & VM_PFNMAP))
    1404                 :            :                 return;
    1405                 :            : 
    1406                 :          0 :         zap_page_range_single(vma, address, size, NULL);
    1407                 :            : }
    1408                 :            : EXPORT_SYMBOL_GPL(zap_vma_ptes);
    1409                 :            : 
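zap_vma_ptes() serves drivers that must revoke a VM_PFNMAP user mapping, for
instance one established with remap_pfn_range(), before its backing resource
goes away. A hypothetical fragment (the mydev_ name is invented):

        /* Tear down the user mapping before freeing the device buffer. */
        static void mydev_revoke(struct vm_area_struct *vma)
        {
                zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
        }
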
    1410                 :     110456 : pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
    1411                 :            :                         spinlock_t **ptl)
    1412                 :            : {
    1413                 :     110456 :         pgd_t *pgd;
    1414                 :     110456 :         p4d_t *p4d;
    1415                 :     110456 :         pud_t *pud;
    1416                 :     110456 :         pmd_t *pmd;
    1417                 :            : 
    1418                 :     110456 :         pgd = pgd_offset(mm, addr);
    1419                 :     110456 :         p4d = p4d_alloc(mm, pgd, addr);
    1420         [ +  - ]:     110456 :         if (!p4d)
    1421                 :            :                 return NULL;
    1422                 :     110456 :         pud = pud_alloc(mm, p4d, addr);
    1423         [ +  - ]:     110456 :         if (!pud)
    1424                 :            :                 return NULL;
    1425                 :     110456 :         pmd = pmd_alloc(mm, pud, addr);
    1426         [ +  - ]:     110456 :         if (!pmd)
    1427                 :            :                 return NULL;
    1428                 :            : 
    1429                 :     110456 :         VM_BUG_ON(pmd_trans_huge(*pmd));
     1430   [ +  +  +  -  +  - ]:     220912 :         return pte_alloc_map_lock(mm, pmd, addr, ptl);
    1431                 :            : }
    1432                 :            : 
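Callers reach this through the get_locked_pte() wrapper and must pair it with
pte_unmap_unlock(), exactly as insert_page() below does. The canonical usage
shape, as a sketch (mm, addr, page and prot assumed in scope):

        spinlock_t *ptl;
        pte_t *pte = get_locked_pte(mm, addr, &ptl);

        if (pte) {
                if (pte_none(*pte))
                        set_pte_at(mm, addr, pte, mk_pte(page, prot));
                pte_unmap_unlock(pte, ptl);     /* drop mapping and ptl */
        }
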
    1433                 :            : /*
    1434                 :            :  * This is the old fallback for page remapping.
    1435                 :            :  *
    1436                 :            :  * For historical reasons, it only allows reserved pages. Only
    1437                 :            :  * old drivers should use this, and they needed to mark their
    1438                 :            :  * pages reserved for the old functions anyway.
    1439                 :            :  */
    1440                 :            : static int insert_page(struct vm_area_struct *vma, unsigned long addr,
    1441                 :            :                         struct page *page, pgprot_t prot)
    1442                 :            : {
    1443                 :            :         struct mm_struct *mm = vma->vm_mm;
    1444                 :            :         int retval;
    1445                 :            :         pte_t *pte;
    1446                 :            :         spinlock_t *ptl;
    1447                 :            : 
    1448                 :            :         retval = -EINVAL;
    1449                 :            :         if (PageAnon(page) || PageSlab(page) || page_has_type(page))
    1450                 :            :                 goto out;
    1451                 :            :         retval = -ENOMEM;
    1452                 :            :         flush_dcache_page(page);
    1453                 :            :         pte = get_locked_pte(mm, addr, &ptl);
    1454                 :            :         if (!pte)
    1455                 :            :                 goto out;
    1456                 :            :         retval = -EBUSY;
    1457                 :            :         if (!pte_none(*pte))
    1458                 :            :                 goto out_unlock;
    1459                 :            : 
    1460                 :            :         /* Ok, finally just insert the thing.. */
    1461                 :            :         get_page(page);
    1462                 :            :         inc_mm_counter_fast(mm, mm_counter_file(page));
    1463                 :            :         page_add_file_rmap(page, false);
    1464                 :            :         set_pte_at(mm, addr, pte, mk_pte(page, prot));
    1465                 :            : 
    1466                 :            :         retval = 0;
    1467                 :            : out_unlock:
    1468                 :            :         pte_unmap_unlock(pte, ptl);
    1469                 :            : out:
    1470                 :            :         return retval;
    1471                 :            : }
    1472                 :            : 
    1473                 :            : /**
    1474                 :            :  * vm_insert_page - insert single page into user vma
    1475                 :            :  * @vma: user vma to map to
    1476                 :            :  * @addr: target user address of this page
    1477                 :            :  * @page: source kernel page
    1478                 :            :  *
    1479                 :            :  * This allows drivers to insert individual pages they've allocated
    1480                 :            :  * into a user vma.
    1481                 :            :  *
    1482                 :            :  * The page has to be a nice clean _individual_ kernel allocation.
    1483                 :            :  * If you allocate a compound page, you need to have marked it as
    1484                 :            :  * such (__GFP_COMP), or manually just split the page up yourself
    1485                 :            :  * (see split_page()).
    1486                 :            :  *
    1487                 :            :  * NOTE! Traditionally this was done with "remap_pfn_range()" which
    1488                 :            :  * took an arbitrary page protection parameter. This doesn't allow
    1489                 :            :  * that. Your vma protection will have to be set up correctly, which
    1490                 :            :  * means that if you want a shared writable mapping, you'd better
    1491                 :            :  * ask for a shared writable mapping!
    1492                 :            :  *
    1493                 :            :  * The page does not need to be reserved.
    1494                 :            :  *
     1495                 :            :  * Usually this function is called from the f_op->mmap() handler
    1496                 :            :  * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
    1497                 :            :  * Caller must set VM_MIXEDMAP on vma if it wants to call this
    1498                 :            :  * function from other places, for example from page-fault handler.
    1499                 :            :  *
    1500                 :            :  * Return: %0 on success, negative error code otherwise.
    1501                 :            :  */
    1502                 :        176 : int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
    1503                 :            :                         struct page *page)
    1504                 :            : {
    1505   [ +  -  +  - ]:        176 :         if (addr < vma->vm_start || addr >= vma->vm_end)
    1506                 :            :                 return -EFAULT;
    1507   [ -  +  +  - ]:        352 :         if (!page_count(page))
    1508                 :            :                 return -EINVAL;
    1509         [ +  + ]:        176 :         if (!(vma->vm_flags & VM_MIXEDMAP)) {
    1510         [ -  + ]:         11 :                 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
    1511         [ -  + ]:         11 :                 BUG_ON(vma->vm_flags & VM_PFNMAP);
    1512                 :         11 :                 vma->vm_flags |= VM_MIXEDMAP;
    1513                 :            :         }
    1514                 :        176 :         return insert_page(vma, addr, page, vma->vm_page_prot);
    1515                 :            : }
    1516                 :            : EXPORT_SYMBOL(vm_insert_page);
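
/*
 * Usage sketch (illustrative only, not compiled): a minimal driver
 * ->mmap() handler that maps a single driver-allocated page with
 * vm_insert_page().  "struct my_dev" and its "page" member are
 * hypothetical names, not part of this file.
 */
#if 0
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	/* Runs under the mmap_sem write-lock, so VM_MIXEDMAP may be set. */
	return vm_insert_page(vma, vma->vm_start, dev->page);
}
#endif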
    1517                 :            : 
    1518                 :            : /*
     1519                 :            :  * __vm_map_pages - map a range of kernel pages into a user vma
    1520                 :            :  * @vma: user vma to map to
    1521                 :            :  * @pages: pointer to array of source kernel pages
    1522                 :            :  * @num: number of pages in page array
    1523                 :            :  * @offset: user's requested vm_pgoff
    1524                 :            :  *
     1525                 :            :  * This allows drivers to map a range of kernel pages into a user vma.
    1526                 :            :  *
     1527                 :            :  * Return: %0 on success, negative error code otherwise.
    1528                 :            :  */
    1529                 :          0 : static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
    1530                 :            :                                 unsigned long num, unsigned long offset)
    1531                 :            : {
    1532         [ #  # ]:          0 :         unsigned long count = vma_pages(vma);
    1533                 :          0 :         unsigned long uaddr = vma->vm_start;
    1534                 :          0 :         int ret, i;
    1535                 :            : 
    1536                 :            :         /* Fail if the user requested offset is beyond the end of the object */
    1537         [ #  # ]:          0 :         if (offset >= num)
    1538                 :            :                 return -ENXIO;
    1539                 :            : 
    1540                 :            :         /* Fail if the user requested size exceeds available object size */
    1541         [ #  # ]:          0 :         if (count > num - offset)
    1542                 :            :                 return -ENXIO;
    1543                 :            : 
    1544         [ #  # ]:          0 :         for (i = 0; i < count; i++) {
    1545                 :          0 :                 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
    1546         [ #  # ]:          0 :                 if (ret < 0)
    1547                 :          0 :                         return ret;
    1548                 :          0 :                 uaddr += PAGE_SIZE;
    1549                 :            :         }
    1550                 :            : 
    1551                 :            :         return 0;
    1552                 :            : }
    1553                 :            : 
    1554                 :            : /**
     1555                 :            :  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
    1556                 :            :  * @vma: user vma to map to
    1557                 :            :  * @pages: pointer to array of source kernel pages
    1558                 :            :  * @num: number of pages in page array
    1559                 :            :  *
    1560                 :            :  * Maps an object consisting of @num pages, catering for the user's
     1561                 :            :  * requested vm_pgoff.
    1562                 :            :  *
    1563                 :            :  * If we fail to insert any page into the vma, the function will return
    1564                 :            :  * immediately leaving any previously inserted pages present.  Callers
    1565                 :            :  * from the mmap handler may immediately return the error as their caller
    1566                 :            :  * will destroy the vma, removing any successfully inserted pages. Other
    1567                 :            :  * callers should make their own arrangements for calling unmap_region().
    1568                 :            :  *
    1569                 :            :  * Context: Process context. Called by mmap handlers.
     1570                 :            :  * Return: %0 on success, negative error code otherwise.
    1571                 :            :  */
    1572                 :          0 : int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
    1573                 :            :                                 unsigned long num)
    1574                 :            : {
    1575                 :          0 :         return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
    1576                 :            : }
    1577                 :            : EXPORT_SYMBOL(vm_map_pages);
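
/*
 * Usage sketch (illustrative only, not compiled): mapping an array of
 * pages from a ->mmap() handler while honouring the offset userspace
 * passed to mmap().  "struct my_buf" with "pages"/"num_pages" members
 * is a hypothetical driver structure.
 */
#if 0
static int my_buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_buf *buf = file->private_data;

	/* vm_map_pages() reads and range-checks vma->vm_pgoff itself. */
	return vm_map_pages(vma, buf->pages, buf->num_pages);
}
#endif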
    1578                 :            : 
    1579                 :            : /**
     1580                 :            :  * vm_map_pages_zero - map a range of kernel pages starting at offset 0
    1581                 :            :  * @vma: user vma to map to
    1582                 :            :  * @pages: pointer to array of source kernel pages
    1583                 :            :  * @num: number of pages in page array
    1584                 :            :  *
     1585                 :            :  * Similar to vm_map_pages(), except that it explicitly sets the offset
     1586                 :            :  * to 0. This function is intended for drivers that do not take
     1587                 :            :  * vm_pgoff into account.
    1588                 :            :  *
    1589                 :            :  * Context: Process context. Called by mmap handlers.
     1590                 :            :  * Return: %0 on success, negative error code otherwise.
    1591                 :            :  */
    1592                 :          0 : int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
    1593                 :            :                                 unsigned long num)
    1594                 :            : {
    1595                 :          0 :         return __vm_map_pages(vma, pages, num, 0);
    1596                 :            : }
    1597                 :            : EXPORT_SYMBOL(vm_map_pages_zero);
    1598                 :            : 
    1599                 :        775 : static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
    1600                 :            :                         pfn_t pfn, pgprot_t prot, bool mkwrite)
    1601                 :            : {
    1602                 :        775 :         struct mm_struct *mm = vma->vm_mm;
    1603                 :        775 :         pte_t *pte, entry;
    1604                 :        775 :         spinlock_t *ptl;
    1605                 :            : 
    1606                 :        775 :         pte = get_locked_pte(mm, addr, &ptl);
    1607         [ +  - ]:        775 :         if (!pte)
    1608                 :            :                 return VM_FAULT_OOM;
    1609         [ -  + ]:        775 :         if (!pte_none(*pte)) {
    1610         [ #  # ]:          0 :                 if (mkwrite) {
    1611                 :            :                         /*
    1612                 :            :                          * For read faults on private mappings the PFN passed
    1613                 :            :                          * in may not match the PFN we have mapped if the
    1614                 :            :                          * mapped PFN is a writeable COW page.  In the mkwrite
    1615                 :            :                          * case we are creating a writable PTE for a shared
    1616                 :            :                          * mapping and we expect the PFNs to match. If they
    1617                 :            :                          * don't match, we are likely racing with block
    1618                 :            :                          * allocation and mapping invalidation so just skip the
    1619                 :            :                          * update.
    1620                 :            :                          */
    1621   [ #  #  #  # ]:          0 :                         if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
    1622   [ #  #  #  # ]:          0 :                                 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
    1623                 :          0 :                                 goto out_unlock;
    1624                 :            :                         }
    1625         [ #  # ]:          0 :                         entry = pte_mkyoung(*pte);
    1626         [ #  # ]:          0 :                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    1627                 :          0 :                         if (ptep_set_access_flags(vma, addr, pte, entry, 1))
    1628                 :            :                                 update_mmu_cache(vma, addr, pte);
    1629                 :            :                 }
    1630                 :          0 :                 goto out_unlock;
    1631                 :            :         }
    1632                 :            : 
    1633                 :            :         /* Ok, finally just insert the thing.. */
    1634         [ -  + ]:        775 :         if (pfn_t_devmap(pfn))
    1635         [ #  # ]:          0 :                 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
    1636                 :            :         else
    1637         [ +  - ]:        775 :                 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
    1638                 :            : 
    1639         [ -  + ]:        775 :         if (mkwrite) {
    1640         [ #  # ]:          0 :                 entry = pte_mkyoung(entry);
    1641         [ #  # ]:          0 :                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    1642                 :            :         }
    1643                 :            : 
    1644                 :        775 :         set_pte_at(mm, addr, pte, entry);
    1645                 :        775 :         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
    1646                 :            : 
    1647                 :        775 : out_unlock:
    1648                 :        775 :         pte_unmap_unlock(pte, ptl);
    1649                 :        775 :         return VM_FAULT_NOPAGE;
    1650                 :            : }
    1651                 :            : 
    1652                 :            : /**
    1653                 :            :  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
    1654                 :            :  * @vma: user vma to map to
    1655                 :            :  * @addr: target user address of this page
    1656                 :            :  * @pfn: source kernel pfn
    1657                 :            :  * @pgprot: pgprot flags for the inserted page
    1658                 :            :  *
     1659                 :            :  * This is exactly like vmf_insert_pfn(), except that it allows drivers
     1660                 :            :  * to override pgprot on a per-page basis.
    1661                 :            :  *
    1662                 :            :  * This only makes sense for IO mappings, and it makes no sense for
     1663                 :            :  * COW mappings.  In general, using multiple VMAs is preferable;
    1664                 :            :  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
    1665                 :            :  * impractical.
    1666                 :            :  *
    1667                 :            :  * See vmf_insert_mixed_prot() for a discussion of the implication of using
    1668                 :            :  * a value of @pgprot different from that of @vma->vm_page_prot.
    1669                 :            :  *
    1670                 :            :  * Context: Process context.  May allocate using %GFP_KERNEL.
    1671                 :            :  * Return: vm_fault_t value.
    1672                 :            :  */
    1673                 :        775 : vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
    1674                 :            :                         unsigned long pfn, pgprot_t pgprot)
    1675                 :            : {
    1676                 :            :         /*
    1677                 :            :          * Technically, architectures with pte_special can avoid all these
    1678                 :            :          * restrictions (same for remap_pfn_range).  However we would like
    1679                 :            :          * consistency in testing and feature parity among all, so we should
    1680                 :            :          * try to keep these invariants in place for everybody.
    1681                 :            :          */
    1682         [ -  + ]:        775 :         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
    1683         [ -  + ]:        775 :         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
    1684                 :            :                                                 (VM_PFNMAP|VM_MIXEDMAP));
    1685   [ +  -  -  + ]:        775 :         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
    1686   [ -  +  -  - ]:        775 :         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
    1687                 :            : 
    1688   [ +  -  +  - ]:        775 :         if (addr < vma->vm_start || addr >= vma->vm_end)
    1689                 :            :                 return VM_FAULT_SIGBUS;
    1690                 :            : 
    1691         [ +  - ]:        775 :         if (!pfn_modify_allowed(pfn, pgprot))
    1692                 :            :                 return VM_FAULT_SIGBUS;
    1693                 :            : 
    1694                 :        775 :         track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
    1695                 :            : 
    1696                 :        775 :         return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
    1697                 :            :                         false);
    1698                 :            : }
    1699                 :            : EXPORT_SYMBOL(vmf_insert_pfn_prot);
    1700                 :            : 
    1701                 :            : /**
    1702                 :            :  * vmf_insert_pfn - insert single pfn into user vma
    1703                 :            :  * @vma: user vma to map to
    1704                 :            :  * @addr: target user address of this page
    1705                 :            :  * @pfn: source kernel pfn
    1706                 :            :  *
    1707                 :            :  * Similar to vm_insert_page, this allows drivers to insert individual pages
    1708                 :            :  * they've allocated into a user vma. Same comments apply.
    1709                 :            :  *
    1710                 :            :  * This function should only be called from a vm_ops->fault handler, and
    1711                 :            :  * in that case the handler should return the result of this function.
    1712                 :            :  *
    1713                 :            :  * vma cannot be a COW mapping.
    1714                 :            :  *
    1715                 :            :  * As this is called only for pages that do not currently exist, we
    1716                 :            :  * do not need to flush old virtual caches or the TLB.
    1717                 :            :  *
    1718                 :            :  * Context: Process context.  May allocate using %GFP_KERNEL.
    1719                 :            :  * Return: vm_fault_t value.
    1720                 :            :  */
    1721                 :        437 : vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
    1722                 :            :                         unsigned long pfn)
    1723                 :            : {
    1724                 :        437 :         return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
    1725                 :            : }
    1726                 :            : EXPORT_SYMBOL(vmf_insert_pfn);
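
/*
 * Usage sketch (illustrative only, not compiled): a ->fault handler
 * for a VM_PFNMAP vma backed by a contiguous physical region, where
 * the ->mmap() handler already set VM_IO | VM_PFNMAP.  "struct my_dev"
 * and its "phys_base" member are hypothetical.
 */
#if 0
static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;

	/* The handler returns the vm_fault_t from vmf_insert_pfn(). */
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
#endif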
    1727                 :            : 
    1728                 :          0 : static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
    1729                 :            : {
    1730                 :            :         /* these checks mirror the abort conditions in vm_normal_page */
    1731                 :          0 :         if (vma->vm_flags & VM_MIXEDMAP)
    1732                 :            :                 return true;
    1733         [ #  # ]:          0 :         if (pfn_t_devmap(pfn))
    1734                 :            :                 return true;
    1735         [ #  # ]:          0 :         if (pfn_t_special(pfn))
    1736                 :            :                 return true;
    1737         [ #  # ]:          0 :         if (is_zero_pfn(pfn_t_to_pfn(pfn)))
    1738                 :          0 :                 return true;
    1739                 :            :         return false;
    1740                 :            : }
    1741                 :            : 
    1742                 :          0 : static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
    1743                 :            :                 unsigned long addr, pfn_t pfn, pgprot_t pgprot,
    1744                 :            :                 bool mkwrite)
    1745                 :            : {
    1746                 :          0 :         int err;
    1747                 :            : 
    1748   [ #  #  #  # ]:          0 :         BUG_ON(!vm_mixed_ok(vma, pfn));
    1749                 :            : 
    1750   [ #  #  #  # ]:          0 :         if (addr < vma->vm_start || addr >= vma->vm_end)
    1751                 :            :                 return VM_FAULT_SIGBUS;
    1752                 :            : 
    1753                 :          0 :         track_pfn_insert(vma, &pgprot, pfn);
    1754                 :            : 
    1755         [ #  # ]:          0 :         if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
    1756                 :            :                 return VM_FAULT_SIGBUS;
    1757                 :            : 
    1758                 :            :         /*
    1759                 :            :          * If we don't have pte special, then we have to use the pfn_valid()
    1760                 :            :          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
    1761                 :            :          * refcount the page if pfn_valid is true (hence insert_page rather
    1762                 :            :          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
     1763                 :            :  * without pte special, it would then be refcounted as a normal page.
    1764                 :            :          */
    1765                 :          0 :         if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
    1766                 :            :             !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
    1767                 :            :                 struct page *page;
    1768                 :            : 
    1769                 :            :                 /*
    1770                 :            :                  * At this point we are committed to insert_page()
    1771                 :            :                  * regardless of whether the caller specified flags that
    1772                 :            :                  * result in pfn_t_has_page() == false.
    1773                 :            :                  */
    1774                 :            :                 page = pfn_to_page(pfn_t_to_pfn(pfn));
    1775                 :            :                 err = insert_page(vma, addr, page, pgprot);
    1776                 :            :         } else {
    1777                 :          0 :                 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
    1778                 :            :         }
    1779                 :            : 
    1780                 :            :         if (err == -ENOMEM)
    1781                 :            :                 return VM_FAULT_OOM;
    1782                 :            :         if (err < 0 && err != -EBUSY)
    1783                 :            :                 return VM_FAULT_SIGBUS;
    1784                 :            : 
    1785                 :            :         return VM_FAULT_NOPAGE;
    1786                 :            : }
    1787                 :            : 
    1788                 :            : /**
    1789                 :            :  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
    1790                 :            :  * @vma: user vma to map to
    1791                 :            :  * @addr: target user address of this page
    1792                 :            :  * @pfn: source kernel pfn
    1793                 :            :  * @pgprot: pgprot flags for the inserted page
    1794                 :            :  *
     1795                 :            :  * This is exactly like vmf_insert_mixed(), except that it allows drivers
     1796                 :            :  * to override pgprot on a per-page basis.
    1797                 :            :  *
    1798                 :            :  * Typically this function should be used by drivers to set caching- and
     1799                 :            :  * encryption bits different from those of @vma->vm_page_prot, because
    1800                 :            :  * the caching- or encryption mode may not be known at mmap() time.
    1801                 :            :  * This is ok as long as @vma->vm_page_prot is not used by the core vm
    1802                 :            :  * to set caching and encryption bits for those vmas (except for COW pages).
    1803                 :            :  * This is ensured by core vm only modifying these page table entries using
    1804                 :            :  * functions that don't touch caching- or encryption bits, using pte_modify()
    1805                 :            :  * if needed. (See for example mprotect()).
    1806                 :            :  * Also when new page-table entries are created, this is only done using the
    1807                 :            :  * fault() callback, and never using the value of vma->vm_page_prot,
    1808                 :            :  * except for page-table entries that point to anonymous pages as the result
    1809                 :            :  * of COW.
    1810                 :            :  *
    1811                 :            :  * Context: Process context.  May allocate using %GFP_KERNEL.
    1812                 :            :  * Return: vm_fault_t value.
    1813                 :            :  */
    1814                 :          0 : vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
    1815                 :            :                                  pfn_t pfn, pgprot_t pgprot)
    1816                 :            : {
    1817                 :          0 :         return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
    1818                 :            : }
    1819                 :            : EXPORT_SYMBOL(vmf_insert_mixed_prot);
    1820                 :            : 
    1821                 :          0 : vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
    1822                 :            :                 pfn_t pfn)
    1823                 :            : {
    1824                 :          0 :         return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
    1825                 :            : }
    1826                 :            : EXPORT_SYMBOL(vmf_insert_mixed);
    1827                 :            : 
    1828                 :            : /*
     1829                 :            :  *  If the insertion of the PTE failed because someone else already added a
     1830                 :            :  *  different entry in the meantime, we treat that as success, as we assume
     1831                 :            :  *  the same entry was actually inserted.
    1832                 :            :  */
    1833                 :          0 : vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
    1834                 :            :                 unsigned long addr, pfn_t pfn)
    1835                 :            : {
    1836                 :          0 :         return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
    1837                 :            : }
    1838                 :            : EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
    1839                 :            : 
    1840                 :            : /*
     1841                 :            :  * Maps a range of physical memory into the requested pages. The old
     1842                 :            :  * mappings are removed. Any references to nonexistent pages result
     1843                 :            :  * in null mappings (currently treated as "copy-on-access").
    1844                 :            :  */
    1845                 :          0 : static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
    1846                 :            :                         unsigned long addr, unsigned long end,
    1847                 :            :                         unsigned long pfn, pgprot_t prot)
    1848                 :            : {
    1849                 :          0 :         pte_t *pte;
    1850                 :          0 :         spinlock_t *ptl;
    1851                 :          0 :         int err = 0;
    1852                 :            : 
    1853   [ #  #  #  #  :          0 :         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
                   #  # ]
    1854         [ #  # ]:          0 :         if (!pte)
    1855                 :          0 :                 return -ENOMEM;
    1856                 :          0 :         arch_enter_lazy_mmu_mode();
    1857                 :          0 :         do {
    1858         [ #  # ]:          0 :                 BUG_ON(!pte_none(*pte));
    1859         [ #  # ]:          0 :                 if (!pfn_modify_allowed(pfn, prot)) {
    1860                 :            :                         err = -EACCES;
    1861                 :            :                         break;
    1862                 :            :                 }
    1863         [ #  # ]:          0 :                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
    1864                 :          0 :                 pfn++;
    1865         [ #  # ]:          0 :         } while (pte++, addr += PAGE_SIZE, addr != end);
    1866                 :          0 :         arch_leave_lazy_mmu_mode();
    1867                 :          0 :         pte_unmap_unlock(pte - 1, ptl);
    1868                 :          0 :         return err;
    1869                 :            : }
    1870                 :            : 
    1871                 :          0 : static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
    1872                 :            :                         unsigned long addr, unsigned long end,
    1873                 :            :                         unsigned long pfn, pgprot_t prot)
    1874                 :            : {
    1875                 :          0 :         pmd_t *pmd;
    1876                 :          0 :         unsigned long next;
    1877                 :          0 :         int err;
    1878                 :            : 
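        /*
         * Bias pfn by the starting address: pfn + (addr >> PAGE_SHIFT)
         * below then yields the pfn corresponding to whatever addr the
         * walk has reached.  The same idiom repeats at each level of
         * the descent.
         */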
    1879                 :          0 :         pfn -= addr >> PAGE_SHIFT;
    1880                 :          0 :         pmd = pmd_alloc(mm, pud, addr);
    1881         [ #  # ]:          0 :         if (!pmd)
    1882                 :            :                 return -ENOMEM;
    1883                 :          0 :         VM_BUG_ON(pmd_trans_huge(*pmd));
    1884                 :          0 :         do {
    1885         [ #  # ]:          0 :                 next = pmd_addr_end(addr, end);
    1886                 :          0 :                 err = remap_pte_range(mm, pmd, addr, next,
    1887                 :          0 :                                 pfn + (addr >> PAGE_SHIFT), prot);
    1888         [ #  # ]:          0 :                 if (err)
    1889                 :          0 :                         return err;
    1890         [ #  # ]:          0 :         } while (pmd++, addr = next, addr != end);
    1891                 :            :         return 0;
    1892                 :            : }
    1893                 :            : 
    1894                 :          0 : static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
    1895                 :            :                         unsigned long addr, unsigned long end,
    1896                 :            :                         unsigned long pfn, pgprot_t prot)
    1897                 :            : {
    1898                 :          0 :         pud_t *pud;
    1899                 :          0 :         unsigned long next;
    1900                 :          0 :         int err;
    1901                 :            : 
    1902                 :          0 :         pfn -= addr >> PAGE_SHIFT;
    1903                 :          0 :         pud = pud_alloc(mm, p4d, addr);
    1904         [ #  # ]:          0 :         if (!pud)
    1905                 :            :                 return -ENOMEM;
    1906                 :          0 :         do {
    1907         [ #  # ]:          0 :                 next = pud_addr_end(addr, end);
    1908                 :          0 :                 err = remap_pmd_range(mm, pud, addr, next,
    1909                 :          0 :                                 pfn + (addr >> PAGE_SHIFT), prot);
    1910         [ #  # ]:          0 :                 if (err)
    1911                 :          0 :                         return err;
    1912         [ #  # ]:          0 :         } while (pud++, addr = next, addr != end);
    1913                 :            :         return 0;
    1914                 :            : }
    1915                 :            : 
    1916                 :          0 : static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
    1917                 :            :                         unsigned long addr, unsigned long end,
    1918                 :            :                         unsigned long pfn, pgprot_t prot)
    1919                 :            : {
    1920                 :          0 :         p4d_t *p4d;
    1921                 :          0 :         unsigned long next;
    1922                 :          0 :         int err;
    1923                 :            : 
    1924                 :          0 :         pfn -= addr >> PAGE_SHIFT;
    1925                 :          0 :         p4d = p4d_alloc(mm, pgd, addr);
    1926         [ #  # ]:          0 :         if (!p4d)
    1927                 :            :                 return -ENOMEM;
    1928                 :          0 :         do {
    1929         [ #  # ]:          0 :                 next = p4d_addr_end(addr, end);
    1930                 :          0 :                 err = remap_pud_range(mm, p4d, addr, next,
    1931                 :          0 :                                 pfn + (addr >> PAGE_SHIFT), prot);
    1932         [ #  # ]:          0 :                 if (err)
    1933                 :          0 :                         return err;
    1934         [ #  # ]:          0 :         } while (p4d++, addr = next, addr != end);
    1935                 :            :         return 0;
    1936                 :            : }
    1937                 :            : 
    1938                 :            : /**
    1939                 :            :  * remap_pfn_range - remap kernel memory to userspace
    1940                 :            :  * @vma: user vma to map to
    1941                 :            :  * @addr: target user address to start at
    1942                 :            :  * @pfn: physical address of kernel memory
    1943                 :            :  * @size: size of map area
    1944                 :            :  * @prot: page protection flags for this mapping
    1945                 :            :  *
    1946                 :            :  * Note: this is only safe if the mm semaphore is held when called.
    1947                 :            :  *
    1948                 :            :  * Return: %0 on success, negative error code otherwise.
    1949                 :            :  */
    1950                 :          0 : int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
    1951                 :            :                     unsigned long pfn, unsigned long size, pgprot_t prot)
    1952                 :            : {
    1953                 :          0 :         pgd_t *pgd;
    1954                 :          0 :         unsigned long next;
    1955                 :          0 :         unsigned long end = addr + PAGE_ALIGN(size);
    1956                 :          0 :         struct mm_struct *mm = vma->vm_mm;
    1957                 :          0 :         unsigned long remap_pfn = pfn;
    1958                 :          0 :         int err;
    1959                 :            : 
    1960                 :            :         /*
    1961                 :            :          * Physically remapped pages are special. Tell the
    1962                 :            :          * rest of the world about it:
    1963                 :            :          *   VM_IO tells people not to look at these pages
    1964                 :            :          *      (accesses can have side effects).
    1965                 :            :          *   VM_PFNMAP tells the core MM that the base pages are just
    1966                 :            :          *      raw PFN mappings, and do not have a "struct page" associated
    1967                 :            :          *      with them.
    1968                 :            :          *   VM_DONTEXPAND
    1969                 :            :          *      Disable vma merging and expanding with mremap().
    1970                 :            :          *   VM_DONTDUMP
    1971                 :            :          *      Omit vma from core dump, even when VM_IO turned off.
    1972                 :            :          *
    1973                 :            :          * There's a horrible special case to handle copy-on-write
    1974                 :            :          * behaviour that some programs depend on. We mark the "original"
    1975                 :            :          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
    1976                 :            :          * See vm_normal_page() for details.
    1977                 :            :          */
    1978         [ #  # ]:          0 :         if (is_cow_mapping(vma->vm_flags)) {
    1979   [ #  #  #  # ]:          0 :                 if (addr != vma->vm_start || end != vma->vm_end)
    1980                 :            :                         return -EINVAL;
    1981                 :          0 :                 vma->vm_pgoff = pfn;
    1982                 :            :         }
    1983                 :            : 
    1984                 :          0 :         err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
    1985         [ #  # ]:          0 :         if (err)
    1986                 :            :                 return -EINVAL;
    1987                 :            : 
    1988                 :          0 :         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
    1989                 :            : 
    1990         [ #  # ]:          0 :         BUG_ON(addr >= end);
    1991                 :          0 :         pfn -= addr >> PAGE_SHIFT;
    1992                 :          0 :         pgd = pgd_offset(mm, addr);
    1993                 :          0 :         flush_cache_range(vma, addr, end);
    1994                 :          0 :         do {
    1995         [ #  # ]:          0 :                 next = pgd_addr_end(addr, end);
    1996                 :          0 :                 err = remap_p4d_range(mm, pgd, addr, next,
    1997                 :          0 :                                 pfn + (addr >> PAGE_SHIFT), prot);
    1998         [ #  # ]:          0 :                 if (err)
    1999                 :            :                         break;
    2000         [ #  # ]:          0 :         } while (pgd++, addr = next, addr != end);
    2001                 :            : 
    2002         [ #  # ]:          0 :         if (err)
    2003                 :          0 :                 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
    2004                 :            : 
    2005                 :            :         return err;
    2006                 :            : }
    2007                 :            : EXPORT_SYMBOL(remap_pfn_range);
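
/*
 * Usage sketch (illustrative only, not compiled): the classic ->mmap()
 * idiom that maps a whole physical region up front with
 * remap_pfn_range().  "dev->phys_base" is a hypothetical physical base
 * address of the device region.
 */
#if 0
static int my_region_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
#endif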
    2008                 :            : 
    2009                 :            : /**
    2010                 :            :  * vm_iomap_memory - remap memory to userspace
    2011                 :            :  * @vma: user vma to map to
    2012                 :            :  * @start: start of area
    2013                 :            :  * @len: size of area
    2014                 :            :  *
    2015                 :            :  * This is a simplified io_remap_pfn_range() for common driver use. The
     2016                 :            :  * driver just needs to give us the physical memory range to be mapped;
     2017                 :            :  * we'll figure out the rest from the vma information.
    2018                 :            :  *
     2019                 :            :  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
     2020                 :            :  * up write-combining or similar.
    2021                 :            :  *
    2022                 :            :  * Return: %0 on success, negative error code otherwise.
    2023                 :            :  */
    2024                 :          0 : int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
    2025                 :            : {
    2026                 :          0 :         unsigned long vm_len, pfn, pages;
    2027                 :            : 
    2028                 :            :         /* Check that the physical memory area passed in looks valid */
    2029         [ #  # ]:          0 :         if (start + len < start)
    2030                 :            :                 return -EINVAL;
    2031                 :            :         /*
    2032                 :            :          * You *really* shouldn't map things that aren't page-aligned,
    2033                 :            :          * but we've historically allowed it because IO memory might
    2034                 :            :          * just have smaller alignment.
    2035                 :            :          */
    2036                 :          0 :         len += start & ~PAGE_MASK;
    2037                 :          0 :         pfn = start >> PAGE_SHIFT;
    2038                 :          0 :         pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
    2039         [ #  # ]:          0 :         if (pfn + pages < pfn)
    2040                 :            :                 return -EINVAL;
    2041                 :            : 
    2042                 :            :         /* We start the mapping 'vm_pgoff' pages into the area */
    2043         [ #  # ]:          0 :         if (vma->vm_pgoff > pages)
    2044                 :            :                 return -EINVAL;
    2045                 :          0 :         pfn += vma->vm_pgoff;
    2046                 :          0 :         pages -= vma->vm_pgoff;
    2047                 :            : 
    2048                 :            :         /* Can we fit all of the mapping? */
    2049                 :          0 :         vm_len = vma->vm_end - vma->vm_start;
    2050         [ #  # ]:          0 :         if (vm_len >> PAGE_SHIFT > pages)
    2051                 :            :                 return -EINVAL;
    2052                 :            : 
    2053                 :            :         /* Ok, let it rip */
    2054                 :          0 :         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
    2055                 :            : }
    2056                 :            : EXPORT_SYMBOL(vm_iomap_memory);
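
/*
 * Usage sketch (illustrative only, not compiled): vm_iomap_memory()
 * removes the pfn/size bookkeeping from the remap_pfn_range() idiom
 * above; the driver only supplies the physical extent.  "mmio_start"
 * and "mmio_len" are hypothetical members of "struct my_dev".
 */
#if 0
static int my_iomap_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	/* vm_pgoff, length and overflow checks are all done inside. */
	return vm_iomap_memory(vma, dev->mmio_start, dev->mmio_len);
}
#endif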
    2057                 :            : 
    2058                 :          0 : static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
    2059                 :            :                                      unsigned long addr, unsigned long end,
    2060                 :            :                                      pte_fn_t fn, void *data, bool create)
    2061                 :            : {
    2062                 :          0 :         pte_t *pte;
    2063                 :          0 :         int err = 0;
    2064                 :          0 :         spinlock_t *uninitialized_var(ptl);
    2065                 :            : 
    2066         [ #  # ]:          0 :         if (create) {
    2067                 :          0 :                 pte = (mm == &init_mm) ?
    2068   [ #  #  #  #  :          0 :                         pte_alloc_kernel(pmd, addr) :
                   #  # ]
    2069   [ #  #  #  #  :          0 :                         pte_alloc_map_lock(mm, pmd, addr, &ptl);
                   #  # ]
    2070         [ #  # ]:          0 :                 if (!pte)
    2071                 :            :                         return -ENOMEM;
    2072                 :            :         } else {
    2073                 :          0 :                 pte = (mm == &init_mm) ?
    2074         [ #  # ]:          0 :                         pte_offset_kernel(pmd, addr) :
    2075         [ #  # ]:          0 :                         pte_offset_map_lock(mm, pmd, addr, &ptl);
    2076                 :            :         }
    2077                 :            : 
    2078         [ #  # ]:          0 :         BUG_ON(pmd_huge(*pmd));
    2079                 :            : 
    2080                 :          0 :         arch_enter_lazy_mmu_mode();
    2081                 :            : 
    2082                 :          0 :         do {
    2083   [ #  #  #  # ]:          0 :                 if (create || !pte_none(*pte)) {
    2084                 :          0 :                         err = fn(pte++, addr, data);
    2085         [ #  # ]:          0 :                         if (err)
    2086                 :            :                                 break;
    2087                 :            :                 }
    2088         [ #  # ]:          0 :         } while (addr += PAGE_SIZE, addr != end);
    2089                 :            : 
    2090                 :          0 :         arch_leave_lazy_mmu_mode();
    2091                 :            : 
    2092         [ #  # ]:          0 :         if (mm != &init_mm)
    2093                 :          0 :                 pte_unmap_unlock(pte-1, ptl);
    2094                 :            :         return err;
    2095                 :            : }
    2096                 :            : 
    2097                 :          0 : static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
    2098                 :            :                                      unsigned long addr, unsigned long end,
    2099                 :            :                                      pte_fn_t fn, void *data, bool create)
    2100                 :            : {
    2101                 :          0 :         pmd_t *pmd;
    2102                 :          0 :         unsigned long next;
    2103                 :          0 :         int err = 0;
    2104                 :            : 
    2105         [ #  # ]:          0 :         BUG_ON(pud_huge(*pud));
    2106                 :            : 
    2107         [ #  # ]:          0 :         if (create) {
    2108                 :          0 :                 pmd = pmd_alloc(mm, pud, addr);
    2109         [ #  # ]:          0 :                 if (!pmd)
    2110                 :            :                         return -ENOMEM;
    2111                 :            :         } else {
    2112         [ #  # ]:          0 :                 pmd = pmd_offset(pud, addr);
    2113                 :            :         }
    2114                 :          0 :         do {
    2115         [ #  # ]:          0 :                 next = pmd_addr_end(addr, end);
    2116   [ #  #  #  # ]:          0 :                 if (create || !pmd_none_or_clear_bad(pmd)) {
    2117                 :          0 :                         err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
    2118                 :            :                                                  create);
    2119         [ #  # ]:          0 :                         if (err)
    2120                 :            :                                 break;
    2121                 :            :                 }
    2122         [ #  # ]:          0 :         } while (pmd++, addr = next, addr != end);
    2123                 :            :         return err;
    2124                 :            : }
    2125                 :            : 
    2126                 :          0 : static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
    2127                 :            :                                      unsigned long addr, unsigned long end,
    2128                 :            :                                      pte_fn_t fn, void *data, bool create)
    2129                 :            : {
    2130                 :          0 :         pud_t *pud;
    2131                 :          0 :         unsigned long next;
    2132                 :          0 :         int err = 0;
    2133                 :            : 
    2134         [ #  # ]:          0 :         if (create) {
    2135                 :          0 :                 pud = pud_alloc(mm, p4d, addr);
    2136         [ #  # ]:          0 :                 if (!pud)
    2137                 :            :                         return -ENOMEM;
    2138                 :            :         } else {
    2139                 :          0 :                 pud = pud_offset(p4d, addr);
    2140                 :            :         }
    2141                 :          0 :         do {
    2142         [ #  # ]:          0 :                 next = pud_addr_end(addr, end);
    2143   [ #  #  #  # ]:          0 :                 if (create || !pud_none_or_clear_bad(pud)) {
    2144                 :          0 :                         err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
    2145                 :            :                                                  create);
    2146         [ #  # ]:          0 :                         if (err)
    2147                 :            :                                 break;
    2148                 :            :                 }
    2149         [ #  # ]:          0 :         } while (pud++, addr = next, addr != end);
    2150                 :            :         return err;
    2151                 :            : }
    2152                 :            : 
    2153                 :          0 : static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
    2154                 :            :                                      unsigned long addr, unsigned long end,
    2155                 :            :                                      pte_fn_t fn, void *data, bool create)
    2156                 :            : {
    2157                 :          0 :         p4d_t *p4d;
    2158                 :          0 :         unsigned long next;
    2159                 :          0 :         int err = 0;
    2160                 :            : 
    2161         [ #  # ]:          0 :         if (create) {
    2162                 :          0 :                 p4d = p4d_alloc(mm, pgd, addr);
    2163         [ #  # ]:          0 :                 if (!p4d)
    2164                 :            :                         return -ENOMEM;
    2165                 :            :         } else {
    2166                 :          0 :                 p4d = p4d_offset(pgd, addr);
    2167                 :            :         }
    2168                 :          0 :         do {
    2169         [ #  # ]:          0 :                 next = p4d_addr_end(addr, end);
    2170         [ #  # ]:          0 :                 if (create || !p4d_none_or_clear_bad(p4d)) {
    2171                 :          0 :                         err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
    2172                 :            :                                                  create);
    2173         [ #  # ]:          0 :                         if (err)
    2174                 :            :                                 break;
    2175                 :            :                 }
    2176         [ #  # ]:          0 :         } while (p4d++, addr = next, addr != end);
    2177                 :            :         return err;
    2178                 :            : }
    2179                 :            : 
    2180                 :          0 : static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
    2181                 :            :                                  unsigned long size, pte_fn_t fn,
    2182                 :            :                                  void *data, bool create)
    2183                 :            : {
    2184                 :          0 :         pgd_t *pgd;
    2185                 :          0 :         unsigned long next;
    2186                 :          0 :         unsigned long end = addr + size;
    2187                 :          0 :         int err = 0;
    2188                 :            : 
    2189   [ #  #  #  # ]:          0 :         if (WARN_ON(addr >= end))
    2190                 :            :                 return -EINVAL;
    2191                 :            : 
    2192                 :          0 :         pgd = pgd_offset(mm, addr);
    2193                 :          0 :         do {
    2194         [ #  # ]:          0 :                 next = pgd_addr_end(addr, end);
    2195   [ #  #  #  # ]:          0 :                 if (!create && pgd_none_or_clear_bad(pgd))
    2196                 :          0 :                         continue;
    2197                 :          0 :                 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
    2198         [ #  # ]:          0 :                 if (err)
    2199                 :            :                         break;
    2200         [ #  # ]:          0 :         } while (pgd++, addr = next, addr != end);
    2201                 :            : 
    2202                 :            :         return err;
    2203                 :            : }
    2204                 :            : 
    2205                 :            : /*
    2206                 :            :  * Scan a region of virtual memory, filling in page tables as necessary
    2207                 :            :  * and calling a provided function on each leaf page table.
    2208                 :            :  */
    2209                 :          0 : int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
    2210                 :            :                         unsigned long size, pte_fn_t fn, void *data)
    2211                 :            : {
    2212                 :          0 :         return __apply_to_page_range(mm, addr, size, fn, data, true);
    2213                 :            : }
    2214                 :            : EXPORT_SYMBOL_GPL(apply_to_page_range);
    2215                 :            : 
    2216                 :            : /*
    2217                 :            :  * Scan a region of virtual memory, calling a provided function on
    2218                 :            :  * each leaf page table where it exists.
    2219                 :            :  *
    2220                 :            :  * Unlike apply_to_page_range, this does _not_ fill in page tables
    2221                 :            :  * where they are absent.
    2222                 :            :  */
    2223                 :          0 : int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
    2224                 :            :                                  unsigned long size, pte_fn_t fn, void *data)
    2225                 :            : {
    2226                 :          0 :         return __apply_to_page_range(mm, addr, size, fn, data, false);
    2227                 :            : }
    2228                 :            : EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
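
/*
 * Usage sketch (illustrative only, not compiled): a pte_fn_t callback
 * that counts present leaf entries over a kernel virtual range.  The
 * callback is invoked once per pte slot; using the _existing_ variant
 * avoids allocating page tables just to scan them.  The helper names
 * "count_present_pte" and "count_present" are hypothetical.
 */
#if 0
static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present(unsigned long start, unsigned long size)
{
	unsigned long count = 0;

	apply_to_existing_page_range(&init_mm, start, size,
				     count_present_pte, &count);
	return count;
}
#endif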
    2229                 :            : 
    2230                 :            : /*
    2231                 :            :  * handle_pte_fault chooses page fault handler according to an entry which was
    2232                 :            :  * read non-atomically.  Before making any commitment, on those architectures
    2233                 :            :  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
    2234                 :            :  * parts, do_swap_page must check under lock before unmapping the pte and
    2235                 :            :  * proceeding (but do_wp_page is only called after already making such a check;
    2236                 :            :  * and do_anonymous_page can safely check later on).
    2237                 :            :  */
    2238                 :          0 : static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
    2239                 :            :                                 pte_t *page_table, pte_t orig_pte)
    2240                 :            : {
    2241                 :          0 :         int same = 1;
    2242                 :            : #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
    2243                 :          0 :         if (sizeof(pte_t) > sizeof(unsigned long)) {
    2244                 :            :                 spinlock_t *ptl = pte_lockptr(mm, pmd);
    2245                 :            :                 spin_lock(ptl);
    2246                 :            :                 same = pte_same(*page_table, orig_pte);
    2247                 :            :                 spin_unlock(ptl);
    2248                 :            :         }
    2249                 :            : #endif
    2250                 :          0 :         pte_unmap(page_table);
    2251                 :          0 :         return same;
    2252                 :            : }
    2253                 :            : 
    2254                 :     181188 : static inline bool cow_user_page(struct page *dst, struct page *src,
    2255                 :            :                                  struct vm_fault *vmf)
    2256                 :            : {
    2257                 :     181188 :         bool ret;
    2258                 :     181188 :         void *kaddr;
    2259                 :     181188 :         void __user *uaddr;
    2260                 :     181188 :         bool locked = false;
    2261                 :     181188 :         struct vm_area_struct *vma = vmf->vma;
    2262                 :     181188 :         struct mm_struct *mm = vma->vm_mm;
    2263                 :     181188 :         unsigned long addr = vmf->address;
    2264                 :            : 
    2265                 :     181188 :         debug_dma_assert_idle(src);
    2266                 :            : 
    2267         [ +  - ]:     181188 :         if (likely(src)) {
    2268                 :     181188 :                 copy_user_highpage(dst, src, addr, vma);
    2269                 :     181188 :                 return true;
    2270                 :            :         }
    2271                 :            : 
    2272                 :            :         /*
    2273                 :            :          * If the source page was a PFN mapping, we don't have
    2274                 :            :          * a "struct page" for it. We do a best-effort copy by
    2275                 :            :          * just copying from the original user address. If that
    2276                 :            :          * fails, we just zero-fill it. Live with it.
    2277                 :            :          */
    2278                 :          0 :         kaddr = kmap_atomic(dst);
    2279                 :          0 :         uaddr = (void __user *)(addr & PAGE_MASK);
    2280                 :            : 
    2281                 :            :         /*
    2282                 :            :          * On architectures with software "accessed" bits, we would
    2283                 :            :          * take a double page fault, so mark it accessed here.
    2284                 :            :          */
    2285                 :          0 :         if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
    2286                 :            :                 pte_t entry;
    2287                 :            : 
    2288                 :            :                 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
    2289                 :            :                 locked = true;
    2290                 :            :                 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
    2291                 :            :                         /*
     2292                 :            :                          * Another thread has already handled the fault
     2293                 :            :                          * and nothing needs to be done here. If that's
     2294                 :            :                          * not the case, the fault will be triggered
     2295                 :            :                          * again on the same address.
    2296                 :            :                          */
    2297                 :            :                         ret = false;
    2298                 :            :                         goto pte_unlock;
    2299                 :            :                 }
    2300                 :            : 
    2301                 :            :                 entry = pte_mkyoung(vmf->orig_pte);
    2302                 :            :                 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
    2303                 :            :                         update_mmu_cache(vma, addr, vmf->pte);
    2304                 :            :         }
    2305                 :            : 
    2306                 :            :         /*
    2307                 :            :          * This really shouldn't fail, because the page is there
    2308                 :            :          * in the page tables. But it might just be unreadable,
    2309                 :            :          * in which case we just give up and fill the result with
    2310                 :            :          * zeroes.
    2311                 :            :          */
    2312         [ #  # ]:          0 :         if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
    2313                 :          0 :                 if (locked)
    2314                 :            :                         goto warn;
    2315                 :            : 
    2316                 :            :                 /* Re-validate under PTL if the page is still mapped */
    2317         [ #  # ]:          0 :                 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
    2318                 :          0 :                 locked = true;
    2319         [ #  # ]:          0 :                 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
    2320                 :            :                         /* The PTE changed under us. Retry page fault. */
    2321                 :          0 :                         ret = false;
    2322                 :          0 :                         goto pte_unlock;
    2323                 :            :                 }
    2324                 :            : 
    2325                 :            :                 /*
     2326                 :            :                  * The same page may have been mapped back since the last copy attempt.
    2327                 :            :                  * Try to copy again under PTL.
    2328                 :            :                  */
    2329         [ #  # ]:          0 :                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
    2330                 :            :                         /*
     2331                 :            :                          * Warn in case some obscure use-case ever hits
     2332                 :            :                          * this zero-fill fallback.
    2333                 :            :                          */
    2334                 :          0 : warn:
    2335                 :          0 :                         WARN_ON_ONCE(1);
    2336                 :          0 :                         clear_page(kaddr);
    2337                 :            :                 }
    2338                 :            :         }
    2339                 :            : 
    2340                 :          0 :         ret = true;
    2341                 :            : 
    2342                 :            : pte_unlock:
    2343                 :          0 :         if (locked)
    2344                 :          0 :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    2345                 :          0 :         kunmap_atomic(kaddr);
    2346                 :          0 :         flush_dcache_page(dst);
    2347                 :            : 
    2348                 :          0 :         return ret;
    2349                 :            : }
    2350                 :            : 
    2351                 :    1155182 : static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
    2352                 :            : {
    2353                 :    1155182 :         struct file *vm_file = vma->vm_file;
    2354                 :            : 
    2355                 :    1155182 :         if (vm_file)
    2356                 :     810547 :                 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
    2357                 :            : 
    2358                 :            :         /*
    2359                 :            :          * Special mappings (e.g. VDSO) do not have any file so fake
    2360                 :            :          * a default GFP_KERNEL for them.
    2361                 :            :          */
    2362                 :            :         return GFP_KERNEL;
    2363                 :            : }
    2364                 :            : 
    2365                 :            : /*
    2366                 :            :  * Notify the address space that the page is about to become writable so that
    2367                 :            :  * it can prohibit this or wait for the page to get into an appropriate state.
    2368                 :            :  *
    2369                 :            :  * We do this without the lock held, so that it can sleep if it needs to.
    2370                 :            :  */
    2371                 :          0 : static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
    2372                 :            : {
    2373                 :          0 :         vm_fault_t ret;
    2374                 :          0 :         struct page *page = vmf->page;
    2375                 :          0 :         unsigned int old_flags = vmf->flags;
    2376                 :            : 
    2377                 :          0 :         vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
    2378                 :            : 
    2379         [ #  # ]:          0 :         if (vmf->vma->vm_file &&
    2380         [ #  # ]:          0 :             IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
    2381                 :            :                 return VM_FAULT_SIGBUS;
    2382                 :            : 
    2383                 :          0 :         ret = vmf->vma->vm_ops->page_mkwrite(vmf);
    2384                 :            :         /* Restore original flags so that caller is not surprised */
    2385                 :          0 :         vmf->flags = old_flags;
    2386         [ #  # ]:          0 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
    2387                 :            :                 return ret;
    2388         [ #  # ]:          0 :         if (unlikely(!(ret & VM_FAULT_LOCKED))) {
    2389                 :          0 :                 lock_page(page);
    2390         [ #  # ]:          0 :                 if (!page->mapping) {
    2391                 :          0 :                         unlock_page(page);
    2392                 :          0 :                         return 0; /* retry */
    2393                 :            :                 }
    2394                 :          0 :                 ret |= VM_FAULT_LOCKED;
    2395                 :            :         } else
    2396                 :            :                 VM_BUG_ON_PAGE(!PageLocked(page), page);
    2397                 :            :         return ret;
    2398                 :            : }
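                          :            : 
                          :            : /*
                          :            :  * Illustrative sketch (hypothetical, not part of memory.c): a typical
                          :            :  * filesystem ->page_mkwrite() handler of the shape do_page_mkwrite()
                          :            :  * expects, modelled loosely on filemap_page_mkwrite(). Returning
                          :            :  * VM_FAULT_LOCKED tells the caller the page was left locked.
                          :            :  */
                          :            : #if 0
                          :            : static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
                          :            : {
                          :            :         struct page *page = vmf->page;
                          :            :         struct inode *inode = file_inode(vmf->vma->vm_file);
                          :            :         vm_fault_t ret = VM_FAULT_LOCKED;
                          :            : 
                          :            :         sb_start_pagefault(inode->i_sb);
                          :            :         file_update_time(vmf->vma->vm_file);
                          :            :         lock_page(page);
                          :            :         if (page->mapping != inode->i_mapping) {
                          :            :                 /* Truncated under us: unlock and let the fault retry. */
                          :            :                 unlock_page(page);
                          :            :                 ret = VM_FAULT_NOPAGE;
                          :            :                 goto out;
                          :            :         }
                          :            :         set_page_dirty(page);
                          :            :         wait_for_stable_page(page);
                          :            : out:
                          :            :         sb_end_pagefault(inode->i_sb);
                          :            :         return ret;
                          :            : }
                          :            : #endif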
    2399                 :            : 
    2400                 :            : /*
    2401                 :            :  * Handle dirtying of a page in shared file mapping on a write fault.
    2402                 :            :  *
    2403                 :            :  * The function expects the page to be locked and unlocks it.
    2404                 :            :  */
    2405                 :       1471 : static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
    2406                 :            : {
    2407                 :       1471 :         struct vm_area_struct *vma = vmf->vma;
    2408                 :       1471 :         struct address_space *mapping;
    2409                 :       1471 :         struct page *page = vmf->page;
    2410                 :       1471 :         bool dirtied;
    2411   [ +  -  +  - ]:       1471 :         bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
    2412                 :            : 
    2413                 :       1471 :         dirtied = set_page_dirty(page);
    2414                 :       1471 :         VM_BUG_ON_PAGE(PageAnon(page), page);
    2415                 :            :         /*
    2416                 :            :          * Take a local copy of the address_space - page.mapping may be zeroed
    2417                 :            :          * by truncate after unlock_page().   The address_space itself remains
    2418                 :            :          * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
    2419                 :            :          * release semantics to prevent the compiler from undoing this copying.
    2420                 :            :          */
    2421                 :       1471 :         mapping = page_rmapping(page);
    2422                 :       1471 :         unlock_page(page);
    2423                 :            : 
    2424         [ +  - ]:       1471 :         if (!page_mkwrite)
    2425                 :       1471 :                 file_update_time(vma->vm_file);
    2426                 :            : 
    2427                 :            :         /*
    2428                 :            :          * Throttle page dirtying rate down to writeback speed.
    2429                 :            :          *
    2430                 :            :          * mapping may be NULL here because some device drivers do not
    2431                 :            :          * set page.mapping but still dirty their pages
    2432                 :            :          *
    2433                 :            :          * Drop the mmap_sem before waiting on IO, if we can. The file
    2434                 :            :          * is pinning the mapping, as per above.
    2435                 :            :          */
    2436   [ -  +  -  - ]:       1471 :         if ((dirtied || page_mkwrite) && mapping) {
    2437                 :          0 :                 struct file *fpin;
    2438                 :            : 
    2439                 :          0 :                 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
    2440                 :          0 :                 balance_dirty_pages_ratelimited(mapping);
    2441         [ #  # ]:          0 :                 if (fpin) {
    2442                 :          0 :                         fput(fpin);
    2443                 :          0 :                         return VM_FAULT_RETRY;
    2444                 :            :                 }
    2445                 :            :         }
    2446                 :            : 
    2447                 :            :         return 0;
    2448                 :            : }
    2449                 :            : 
    2450                 :            : /*
    2451                 :            :  * Handle write page faults for pages that can be reused in the current vma
    2452                 :            :  *
     2453                 :            :  * This can happen either because the mapping has the VM_SHARED flag set,
     2454                 :            :  * or because we hold the last remaining reference to the page. In either
    2455                 :            :  * case, all we need to do here is to mark the page as writable and update
    2456                 :            :  * any related book-keeping.
    2457                 :            :  */
    2458                 :     135535 : static inline void wp_page_reuse(struct vm_fault *vmf)
    2459                 :            :         __releases(vmf->ptl)
    2460                 :            : {
    2461                 :     135535 :         struct vm_area_struct *vma = vmf->vma;
    2462                 :     135535 :         struct page *page = vmf->page;
    2463                 :     135535 :         pte_t entry;
    2464                 :            :         /*
     2465                 :            :          * Clear the page's cpupid information as the existing
    2466                 :            :          * information potentially belongs to a now completely
    2467                 :            :          * unrelated process.
    2468                 :            :          */
    2469                 :     135535 :         if (page)
    2470                 :            :                 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
    2471                 :            : 
    2472         [ +  - ]:     135535 :         flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
    2473         [ +  - ]:     135535 :         entry = pte_mkyoung(vmf->orig_pte);
    2474         [ +  - ]:     135535 :         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    2475                 :     135535 :         if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
    2476                 :            :                 update_mmu_cache(vma, vmf->address, vmf->pte);
    2477                 :     135535 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    2478                 :     135535 : }
    2479                 :            : 
    2480                 :            : /*
    2481                 :            :  * Handle the case of a page which we actually need to copy to a new page.
    2482                 :            :  *
    2483                 :            :  * Called with mmap_sem locked and the old page referenced, but
    2484                 :            :  * without the ptl held.
    2485                 :            :  *
    2486                 :            :  * High level logic flow:
    2487                 :            :  *
    2488                 :            :  * - Allocate a page, copy the content of the old page to the new one.
     2489                 :            :  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
    2490                 :            :  * - Take the PTL. If the pte changed, bail out and release the allocated page
    2491                 :            :  * - If the pte is still the way we remember it, update the page table and all
    2492                 :            :  *   relevant references. This includes dropping the reference the page-table
    2493                 :            :  *   held to the old page, as well as updating the rmap.
    2494                 :            :  * - In any case, unlock the PTL and drop the reference we took to the old page.
    2495                 :            :  */
    2496                 :     191154 : static vm_fault_t wp_page_copy(struct vm_fault *vmf)
    2497                 :            : {
    2498                 :     191154 :         struct vm_area_struct *vma = vmf->vma;
    2499                 :     191154 :         struct mm_struct *mm = vma->vm_mm;
    2500                 :     191154 :         struct page *old_page = vmf->page;
    2501                 :     191154 :         struct page *new_page = NULL;
    2502                 :     191154 :         pte_t entry;
    2503                 :     191154 :         int page_copied = 0;
    2504                 :     191154 :         struct mem_cgroup *memcg;
    2505                 :     191154 :         struct mmu_notifier_range range;
    2506                 :            : 
    2507   [ +  +  -  + ]:     194443 :         if (unlikely(anon_vma_prepare(vma)))
    2508                 :          0 :                 goto oom;
    2509                 :            : 
    2510   [ +  -  +  + ]:     382308 :         if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
    2511                 :       9966 :                 new_page = alloc_zeroed_user_highpage_movable(vma,
    2512                 :            :                                                               vmf->address);
    2513         [ -  + ]:       9966 :                 if (!new_page)
    2514                 :          0 :                         goto oom;
    2515                 :            :         } else {
    2516                 :     181188 :                 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
    2517                 :            :                                 vmf->address);
    2518         [ -  + ]:     181188 :                 if (!new_page)
    2519                 :          0 :                         goto oom;
    2520                 :            : 
    2521         [ -  + ]:     181188 :                 if (!cow_user_page(new_page, old_page, vmf)) {
    2522                 :            :                         /*
     2523                 :            :                          * COW failed; if the fault was resolved by another
     2524                 :            :                          * thread, that's fine. If not, userspace will
     2525                 :            :                          * re-fault on the same address and we will handle
     2526                 :            :                          * the fault on the second attempt.
    2527                 :            :                          */
    2528                 :          0 :                         put_page(new_page);
    2529         [ #  # ]:          0 :                         if (old_page)
    2530                 :          0 :                                 put_page(old_page);
    2531                 :          0 :                         return 0;
    2532                 :            :                 }
    2533                 :            :         }
    2534                 :            : 
    2535                 :     191154 :         if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
    2536                 :            :                 goto oom_free_new;
    2537                 :            : 
    2538                 :     191154 :         __SetPageUptodate(new_page);
    2539                 :            : 
    2540                 :     191154 :         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
    2541                 :            :                                 vmf->address & PAGE_MASK,
    2542                 :     191154 :                                 (vmf->address & PAGE_MASK) + PAGE_SIZE);
    2543                 :     191154 :         mmu_notifier_invalidate_range_start(&range);
    2544                 :            : 
    2545                 :            :         /*
    2546                 :            :          * Re-check the pte - we dropped the lock
    2547                 :            :          */
    2548         [ +  - ]:     382308 :         vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
    2549         [ +  - ]:     191154 :         if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
    2550         [ +  + ]:     191154 :                 if (old_page) {
    2551   [ -  +  +  + ]:     181188 :                         if (!PageAnon(old_page)) {
    2552                 :     102422 :                                 dec_mm_counter_fast(mm,
    2553                 :            :                                                 mm_counter_file(old_page));
    2554                 :     102422 :                                 inc_mm_counter_fast(mm, MM_ANONPAGES);
    2555                 :            :                         }
    2556                 :            :                 } else {
    2557                 :       9966 :                         inc_mm_counter_fast(mm, MM_ANONPAGES);
    2558                 :            :                 }
    2559         [ +  - ]:     191154 :                 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
    2560         [ +  - ]:     191154 :                 entry = mk_pte(new_page, vma->vm_page_prot);
    2561         [ +  - ]:     191154 :                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    2562                 :            :                 /*
    2563                 :            :                  * Clear the pte entry and flush it first, before updating the
    2564                 :            :                  * pte with the new entry. This will avoid a race condition
    2565                 :            :                  * seen in the presence of one thread doing SMC and another
    2566                 :            :                  * thread doing COW.
    2567                 :            :                  */
    2568         [ -  + ]:     191154 :                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
    2569                 :     191154 :                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
    2570                 :     191154 :                 mem_cgroup_commit_charge(new_page, memcg, false, false);
    2571                 :     191154 :                 lru_cache_add_active_or_unevictable(new_page, vma);
    2572                 :            :                 /*
    2573                 :            :                  * We call the notify macro here because, when using secondary
    2574                 :            :                  * mmu page tables (such as kvm shadow page tables), we want the
    2575                 :            :                  * new page to be mapped directly into the secondary page table.
    2576                 :            :                  */
    2577   [ -  +  +  + ]:     191154 :                 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
    2578         [ +  + ]:     191154 :                 update_mmu_cache(vma, vmf->address, vmf->pte);
    2579         [ +  + ]:     191154 :                 if (old_page) {
    2580                 :            :                         /*
    2581                 :            :                          * Only after switching the pte to the new page may
    2582                 :            :                          * we remove the mapcount here. Otherwise another
    2583                 :            :                          * process may come and find the rmap count decremented
    2584                 :            :                          * before the pte is switched to the new page, and
    2585                 :            :                          * "reuse" the old page writing into it while our pte
    2586                 :            :                          * here still points into it and can be read by other
    2587                 :            :                          * threads.
    2588                 :            :                          *
    2589                 :            :                          * The critical issue is to order this
     2590                 :            :                          * page_remove_rmap with the ptep_clear_flush above.
    2591                 :            :                          * Those stores are ordered by (if nothing else,)
    2592                 :            :                          * the barrier present in the atomic_add_negative
    2593                 :            :                          * in page_remove_rmap.
    2594                 :            :                          *
    2595                 :            :                          * Then the TLB flush in ptep_clear_flush ensures that
    2596                 :            :                          * no process can access the old page before the
    2597                 :            :                          * decremented mapcount is visible. And the old page
    2598                 :            :                          * cannot be reused until after the decremented
    2599                 :            :                          * mapcount is visible. So transitively, TLBs to
    2600                 :            :                          * old page will be flushed before it can be reused.
    2601                 :            :                          */
    2602                 :     181188 :                         page_remove_rmap(old_page, false);
    2603                 :            :                 }
    2604                 :            : 
    2605                 :            :                 /* Free the old page.. */
    2606                 :            :                 new_page = old_page;
    2607                 :            :                 page_copied = 1;
    2608                 :            :         } else {
    2609                 :            :                 mem_cgroup_cancel_charge(new_page, memcg, false);
    2610                 :            :         }
    2611                 :            : 
    2612         [ +  + ]:     191154 :         if (new_page)
    2613                 :     181188 :                 put_page(new_page);
    2614                 :            : 
    2615                 :     191154 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    2616                 :            :         /*
    2617                 :            :          * No need to double call mmu_notifier->invalidate_range() callback as
    2618                 :            :          * the above ptep_clear_flush_notify() did already call it.
    2619                 :            :          */
    2620         [ -  + ]:     191154 :         mmu_notifier_invalidate_range_only_end(&range);
    2621         [ +  + ]:     191154 :         if (old_page) {
    2622                 :            :                 /*
    2623                 :            :                  * Don't let another task, with possibly unlocked vma,
    2624                 :            :                  * keep the mlocked page.
    2625                 :            :                  */
    2626   [ +  -  -  + ]:     181188 :                 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
    2627                 :          0 :                         lock_page(old_page);    /* LRU manipulation */
    2628   [ #  #  #  # ]:          0 :                         if (PageMlocked(old_page))
    2629                 :          0 :                                 munlock_vma_page(old_page);
    2630                 :          0 :                         unlock_page(old_page);
    2631                 :            :                 }
    2632                 :     181188 :                 put_page(old_page);
    2633                 :            :         }
    2634         [ -  + ]:     191154 :         return page_copied ? VM_FAULT_WRITE : 0;
    2635                 :            : oom_free_new:
    2636                 :            :         put_page(new_page);
    2637                 :          0 : oom:
    2638         [ #  # ]:          0 :         if (old_page)
    2639                 :          0 :                 put_page(old_page);
    2640                 :            :         return VM_FAULT_OOM;
    2641                 :            : }
    2642                 :            : 
    2643                 :            : /**
    2644                 :            :  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
    2645                 :            :  *                        writeable once the page is prepared
    2646                 :            :  *
    2647                 :            :  * @vmf: structure describing the fault
    2648                 :            :  *
    2649                 :            :  * This function handles all that is needed to finish a write page fault in a
    2650                 :            :  * shared mapping due to PTE being read-only once the mapped page is prepared.
    2651                 :            :  * It handles locking of PTE and modifying it.
    2652                 :            :  *
    2653                 :            :  * The function expects the page to be locked or other protection against
    2654                 :            :  * concurrent faults / writeback (such as DAX radix tree locks).
    2655                 :            :  *
    2656                 :            :  * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
    2657                 :            :  * we acquired PTE lock.
    2658                 :            :  */
    2659                 :          0 : vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
    2660                 :            : {
    2661         [ #  # ]:          0 :         WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
    2662         [ #  # ]:          0 :         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
    2663                 :            :                                        &vmf->ptl);
    2664                 :            :         /*
    2665                 :            :          * We might have raced with another page fault while we released the
    2666                 :            :          * pte_offset_map_lock.
    2667                 :            :          */
    2668         [ #  # ]:          0 :         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
    2669                 :          0 :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    2670                 :          0 :                 return VM_FAULT_NOPAGE;
    2671                 :            :         }
    2672                 :          0 :         wp_page_reuse(vmf);
    2673                 :          0 :         return 0;
    2674                 :            : }
    2675                 :            : 
    2676                 :            : /*
    2677                 :            :  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
    2678                 :            :  * mapping
    2679                 :            :  */
    2680                 :          0 : static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
    2681                 :            : {
    2682                 :          0 :         struct vm_area_struct *vma = vmf->vma;
    2683                 :            : 
    2684   [ #  #  #  # ]:          0 :         if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
    2685                 :          0 :                 vm_fault_t ret;
    2686                 :            : 
    2687                 :          0 :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    2688                 :          0 :                 vmf->flags |= FAULT_FLAG_MKWRITE;
    2689                 :          0 :                 ret = vma->vm_ops->pfn_mkwrite(vmf);
    2690         [ #  # ]:          0 :                 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
    2691                 :            :                         return ret;
    2692                 :          0 :                 return finish_mkwrite_fault(vmf);
    2693                 :            :         }
    2694                 :          0 :         wp_page_reuse(vmf);
    2695                 :          0 :         return VM_FAULT_WRITE;
    2696                 :            : }
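                          :            : 
                          :            : /*
                          :            :  * Illustrative sketch (hypothetical): a minimal ->pfn_mkwrite()
                          :            :  * handler for a VM_PFNMAP mapping, as consumed by wp_pfn_shared()
                          :            :  * above. Returning 0 lets finish_mkwrite_fault() make the pte
                          :            :  * writable.
                          :            :  */
                          :            : #if 0
                          :            : static vm_fault_t example_pfn_mkwrite(struct vm_fault *vmf)
                          :            : {
                          :            :         /* Device-specific revalidation would go here. */
                          :            :         return 0;
                          :            : }
                          :            : #endif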
    2697                 :            : 
    2698                 :          0 : static vm_fault_t wp_page_shared(struct vm_fault *vmf)
    2699                 :            :         __releases(vmf->ptl)
    2700                 :            : {
    2701                 :          0 :         struct vm_area_struct *vma = vmf->vma;
    2702                 :          0 :         vm_fault_t ret = VM_FAULT_WRITE;
    2703                 :            : 
    2704         [ #  # ]:          0 :         get_page(vmf->page);
    2705                 :            : 
    2706   [ #  #  #  # ]:          0 :         if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
    2707                 :          0 :                 vm_fault_t tmp;
    2708                 :            : 
    2709                 :          0 :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    2710                 :          0 :                 tmp = do_page_mkwrite(vmf);
    2711   [ #  #  #  # ]:          0 :                 if (unlikely(!tmp || (tmp &
    2712                 :            :                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
    2713                 :          0 :                         put_page(vmf->page);
    2714                 :          0 :                         return tmp;
    2715                 :            :                 }
    2716                 :          0 :                 tmp = finish_mkwrite_fault(vmf);
    2717         [ #  # ]:          0 :                 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
    2718                 :          0 :                         unlock_page(vmf->page);
    2719                 :          0 :                         put_page(vmf->page);
    2720                 :          0 :                         return tmp;
    2721                 :            :                 }
    2722                 :            :         } else {
    2723                 :          0 :                 wp_page_reuse(vmf);
    2724                 :          0 :                 lock_page(vmf->page);
    2725                 :            :         }
    2726                 :          0 :         ret |= fault_dirty_shared_page(vmf);
    2727                 :          0 :         put_page(vmf->page);
    2728                 :            : 
    2729                 :          0 :         return ret;
    2730                 :            : }
    2731                 :            : 
    2732                 :            : /*
    2733                 :            :  * This routine handles present pages, when users try to write
    2734                 :            :  * to a shared page. It is done by copying the page to a new address
    2735                 :            :  * and decrementing the shared-page counter for the old page.
    2736                 :            :  *
    2737                 :            :  * Note that this routine assumes that the protection checks have been
    2738                 :            :  * done by the caller (the low-level page fault routine in most cases).
    2739                 :            :  * Thus we can safely just mark it writable once we've done any necessary
    2740                 :            :  * COW.
    2741                 :            :  *
    2742                 :            :  * We also mark the page dirty at this point even though the page will
    2743                 :            :  * change only once the write actually happens. This avoids a few races,
    2744                 :            :  * and potentially makes it more efficient.
    2745                 :            :  *
    2746                 :            :  * We enter with non-exclusive mmap_sem (to exclude vma changes,
    2747                 :            :  * but allow concurrent faults), with pte both mapped and locked.
    2748                 :            :  * We return with mmap_sem still held, but pte unmapped and unlocked.
    2749                 :            :  */
    2750                 :     326689 : static vm_fault_t do_wp_page(struct vm_fault *vmf)
    2751                 :            :         __releases(vmf->ptl)
    2752                 :            : {
    2753                 :     326689 :         struct vm_area_struct *vma = vmf->vma;
    2754                 :            : 
    2755                 :     326689 :         vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
    2756         [ +  + ]:     326689 :         if (!vmf->page) {
    2757                 :            :                 /*
    2758                 :            :                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
    2759                 :            :                  * VM_PFNMAP VMA.
    2760                 :            :                  *
    2761                 :            :                  * We should not cow pages in a shared writeable mapping.
    2762                 :            :                  * Just mark the pages writable and/or call ops->pfn_mkwrite.
    2763                 :            :                  */
    2764         [ -  + ]:       9966 :                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
    2765                 :            :                                      (VM_WRITE|VM_SHARED))
    2766                 :          0 :                         return wp_pfn_shared(vmf);
    2767                 :            : 
    2768                 :       9966 :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    2769                 :       9966 :                 return wp_page_copy(vmf);
    2770                 :            :         }
    2771                 :            : 
    2772                 :            :         /*
     2773                 :            :          * Take out anonymous pages first; anonymous shared vmas are
    2774                 :            :          * not dirty accountable.
    2775                 :            :          */
    2776   [ -  +  +  + ]:     316723 :         if (PageAnon(vmf->page)) {
    2777                 :     214301 :                 int total_map_swapcount;
    2778         [ -  + ]:     214301 :                 if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
    2779                 :            :                                            page_count(vmf->page) != 1))
    2780                 :            :                         goto copy;
    2781   [ -  +  -  + ]:     428602 :                 if (!trylock_page(vmf->page)) {
    2782         [ #  # ]:          0 :                         get_page(vmf->page);
    2783                 :          0 :                         pte_unmap_unlock(vmf->pte, vmf->ptl);
    2784                 :          0 :                         lock_page(vmf->page);
    2785         [ #  # ]:          0 :                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
    2786                 :            :                                         vmf->address, &vmf->ptl);
    2787         [ #  # ]:          0 :                         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
    2788                 :          0 :                                 unlock_page(vmf->page);
    2789                 :          0 :                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    2790                 :          0 :                                 put_page(vmf->page);
    2791                 :     135535 :                                 return 0;
    2792                 :            :                         }
    2793                 :          0 :                         put_page(vmf->page);
    2794                 :            :                 }
    2795                 :     214301 :                 if (PageKsm(vmf->page)) {
    2796                 :            :                         bool reused = reuse_ksm_page(vmf->page, vmf->vma,
    2797                 :            :                                                      vmf->address);
    2798                 :            :                         unlock_page(vmf->page);
    2799                 :            :                         if (!reused)
    2800                 :            :                                 goto copy;
    2801                 :            :                         wp_page_reuse(vmf);
    2802                 :            :                         return VM_FAULT_WRITE;
    2803                 :            :                 }
    2804         [ +  + ]:     214301 :                 if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
    2805         [ +  - ]:     135535 :                         if (total_map_swapcount == 1) {
    2806                 :            :                                 /*
    2807                 :            :                                  * The page is all ours. Move it to
    2808                 :            :                                  * our anon_vma so the rmap code will
    2809                 :            :                                  * not search our parent or siblings.
    2810                 :            :                                  * Protected against the rmap code by
    2811                 :            :                                  * the page lock.
    2812                 :            :                                  */
    2813                 :     135535 :                                 page_move_anon_rmap(vmf->page, vma);
    2814                 :            :                         }
    2815                 :     135535 :                         unlock_page(vmf->page);
    2816                 :     135535 :                         wp_page_reuse(vmf);
    2817                 :     135535 :                         return VM_FAULT_WRITE;
    2818                 :            :                 }
    2819                 :      78766 :                 unlock_page(vmf->page);
    2820         [ -  + ]:     102422 :         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
    2821                 :            :                                         (VM_WRITE|VM_SHARED))) {
    2822                 :          0 :                 return wp_page_shared(vmf);
    2823                 :            :         }
    2824                 :     102422 : copy:
    2825                 :            :         /*
    2826                 :            :          * Ok, we need to copy. Oh, well..
    2827                 :            :          */
    2828         [ -  + ]:     181188 :         get_page(vmf->page);
    2829                 :            : 
    2830                 :     181188 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    2831                 :     181188 :         return wp_page_copy(vmf);
    2832                 :            : }
    2833                 :            : 
    2834                 :          0 : static void unmap_mapping_range_vma(struct vm_area_struct *vma,
    2835                 :            :                 unsigned long start_addr, unsigned long end_addr,
    2836                 :            :                 struct zap_details *details)
    2837                 :            : {
    2838                 :          0 :         zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
    2839                 :            : }
    2840                 :            : 
    2841                 :          0 : static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
    2842                 :            :                                             struct zap_details *details)
    2843                 :            : {
    2844                 :          0 :         struct vm_area_struct *vma;
    2845                 :          0 :         pgoff_t vba, vea, zba, zea;
    2846                 :            : 
    2847         [ #  # ]:          0 :         vma_interval_tree_foreach(vma, root,
    2848                 :            :                         details->first_index, details->last_index) {
    2849                 :            : 
    2850                 :          0 :                 vba = vma->vm_pgoff;
    2851                 :          0 :                 vea = vba + vma_pages(vma) - 1;
    2852                 :          0 :                 zba = details->first_index;
    2853                 :          0 :                 if (zba < vba)
    2854                 :            :                         zba = vba;
    2855                 :          0 :                 zea = details->last_index;
    2856                 :          0 :                 if (zea > vea)
    2857                 :            :                         zea = vea;
    2858                 :            : 
    2859                 :          0 :                 unmap_mapping_range_vma(vma,
    2860                 :          0 :                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
    2861                 :          0 :                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
    2862                 :            :                                 details);
    2863                 :            :         }
    2864                 :          0 : }
    2865                 :            : 
    2866                 :            : /**
    2867                 :            :  * unmap_mapping_pages() - Unmap pages from processes.
    2868                 :            :  * @mapping: The address space containing pages to be unmapped.
    2869                 :            :  * @start: Index of first page to be unmapped.
    2870                 :            :  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
    2871                 :            :  * @even_cows: Whether to unmap even private COWed pages.
    2872                 :            :  *
    2873                 :            :  * Unmap the pages in this address space from any userspace process which
    2874                 :            :  * has them mmaped.  Generally, you want to remove COWed pages as well when
    2875                 :            :  * a file is being truncated, but not when invalidating pages from the page
    2876                 :            :  * cache.
    2877                 :            :  */
    2878                 :         22 : void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
    2879                 :            :                 pgoff_t nr, bool even_cows)
    2880                 :            : {
    2881                 :         22 :         struct zap_details details = { };
    2882                 :            : 
    2883         [ -  + ]:         22 :         details.check_mapping = even_cows ? NULL : mapping;
    2884                 :         22 :         details.first_index = start;
    2885                 :         22 :         details.last_index = start + nr - 1;
    2886         [ -  + ]:         22 :         if (details.last_index < details.first_index)
    2887                 :          0 :                 details.last_index = ULONG_MAX;
    2888                 :            : 
    2889                 :         22 :         i_mmap_lock_write(mapping);
    2890         [ -  + ]:         22 :         if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
    2891                 :          0 :                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
    2892                 :         22 :         i_mmap_unlock_write(mapping);
    2893                 :         22 : }
    2894                 :            : 
    2895                 :            : /**
    2896                 :            :  * unmap_mapping_range - unmap the portion of all mmaps in the specified
    2897                 :            :  * address_space corresponding to the specified byte range in the underlying
    2898                 :            :  * file.
    2899                 :            :  *
    2900                 :            :  * @mapping: the address space containing mmaps to be unmapped.
    2901                 :            :  * @holebegin: byte in first page to unmap, relative to the start of
    2902                 :            :  * the underlying file.  This will be rounded down to a PAGE_SIZE
    2903                 :            :  * boundary.  Note that this is different from truncate_pagecache(), which
    2904                 :            :  * must keep the partial page.  In contrast, we must get rid of
    2905                 :            :  * partial pages.
    2906                 :            :  * @holelen: size of prospective hole in bytes.  This will be rounded
    2907                 :            :  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
    2908                 :            :  * end of the file.
     2909                 :            :  * @even_cows: 1 when truncating a file: unmap even private COWed pages;
     2910                 :            :  * 0 when invalidating the pagecache: don't throw away private data.
    2911                 :            :  */
    2912                 :         22 : void unmap_mapping_range(struct address_space *mapping,
    2913                 :            :                 loff_t const holebegin, loff_t const holelen, int even_cows)
    2914                 :            : {
    2915                 :         22 :         pgoff_t hba = holebegin >> PAGE_SHIFT;
    2916                 :         22 :         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
    2917                 :            : 
    2918                 :            :         /* Check for overflow. */
    2919                 :         22 :         if (sizeof(holelen) > sizeof(hlen)) {
    2920                 :            :                 long long holeend =
    2921                 :            :                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
    2922                 :            :                 if (holeend & ~(long long)ULONG_MAX)
    2923                 :            :                         hlen = ULONG_MAX - hba + 1;
    2924                 :            :         }
    2925                 :            : 
    2926                 :         22 :         unmap_mapping_pages(mapping, hba, hlen, even_cows);
    2927                 :         22 : }
    2928                 :            : EXPORT_SYMBOL(unmap_mapping_range);
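                          :            : 
                          :            : /*
                          :            :  * Illustrative sketch (not part of memory.c): the canonical caller
                          :            :  * pattern when shrinking a file, roughly what truncate_pagecache()
                          :            :  * in mm/truncate.c does. holelen == 0 means "unmap to end of file"
                          :            :  * and even_cows == 1 discards private COWed copies as well.
                          :            :  */
                          :            : #if 0
                          :            : static void example_shrink_file(struct inode *inode, loff_t newsize)
                          :            : {
                          :            :         struct address_space *mapping = inode->i_mapping;
                          :            :         loff_t holebegin = round_up(newsize, PAGE_SIZE);
                          :            : 
                          :            :         /*
                          :            :          * Unmap, truncate, then unmap again to close the race with
                          :            :          * concurrent faults instantiating ptes past the new EOF.
                          :            :          */
                          :            :         unmap_mapping_range(mapping, holebegin, 0, 1);
                          :            :         truncate_inode_pages(mapping, newsize);
                          :            :         unmap_mapping_range(mapping, holebegin, 0, 1);
                          :            : }
                          :            : #endif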
    2929                 :            : 
    2930                 :            : /*
    2931                 :            :  * We enter with non-exclusive mmap_sem (to exclude vma changes,
    2932                 :            :  * but allow concurrent faults), and pte mapped but not yet locked.
    2933                 :            :  * We return with pte unmapped and unlocked.
    2934                 :            :  *
    2935                 :            :  * We return with the mmap_sem locked or unlocked in the same cases
    2936                 :            :  * as does filemap_fault().
    2937                 :            :  */
    2938                 :          0 : vm_fault_t do_swap_page(struct vm_fault *vmf)
    2939                 :            : {
    2940                 :          0 :         struct vm_area_struct *vma = vmf->vma;
    2941                 :          0 :         struct page *page = NULL, *swapcache;
    2942                 :          0 :         struct mem_cgroup *memcg;
    2943                 :          0 :         swp_entry_t entry;
    2944                 :          0 :         pte_t pte;
    2945                 :          0 :         int locked;
    2946                 :          0 :         int exclusive = 0;
    2947                 :          0 :         vm_fault_t ret = 0;
    2948                 :            : 
    2949                 :          0 :         if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
    2950                 :            :                 goto out;
    2951                 :            : 
    2952         [ #  # ]:          0 :         entry = pte_to_swp_entry(vmf->orig_pte);
    2953         [ #  # ]:          0 :         if (unlikely(non_swap_entry(entry))) {
    2954         [ #  # ]:          0 :                 if (is_migration_entry(entry)) {
    2955                 :          0 :                         migration_entry_wait(vma->vm_mm, vmf->pmd,
    2956                 :            :                                              vmf->address);
    2957                 :          0 :                 } else if (is_device_private_entry(entry)) {
    2958                 :            :                         vmf->page = device_private_entry_to_page(entry);
    2959                 :            :                         ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
    2960                 :          0 :                 } else if (is_hwpoison_entry(entry)) {
    2961                 :            :                         ret = VM_FAULT_HWPOISON;
    2962                 :            :                 } else {
    2963                 :          0 :                         print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
    2964                 :          0 :                         ret = VM_FAULT_SIGBUS;
    2965                 :            :                 }
    2966                 :          0 :                 goto out;
    2967                 :            :         }
    2968                 :            : 
    2969                 :            : 
    2970         [ #  # ]:          0 :         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
    2971                 :          0 :         page = lookup_swap_cache(entry, vma, vmf->address);
    2972                 :          0 :         swapcache = page;
    2973                 :            : 
    2974         [ #  # ]:          0 :         if (!page) {
    2975                 :          0 :                 struct swap_info_struct *si = swp_swap_info(entry);
    2976                 :            : 
    2977   [ #  #  #  # ]:          0 :                 if (si->flags & SWP_SYNCHRONOUS_IO &&
    2978                 :          0 :                                 __swap_count(entry) == 1) {
    2979                 :            :                         /* skip swapcache */
    2980                 :          0 :                         page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
    2981                 :            :                                                         vmf->address);
    2982         [ #  # ]:          0 :                         if (page) {
    2983         [ #  # ]:          0 :                                 __SetPageLocked(page);
    2984         [ #  # ]:          0 :                                 __SetPageSwapBacked(page);
    2985                 :          0 :                                 set_page_private(page, entry.val);
    2986                 :          0 :                                 lru_cache_add_anon(page);
    2987                 :          0 :                                 swap_readpage(page, true);
    2988                 :            :                         }
    2989                 :            :                 } else {
    2990                 :          0 :                         page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
    2991                 :            :                                                 vmf);
    2992                 :          0 :                         swapcache = page;
    2993                 :            :                 }
    2994                 :            : 
    2995         [ #  # ]:          0 :                 if (!page) {
    2996                 :            :                         /*
    2997                 :            :                          * Back out if somebody else faulted in this pte
    2998                 :            :                          * while we released the pte lock.
    2999                 :            :                          */
    3000         [ #  # ]:          0 :                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
    3001                 :            :                                         vmf->address, &vmf->ptl);
    3002         [ #  # ]:          0 :                         if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
    3003                 :          0 :                                 ret = VM_FAULT_OOM;
    3004         [ #  # ]:          0 :                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
    3005                 :          0 :                         goto unlock;
    3006                 :            :                 }
    3007                 :            : 
    3008                 :            :                 /* Had to read the page from swap area: Major fault */
    3009                 :          0 :                 ret = VM_FAULT_MAJOR;
    3010                 :          0 :                 count_vm_event(PGMAJFAULT);
    3011                 :          0 :                 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
    3012                 :            :         } else if (PageHWPoison(page)) {
    3013                 :            :                 /*
    3014                 :            :                  * hwpoisoned dirty swapcache pages are kept for killing
    3015                 :            :                  * owner processes (which may be unknown at hwpoison time)
    3016                 :            :                  */
    3017                 :            :                 ret = VM_FAULT_HWPOISON;
    3018                 :            :                 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
    3019                 :            :                 goto out_release;
    3020                 :            :         }
    3021                 :            : 
    3022                 :          0 :         locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
    3023                 :            : 
    3024         [ #  # ]:          0 :         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
    3025         [ #  # ]:          0 :         if (!locked) {
    3026                 :          0 :                 ret |= VM_FAULT_RETRY;
    3027                 :          0 :                 goto out_release;
    3028                 :            :         }
    3029                 :            : 
    3030                 :            :         /*
    3031                 :            :          * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
    3032                 :            :          * release the swapcache from under us.  The page pin, and pte_same
    3033                 :            :          * test below, are not enough to exclude that.  Even if it is still
    3034                 :            :          * swapcache, we need to check that the page's swap has not changed.
    3035                 :            :          */
    3036   [ #  #  #  #  :          0 :         if (unlikely((!PageSwapCache(page) ||
                   #  # ]
    3037         [ #  # ]:          0 :                         page_private(page) != entry.val)) && swapcache)
    3038                 :          0 :                 goto out_page;
    3039                 :            : 
    3040         [ #  # ]:          0 :         page = ksm_might_need_to_copy(page, vma, vmf->address);
    3041         [ #  # ]:          0 :         if (unlikely(!page)) {
    3042                 :          0 :                 ret = VM_FAULT_OOM;
    3043                 :          0 :                 page = swapcache;
    3044                 :          0 :                 goto out_page;
    3045                 :            :         }
    3046                 :            : 
    3047         [ #  # ]:          0 :         if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
    3048                 :            :                                         &memcg, false)) {
    3049                 :            :                 ret = VM_FAULT_OOM;
    3050                 :            :                 goto out_page;
    3051                 :            :         }
    3052                 :            : 
    3053                 :            :         /*
    3054                 :            :          * Back out if somebody else already faulted in this pte.
    3055                 :            :          */
    3056         [ #  # ]:          0 :         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
    3057                 :            :                         &vmf->ptl);
    3058         [ #  # ]:          0 :         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
    3059                 :          0 :                 goto out_nomap;
    3060                 :            : 
    3061         [ #  # ]:          0 :         if (unlikely(!PageUptodate(page))) {
    3062                 :          0 :                 ret = VM_FAULT_SIGBUS;
    3063                 :          0 :                 goto out_nomap;
    3064                 :            :         }
    3065                 :            : 
    3066                 :            :         /*
    3067                 :            :          * The page isn't present yet, go ahead with the fault.
    3068                 :            :          *
    3069                 :            :          * Be careful about the sequence of operations here.
    3070                 :            :          * To get its accounting right, reuse_swap_page() must be called
    3071                 :            :          * while the page is counted on swap but not yet in mapcount i.e.
    3072                 :            :          * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
    3073                 :            :          * must be called after the swap_free(), or it will never succeed.
    3074                 :            :          */
    3075                 :            : 
    3076                 :          0 :         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
    3077                 :          0 :         dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
    3078         [ #  # ]:          0 :         pte = mk_pte(page, vma->vm_page_prot);
    3079   [ #  #  #  # ]:          0 :         if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
    3080         [ #  # ]:          0 :                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
    3081                 :          0 :                 vmf->flags &= ~FAULT_FLAG_WRITE;
    3082                 :          0 :                 ret |= VM_FAULT_WRITE;
    3083                 :          0 :                 exclusive = RMAP_EXCLUSIVE;
    3084                 :            :         }
    3085         [ #  # ]:          0 :         flush_icache_page(vma, page);
    3086         [ #  # ]:          0 :         if (pte_swp_soft_dirty(vmf->orig_pte))
    3087                 :            :                 pte = pte_mksoft_dirty(pte);
    3088         [ #  # ]:          0 :         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
    3089         [ #  # ]:          0 :         arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
    3090                 :          0 :         vmf->orig_pte = pte;
    3091                 :            : 
    3092                 :            :         /* ksm created a completely new copy */
    3093         [ #  # ]:          0 :         if (unlikely(page != swapcache && swapcache)) {
    3094                 :          0 :                 page_add_new_anon_rmap(page, vma, vmf->address, false);
    3095                 :          0 :                 mem_cgroup_commit_charge(page, memcg, false, false);
    3096                 :          0 :                 lru_cache_add_active_or_unevictable(page, vma);
    3097                 :            :         } else {
    3098                 :          0 :                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
    3099                 :          0 :                 mem_cgroup_commit_charge(page, memcg, true, false);
    3100                 :          0 :                 activate_page(page);
    3101                 :            :         }
    3102                 :            : 
    3103                 :          0 :         swap_free(entry);
    3104         [ #  # ]:          0 :         if (mem_cgroup_swap_full(page) ||
    3105   [ #  #  #  # ]:          0 :             (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
    3106                 :          0 :                 try_to_free_swap(page);
    3107                 :          0 :         unlock_page(page);
    3108         [ #  # ]:          0 :         if (page != swapcache && swapcache) {
    3109                 :            :                 /*
     3110                 :            :                  * Hold the lock to avoid the swap entry being reused
    3111                 :            :                  * until we take the PT lock for the pte_same() check
    3112                 :            :                  * (to avoid false positives from pte_same). For
    3113                 :            :                  * further safety release the lock after the swap_free
    3114                 :            :                  * so that the swap count won't change under a
    3115                 :            :                  * parallel locked swapcache.
    3116                 :            :                  */
    3117                 :          0 :                 unlock_page(swapcache);
    3118                 :          0 :                 put_page(swapcache);
    3119                 :            :         }
    3120                 :            : 
    3121         [ #  # ]:          0 :         if (vmf->flags & FAULT_FLAG_WRITE) {
    3122                 :          0 :                 ret |= do_wp_page(vmf);
    3123         [ #  # ]:          0 :                 if (ret & VM_FAULT_ERROR)
    3124                 :          0 :                         ret &= VM_FAULT_ERROR;
    3125                 :          0 :                 goto out;
    3126                 :            :         }
    3127                 :            : 
    3128                 :            :         /* No need to invalidate - it was non-present before */
    3129                 :            :         update_mmu_cache(vma, vmf->address, vmf->pte);
    3130                 :          0 : unlock:
    3131                 :          0 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3132                 :            : out:
    3133                 :            :         return ret;
    3134                 :          0 : out_nomap:
    3135                 :          0 :         mem_cgroup_cancel_charge(page, memcg, false);
    3136                 :          0 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3137                 :          0 : out_page:
    3138                 :          0 :         unlock_page(page);
    3139                 :          0 : out_release:
    3140                 :          0 :         put_page(page);
    3141         [ #  # ]:          0 :         if (page != swapcache && swapcache) {
    3142                 :          0 :                 unlock_page(swapcache);
    3143                 :          0 :                 put_page(swapcache);
    3144                 :            :         }
    3145                 :            :         return ret;
    3146                 :            : }
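
The swap-in path above is straightforward to exercise from userspace by paging memory out explicitly. Below is a minimal, hypothetical sketch, not part of memory.c, assuming a 5.4+ kernel (for MADV_PAGEOUT) and active swap; each page read back from the swap area is a major fault (VM_FAULT_MAJOR), visible through getrusage():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21                 /* uapi value; added in Linux 5.4 */
#endif

#define LEN (64UL << 20)                /* 64 MiB of anonymous memory */

int main(void)
{
        struct rusage ru;
        long majflt;
        char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        memset(p, 0x5a, LEN);           /* populate via do_anonymous_page() */
        if (madvise(p, LEN, MADV_PAGEOUT))
                perror("madvise(MADV_PAGEOUT)");   /* best effort: needs swap */

        getrusage(RUSAGE_SELF, &ru);
        majflt = ru.ru_majflt;

        volatile long sum = 0;
        for (unsigned long i = 0; i < LEN; i += 4096)
                sum += p[i];            /* swapped pages re-enter via do_swap_page() */

        getrusage(RUSAGE_SELF, &ru);
        printf("major faults during swap-in: %ld\n", ru.ru_majflt - majflt);
        return 0;
}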
    3147                 :            : 
    3148                 :            : /*
    3149                 :            :  * We enter with non-exclusive mmap_sem (to exclude vma changes,
    3150                 :            :  * but allow concurrent faults), and pte mapped but not yet locked.
    3151                 :            :  * We return with mmap_sem still held, but pte unmapped and unlocked.
    3152                 :            :  */
    3153                 :     152864 : static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
    3154                 :            : {
    3155                 :     152864 :         struct vm_area_struct *vma = vmf->vma;
    3156                 :     152864 :         struct mem_cgroup *memcg;
    3157                 :     152864 :         struct page *page;
    3158                 :     152864 :         vm_fault_t ret = 0;
    3159                 :     152864 :         pte_t entry;
    3160                 :            : 
    3161                 :            :         /* File mapping without ->vm_ops ? */
    3162         [ +  - ]:     152864 :         if (vma->vm_flags & VM_SHARED)
    3163                 :            :                 return VM_FAULT_SIGBUS;
    3164                 :            : 
    3165                 :            :         /*
    3166                 :            :          * Use pte_alloc() instead of pte_alloc_map().  We can't run
    3167                 :            :          * pte_offset_map() on pmds where a huge pmd might be created
    3168                 :            :          * from a different thread.
    3169                 :            :          *
    3170                 :            :          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
    3171                 :            :          * parallel threads are excluded by other means.
    3172                 :            :          *
    3173                 :            :          * Here we only have down_read(mmap_sem).
    3174                 :            :          */
    3175   [ +  +  +  - ]:     152864 :         if (pte_alloc(vma->vm_mm, vmf->pmd))
    3176                 :            :                 return VM_FAULT_OOM;
    3177                 :            : 
    3178                 :            :         /* See the comment in pte_alloc_one_map() */
    3179         [ +  + ]:     152864 :         if (unlikely(pmd_trans_unstable(vmf->pmd)))
    3180                 :            :                 return 0;
    3181                 :            : 
    3182                 :            :         /* Use the zero-page for reads */
    3183         [ +  + ]:     152864 :         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
    3184                 :            :                         !mm_forbids_zeropage(vma->vm_mm)) {
    3185         [ +  - ]:      10153 :                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
    3186                 :            :                                                 vma->vm_page_prot));
    3187         [ +  - ]:      20306 :                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
    3188                 :            :                                 vmf->address, &vmf->ptl);
    3189         [ -  + ]:      10153 :                 if (!pte_none(*vmf->pte))
    3190                 :          0 :                         goto unlock;
    3191                 :      10153 :                 ret = check_stable_address_space(vma->vm_mm);
    3192                 :      10153 :                 if (ret)
    3193                 :          0 :                         goto unlock;
    3194                 :            :                 /* Deliver the page fault to userland, check inside PT lock */
    3195                 :      10153 :                 if (userfaultfd_missing(vma)) {
    3196                 :            :                         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3197                 :            :                         return handle_userfault(vmf, VM_UFFD_MISSING);
    3198                 :            :                 }
    3199                 :      10153 :                 goto setpte;
    3200                 :            :         }
    3201                 :            : 
    3202                 :            :         /* Allocate our own private page. */
    3203   [ +  +  -  + ]:     193825 :         if (unlikely(anon_vma_prepare(vma)))
    3204                 :          0 :                 goto oom;
    3205                 :     142711 :         page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
    3206         [ -  + ]:     142711 :         if (!page)
    3207                 :          0 :                 goto oom;
    3208                 :            : 
    3209                 :     142711 :         if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
    3210                 :            :                                         false))
    3211                 :            :                 goto oom_free_page;
    3212                 :            : 
    3213                 :            :         /*
    3214                 :            :          * The memory barrier inside __SetPageUptodate makes sure that
    3215                 :            :          * preceding stores to the page contents become visible before
    3216                 :            :          * the set_pte_at() write.
    3217                 :            :          */
    3218                 :     142711 :         __SetPageUptodate(page);
    3219                 :            : 
    3220         [ +  - ]:     142711 :         entry = mk_pte(page, vma->vm_page_prot);
    3221         [ +  - ]:     142711 :         if (vma->vm_flags & VM_WRITE)
    3222                 :     142711 :                 entry = pte_mkwrite(pte_mkdirty(entry));
    3223                 :            : 
    3224         [ +  - ]:     285422 :         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
    3225                 :            :                         &vmf->ptl);
    3226         [ -  + ]:     142711 :         if (!pte_none(*vmf->pte))
    3227                 :          0 :                 goto release;
    3228                 :            : 
    3229                 :     142711 :         ret = check_stable_address_space(vma->vm_mm);
    3230                 :     142711 :         if (ret)
    3231                 :          0 :                 goto release;
    3232                 :            : 
    3233                 :            :         /* Deliver the page fault to userland, check inside PT lock */
    3234                 :     142711 :         if (userfaultfd_missing(vma)) {
    3235                 :            :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    3236                 :            :                 mem_cgroup_cancel_charge(page, memcg, false);
    3237                 :            :                 put_page(page);
    3238                 :            :                 return handle_userfault(vmf, VM_UFFD_MISSING);
    3239                 :            :         }
    3240                 :            : 
    3241                 :     142711 :         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
    3242                 :     142711 :         page_add_new_anon_rmap(page, vma, vmf->address, false);
    3243                 :     142711 :         mem_cgroup_commit_charge(page, memcg, false, false);
    3244                 :     142711 :         lru_cache_add_active_or_unevictable(page, vma);
    3245                 :     152864 : setpte:
    3246                 :     152864 :         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
    3247                 :            : 
    3248                 :            :         /* No need to invalidate - it was non-present before */
    3249                 :     152864 :         update_mmu_cache(vma, vmf->address, vmf->pte);
    3250                 :     152864 : unlock:
    3251                 :     152864 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3252                 :     152864 :         return ret;
    3253                 :          0 : release:
    3254                 :          0 :         mem_cgroup_cancel_charge(page, memcg, false);
    3255                 :          0 :         put_page(page);
    3256                 :          0 :         goto unlock;
    3257                 :            : oom_free_page:
    3258                 :            :         put_page(page);
    3259                 :            : oom:
    3260                 :            :         return VM_FAULT_OOM;
    3261                 :            : }
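
Both halves of do_anonymous_page() can be observed from userspace. A minimal hypothetical sketch (assuming an architecture where mm_forbids_zeropage() is false, e.g. x86-64, and a 4 KiB page size):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        /* Read fault on an untouched page: the !FAULT_FLAG_WRITE branch
         * installs a pte_mkspecial() mapping of the shared zero page;
         * no page is allocated. */
        char c = p[0];

        /* First write to a different untouched page: the write branch
         * allocates a zeroed page (alloc_zeroed_user_highpage_movable)
         * and maps it with pte_mkwrite(pte_mkdirty(...)).  (Writing to
         * p[0] instead would COW the zero page via do_wp_page().) */
        p[4096] = 1;

        printf("read saw %d, wrote %d\n", c, p[4096]);
        return 0;
}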
    3262                 :            : 
    3263                 :            : /*
    3264                 :            :  * The mmap_sem must have been held on entry, and may have been
    3265                 :            :  * released depending on flags and vma->vm_ops->fault() return value.
    3266                 :            :  * See filemap_fault() and __lock_page_retry().
    3267                 :            :  */
    3268                 :      92290 : static vm_fault_t __do_fault(struct vm_fault *vmf)
    3269                 :            : {
    3270                 :      92290 :         struct vm_area_struct *vma = vmf->vma;
    3271                 :      92290 :         vm_fault_t ret;
    3272                 :            : 
    3273                 :            :         /*
    3274                 :            :          * Preallocate pte before we take page_lock because this might lead to
    3275                 :            :          * deadlocks for memcg reclaim which waits for pages under writeback:
    3276                 :            :          *                              lock_page(A)
    3277                 :            :          *                              SetPageWriteback(A)
    3278                 :            :          *                              unlock_page(A)
    3279                 :            :          * lock_page(B)
    3280                 :            :          *                              lock_page(B)
     3281                 :            :  * pte_alloc_one
    3282                 :            :          *   shrink_page_list
    3283                 :            :          *     wait_on_page_writeback(A)
    3284                 :            :          *                              SetPageWriteback(B)
    3285                 :            :          *                              unlock_page(B)
    3286                 :            :          *                              # flush A, B to clear the writeback
    3287                 :            :          */
    3288   [ +  +  +  + ]:      92290 :         if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
    3289                 :      18767 :                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
    3290         [ +  - ]:      18767 :                 if (!vmf->prealloc_pte)
    3291                 :            :                         return VM_FAULT_OOM;
    3292                 :      18767 :                 smp_wmb(); /* See comment in __pte_alloc() */
    3293                 :            :         }
    3294                 :            : 
    3295                 :      92290 :         ret = vma->vm_ops->fault(vmf);
    3296         [ +  + ]:      92290 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
    3297                 :            :                             VM_FAULT_DONE_COW)))
    3298                 :            :                 return ret;
    3299                 :            : 
    3300         [ +  + ]:      86309 :         if (unlikely(PageHWPoison(vmf->page))) {
    3301                 :            :                 if (ret & VM_FAULT_LOCKED)
    3302                 :            :                         unlock_page(vmf->page);
    3303                 :            :                 put_page(vmf->page);
    3304                 :            :                 vmf->page = NULL;
    3305                 :            :                 return VM_FAULT_HWPOISON;
    3306                 :            :         }
    3307                 :            : 
    3308         [ +  + ]:      86309 :         if (unlikely(!(ret & VM_FAULT_LOCKED)))
    3309                 :       9075 :                 lock_page(vmf->page);
    3310                 :            :         else
    3311                 :            :                 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
    3312                 :            : 
    3313                 :            :         return ret;
    3314                 :            : }
    3315                 :            : 
    3316                 :            : /*
    3317                 :            :  * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
    3318                 :            :  * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
    3319                 :            :  * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
    3320                 :            :  * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
    3321                 :            :  */
    3322                 :    1767723 : static int pmd_devmap_trans_unstable(pmd_t *pmd)
    3323                 :            : {
    3324                 :    1767723 :         return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
    3325                 :            : }
    3326                 :            : 
    3327                 :     672311 : static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
    3328                 :            : {
    3329                 :     672311 :         struct vm_area_struct *vma = vmf->vma;
    3330                 :            : 
    3331         [ +  + ]:     672311 :         if (!pmd_none(*vmf->pmd))
    3332                 :     631231 :                 goto map_pte;
    3333         [ +  - ]:      41080 :         if (vmf->prealloc_pte) {
    3334                 :      41080 :                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    3335         [ -  + ]:      41080 :                 if (unlikely(!pmd_none(*vmf->pmd))) {
    3336                 :          0 :                         spin_unlock(vmf->ptl);
    3337                 :          0 :                         goto map_pte;
    3338                 :            :                 }
    3339                 :            : 
    3340                 :      41080 :                 mm_inc_nr_ptes(vma->vm_mm);
    3341                 :      41080 :                 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
    3342                 :      41080 :                 spin_unlock(vmf->ptl);
    3343                 :      41080 :                 vmf->prealloc_pte = NULL;
    3344   [ #  #  #  # ]:          0 :         } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
    3345                 :            :                 return VM_FAULT_OOM;
    3346                 :            :         }
    3347                 :          0 : map_pte:
    3348                 :            :         /*
     3349                 :            :          * If a huge pmd materialized under us, just retry later.  Use
    3350                 :            :          * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
    3351                 :            :          * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
    3352                 :            :          * under us and then back to pmd_none, as a result of MADV_DONTNEED
    3353                 :            :          * running immediately after a huge pmd fault in a different thread of
    3354                 :            :          * this mm, in turn leading to a misleading pmd_trans_huge() retval.
    3355                 :            :          * All we have to ensure is that it is a regular pmd that we can walk
    3356                 :            :          * with pte_offset_map() and we can do that through an atomic read in
    3357                 :            :          * C, which is what pmd_trans_unstable() provides.
    3358                 :            :          */
    3359         [ +  - ]:     672311 :         if (pmd_devmap_trans_unstable(vmf->pmd))
    3360                 :            :                 return VM_FAULT_NOPAGE;
    3361                 :            : 
    3362                 :            :         /*
    3363                 :            :          * At this point we know that our vmf->pmd points to a page of ptes
    3364                 :            :          * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
    3365                 :            :          * for the duration of the fault.  If a racing MADV_DONTNEED runs and
    3366                 :            :          * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
    3367                 :            :          * be valid and we will re-check to make sure the vmf->pte isn't
    3368                 :            :          * pte_none() under vmf->ptl protection when we return to
    3369                 :            :          * alloc_set_pte().
    3370                 :            :          */
    3371         [ +  - ]:    1344622 :         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
    3372                 :            :                         &vmf->ptl);
    3373                 :     672311 :         return 0;
    3374                 :            : }
    3375                 :            : 
    3376                 :            : #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
    3377                 :            : static void deposit_prealloc_pte(struct vm_fault *vmf)
    3378                 :            : {
    3379                 :            :         struct vm_area_struct *vma = vmf->vma;
    3380                 :            : 
    3381                 :            :         pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
    3382                 :            :         /*
    3383                 :            :          * We are going to consume the prealloc table,
    3384                 :            :          * count that as nr_ptes.
    3385                 :            :          */
    3386                 :            :         mm_inc_nr_ptes(vma->vm_mm);
    3387                 :            :         vmf->prealloc_pte = NULL;
    3388                 :            : }
    3389                 :            : 
    3390                 :            : static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
    3391                 :            : {
    3392                 :            :         struct vm_area_struct *vma = vmf->vma;
    3393                 :            :         bool write = vmf->flags & FAULT_FLAG_WRITE;
    3394                 :            :         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
    3395                 :            :         pmd_t entry;
    3396                 :            :         int i;
    3397                 :            :         vm_fault_t ret;
    3398                 :            : 
    3399                 :            :         if (!transhuge_vma_suitable(vma, haddr))
    3400                 :            :                 return VM_FAULT_FALLBACK;
    3401                 :            : 
    3402                 :            :         ret = VM_FAULT_FALLBACK;
    3403                 :            :         page = compound_head(page);
    3404                 :            : 
    3405                 :            :         /*
     3406                 :            :          * Archs like ppc64 need additional space to store information
    3407                 :            :          * related to pte entry. Use the preallocated table for that.
    3408                 :            :          */
    3409                 :            :         if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
    3410                 :            :                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
    3411                 :            :                 if (!vmf->prealloc_pte)
    3412                 :            :                         return VM_FAULT_OOM;
    3413                 :            :                 smp_wmb(); /* See comment in __pte_alloc() */
    3414                 :            :         }
    3415                 :            : 
    3416                 :            :         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    3417                 :            :         if (unlikely(!pmd_none(*vmf->pmd)))
    3418                 :            :                 goto out;
    3419                 :            : 
    3420                 :            :         for (i = 0; i < HPAGE_PMD_NR; i++)
    3421                 :            :                 flush_icache_page(vma, page + i);
    3422                 :            : 
    3423                 :            :         entry = mk_huge_pmd(page, vma->vm_page_prot);
    3424                 :            :         if (write)
    3425                 :            :                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    3426                 :            : 
    3427                 :            :         add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
    3428                 :            :         page_add_file_rmap(page, true);
    3429                 :            :         /*
    3430                 :            :          * deposit and withdraw with pmd lock held
    3431                 :            :          */
    3432                 :            :         if (arch_needs_pgtable_deposit())
    3433                 :            :                 deposit_prealloc_pte(vmf);
    3434                 :            : 
    3435                 :            :         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
    3436                 :            : 
    3437                 :            :         update_mmu_cache_pmd(vma, haddr, vmf->pmd);
    3438                 :            : 
    3439                 :            :         /* fault is handled */
    3440                 :            :         ret = 0;
    3441                 :            :         count_vm_event(THP_FILE_MAPPED);
    3442                 :            : out:
    3443                 :            :         spin_unlock(vmf->ptl);
    3444                 :            :         return ret;
    3445                 :            : }
    3446                 :            : #else
    3447                 :            : static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
    3448                 :            : {
    3449                 :            :         BUILD_BUG();
    3450                 :            :         return 0;
    3451                 :            : }
    3452                 :            : #endif
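
Whether do_set_pmd() is reachable depends on CONFIG_TRANSPARENT_HUGE_PAGECACHE and the shmem huge-page policy. A hypothetical userspace sketch that can hit it on shmem, assuming memfd_create() support, a 2 MiB pmd size, and a policy (e.g. shmem huge=advise) that honours MADV_HUGEPAGE; transhuge_vma_suitable() additionally requires a pmd-aligned mapping, so the pmd path is best effort here:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE (2UL << 20)               /* assumed pmd size (x86-64) */

int main(void)
{
        int fd = memfd_create("thp-demo", 0);
        if (fd < 0 || ftruncate(fd, HPAGE)) { perror("memfd"); return 1; }

        char *p = mmap(NULL, HPAGE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        madvise(p, HPAGE, MADV_HUGEPAGE);  /* request huge pages, if allowed */
        memset(p, 0, HPAGE);               /* fault; a pmd mapping bumps
                                              THP_FILE_MAPPED in /proc/vmstat */
        printf("compare thp_file_mapped in /proc/vmstat before/after\n");
        return 0;
}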
    3453                 :            : 
    3454                 :            : /**
    3455                 :            :  * alloc_set_pte - setup new PTE entry for given page and add reverse page
     3456                 :            :  * mapping. If needed, the function allocates a page table or uses a pre-allocated one.
    3457                 :            :  *
    3458                 :            :  * @vmf: fault environment
    3459                 :            :  * @memcg: memcg to charge page (only for private mappings)
    3460                 :            :  * @page: page to map
    3461                 :            :  *
    3462                 :            :  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
    3463                 :            :  * return.
    3464                 :            :  *
     3465                 :            :  * Target users are the page handler itself and implementations of
    3466                 :            :  * vm_ops->map_pages.
    3467                 :            :  *
    3468                 :            :  * Return: %0 on success, %VM_FAULT_ code in case of error.
    3469                 :            :  */
    3470                 :    6570639 : vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
    3471                 :            :                 struct page *page)
    3472                 :            : {
    3473                 :    6570639 :         struct vm_area_struct *vma = vmf->vma;
    3474                 :    6570639 :         bool write = vmf->flags & FAULT_FLAG_WRITE;
    3475                 :    6570639 :         pte_t entry;
    3476                 :    6570639 :         vm_fault_t ret;
    3477                 :            : 
    3478         [ +  + ]:    6570639 :         if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
    3479                 :            :                         IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
    3480                 :            :                 /* THP on COW? */
    3481                 :            :                 VM_BUG_ON_PAGE(memcg, page);
    3482                 :            : 
    3483                 :            :                 ret = do_set_pmd(vmf, page);
    3484                 :            :                 if (ret != VM_FAULT_FALLBACK)
    3485                 :            :                         return ret;
    3486                 :            :         }
    3487                 :            : 
    3488         [ +  + ]:    6570639 :         if (!vmf->pte) {
    3489                 :     672311 :                 ret = pte_alloc_one_map(vmf);
    3490         [ +  - ]:     672311 :                 if (ret)
    3491                 :            :                         return ret;
    3492                 :            :         }
    3493                 :            : 
    3494                 :            :         /* Re-check under ptl */
    3495         [ +  + ]:    6570639 :         if (unlikely(!pte_none(*vmf->pte)))
    3496                 :            :                 return VM_FAULT_NOPAGE;
    3497                 :            : 
    3498         [ +  - ]:    6087843 :         flush_icache_page(vma, page);
    3499         [ +  - ]:    6087843 :         entry = mk_pte(page, vma->vm_page_prot);
    3500         [ +  + ]:    6087843 :         if (write)
    3501         [ +  - ]:      77216 :                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    3502                 :            :         /* copy-on-write page */
    3503   [ +  +  +  + ]:    6087843 :         if (write && !(vma->vm_flags & VM_SHARED)) {
    3504                 :      75745 :                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
    3505                 :      75745 :                 page_add_new_anon_rmap(page, vma, vmf->address, false);
    3506                 :      75745 :                 mem_cgroup_commit_charge(page, memcg, false, false);
    3507                 :      75745 :                 lru_cache_add_active_or_unevictable(page, vma);
    3508                 :            :         } else {
    3509                 :    6012098 :                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
    3510                 :    6012098 :                 page_add_file_rmap(page, false);
    3511                 :            :         }
    3512                 :    6087843 :         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
    3513                 :            : 
    3514                 :            :         /* no need to invalidate: a not-present page won't be cached */
    3515                 :    6087843 :         update_mmu_cache(vma, vmf->address, vmf->pte);
    3516                 :            : 
    3517                 :    6087843 :         return 0;
    3518                 :            : }
    3519                 :            : 
    3520                 :            : 
    3521                 :            : /**
    3522                 :            :  * finish_fault - finish page fault once we have prepared the page to fault
    3523                 :            :  *
    3524                 :            :  * @vmf: structure describing the fault
    3525                 :            :  *
    3526                 :            :  * This function handles all that is needed to finish a page fault once the
    3527                 :            :  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
    3528                 :            :  * given page, adds reverse page mapping, handles memcg charges and LRU
    3529                 :            :  * addition.
    3530                 :            :  *
    3531                 :            :  * The function expects the page to be locked and on success it consumes a
     3532                 :            :  * reference to the page being mapped (for the PTE which maps it).
    3533                 :            :  *
    3534                 :            :  * Return: %0 on success, %VM_FAULT_ code in case of error.
    3535                 :            :  */
    3536                 :      86309 : vm_fault_t finish_fault(struct vm_fault *vmf)
    3537                 :            : {
    3538                 :      86309 :         struct page *page;
    3539                 :      86309 :         vm_fault_t ret = 0;
    3540                 :            : 
    3541                 :            :         /* Did we COW the page? */
    3542         [ +  + ]:      86309 :         if ((vmf->flags & FAULT_FLAG_WRITE) &&
    3543         [ +  + ]:      77216 :             !(vmf->vma->vm_flags & VM_SHARED))
    3544                 :      75745 :                 page = vmf->cow_page;
    3545                 :            :         else
    3546                 :      10564 :                 page = vmf->page;
    3547                 :            : 
    3548                 :            :         /*
    3549                 :            :          * check even for read faults because we might have lost our CoWed
    3550                 :            :          * page
    3551                 :            :          */
    3552         [ +  + ]:      86309 :         if (!(vmf->vma->vm_flags & VM_SHARED))
    3553                 :      84827 :                 ret = check_stable_address_space(vmf->vma->vm_mm);
    3554                 :            :         if (!ret)
    3555                 :      86309 :                 ret = alloc_set_pte(vmf, vmf->memcg, page);
    3556         [ +  - ]:      86309 :         if (vmf->pte)
    3557                 :      86309 :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    3558                 :      86309 :         return ret;
    3559                 :            : }
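
The read-fault flow that ends in finish_fault()/alloc_set_pte() is visible in a process's minor-fault count. A minimal hypothetical sketch (assuming a writable /tmp); note how fault-around, described next, makes the observed count much smaller than the number of pages touched:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        char path[] = "/tmp/fault-demo-XXXXXX";
        int fd = mkstemp(path);
        if (fd < 0) { perror("mkstemp"); return 1; }
        unlink(path);
        if (ftruncate(fd, 16 * 4096)) { perror("ftruncate"); return 1; }

        char *p = mmap(NULL, 16 * 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        struct rusage ru;
        getrusage(RUSAGE_SELF, &ru);
        long minflt = ru.ru_minflt;

        volatile char sink;
        for (int i = 0; i < 16; i++)
                sink = p[i * 4096];     /* read faults on a shared file mapping */
        (void)sink;

        getrusage(RUSAGE_SELF, &ru);
        /* With the default 64 KiB fault_around_bytes, one fault can map
         * up to 16 surrounding ptes, so this prints ~1 rather than 16. */
        printf("minor faults taken: %ld\n", ru.ru_minflt - minflt);
        return 0;
}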
    3560                 :            : 
    3561                 :            : static unsigned long fault_around_bytes __read_mostly =
    3562                 :            :         rounddown_pow_of_two(65536);
    3563                 :            : 
    3564                 :            : #ifdef CONFIG_DEBUG_FS
    3565                 :          0 : static int fault_around_bytes_get(void *data, u64 *val)
    3566                 :            : {
    3567                 :          0 :         *val = fault_around_bytes;
    3568                 :          0 :         return 0;
    3569                 :            : }
    3570                 :            : 
    3571                 :            : /*
    3572                 :            :  * fault_around_bytes must be rounded down to the nearest page order as it's
    3573                 :            :  * what do_fault_around() expects to see.
    3574                 :            :  */
    3575                 :          0 : static int fault_around_bytes_set(void *data, u64 val)
    3576                 :            : {
    3577         [ #  # ]:          0 :         if (val / PAGE_SIZE > PTRS_PER_PTE)
    3578                 :            :                 return -EINVAL;
    3579         [ #  # ]:          0 :         if (val > PAGE_SIZE)
    3580   [ #  #  #  #  :          0 :                 fault_around_bytes = rounddown_pow_of_two(val);
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  #  #  #  
                #  #  # ]
    3581                 :            :         else
    3582                 :          0 :                 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
    3583                 :            :         return 0;
    3584                 :            : }
    3585                 :          0 : DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
    3586                 :            :                 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
    3587                 :            : 
    3588                 :         11 : static int __init fault_around_debugfs(void)
    3589                 :            : {
    3590                 :         11 :         debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
    3591                 :            :                                    &fault_around_bytes_fops);
    3592                 :         11 :         return 0;
    3593                 :            : }
    3594                 :            : late_initcall(fault_around_debugfs);
    3595                 :            : #endif
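
The debugfs knob above can be driven from userspace; a minimal hypothetical sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug and the caller is privileged:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/fault_around_bytes", O_WRONLY);
        if (fd < 0) { perror("open"); return 1; }

        /* Per fault_around_bytes_set(): values above PAGE_SIZE are
         * rounded down to a power of two, values spanning more than
         * PTRS_PER_PTE pages return -EINVAL, and anything else falls
         * back to PAGE_SIZE, which disables fault-around. */
        const char *val = "65536";
        if (write(fd, val, strlen(val)) < 0)
                perror("write");
        close(fd);
        return 0;
}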
    3596                 :            : 
    3597                 :            : /*
     3598                 :            :  * do_fault_around() tries to map a few pages around the fault address. The hope
    3599                 :            :  * is that the pages will be needed soon and this will lower the number of
    3600                 :            :  * faults to handle.
    3601                 :            :  *
    3602                 :            :  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
    3603                 :            :  * not ready to be mapped: not up-to-date, locked, etc.
    3604                 :            :  *
    3605                 :            :  * This function is called with the page table lock taken. In the split ptlock
     3606                 :            :  * case the page table lock only protects those entries which belong to
    3607                 :            :  * the page table corresponding to the fault address.
    3608                 :            :  *
    3609                 :            :  * This function doesn't cross the VMA boundaries, in order to call map_pages()
    3610                 :            :  * only once.
    3611                 :            :  *
    3612                 :            :  * fault_around_bytes defines how many bytes we'll try to map.
    3613                 :            :  * do_fault_around() expects it to be set to a power of two less than or equal
    3614                 :            :  * to PTRS_PER_PTE.
    3615                 :            :  *
    3616                 :            :  * The virtual address of the area that we map is naturally aligned to
    3617                 :            :  * fault_around_bytes rounded down to the machine page size
    3618                 :            :  * (and therefore to page order).  This way it's easier to guarantee
    3619                 :            :  * that we don't cross page table boundaries.
    3620                 :            :  */
    3621                 :     587363 : static vm_fault_t do_fault_around(struct vm_fault *vmf)
    3622                 :            : {
    3623                 :     587363 :         unsigned long address = vmf->address, nr_pages, mask;
    3624                 :     587363 :         pgoff_t start_pgoff = vmf->pgoff;
    3625                 :     587363 :         pgoff_t end_pgoff;
    3626                 :     587363 :         int off;
    3627                 :     587363 :         vm_fault_t ret = 0;
    3628                 :            : 
    3629         [ +  + ]:     587363 :         nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
    3630                 :     587363 :         mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
    3631                 :            : 
    3632                 :     587363 :         vmf->address = max(address & mask, vmf->vma->vm_start);
    3633                 :     587363 :         off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    3634                 :     587363 :         start_pgoff -= off;
    3635                 :            : 
    3636                 :            :         /*
    3637                 :            :          *  end_pgoff is either the end of the page table, the end of
     3638                 :            :          *  the vma or nr_pages from start_pgoff, depending on what is nearest.
    3639                 :            :          */
    3640                 :     587363 :         end_pgoff = start_pgoff -
    3641                 :     587363 :                 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
    3642                 :            :                 PTRS_PER_PTE - 1;
    3643         [ +  + ]:     587363 :         end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
    3644                 :            :                         start_pgoff + nr_pages - 1);
    3645                 :            : 
    3646         [ +  + ]:     587363 :         if (pmd_none(*vmf->pmd)) {
    3647                 :      22845 :                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
    3648         [ -  + ]:      22845 :                 if (!vmf->prealloc_pte)
    3649                 :          0 :                         goto out;
    3650                 :      22845 :                 smp_wmb(); /* See comment in __pte_alloc() */
    3651                 :            :         }
    3652                 :            : 
    3653                 :     587363 :         vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
    3654                 :            : 
    3655                 :            :         /* Huge page is mapped? Page fault is solved */
    3656         [ +  + ]:     587363 :         if (pmd_trans_huge(*vmf->pmd)) {
    3657                 :            :                 ret = VM_FAULT_NOPAGE;
    3658                 :            :                 goto out;
    3659                 :            :         }
    3660                 :            : 
     3661                 :            :         /* ->map_pages() hasn't done anything useful. Cold page cache? */
    3662         [ +  + ]:     587363 :         if (!vmf->pte)
    3663                 :       1361 :                 goto out;
    3664                 :            : 
    3665                 :            :         /* check if the page fault is solved */
    3666                 :     586002 :         vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
    3667         [ +  + ]:     586002 :         if (!pte_none(*vmf->pte))
    3668                 :     583339 :                 ret = VM_FAULT_NOPAGE;
    3669                 :     586002 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3670                 :     587363 : out:
    3671                 :     587363 :         vmf->address = address;
    3672                 :     587363 :         vmf->pte = NULL;
    3673                 :     587363 :         return ret;
    3674                 :            : }
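
The alignment arithmetic in do_fault_around() is easier to follow with concrete numbers. A standalone sketch replicating it (assuming 4 KiB pages and PTRS_PER_PTE == 512 as on x86-64; the addresses are hypothetical):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long fault_around_bytes = 65536;   /* default: 16 pages */
        unsigned long address  = 0x7f1234567000UL;  /* faulting address  */
        unsigned long vm_start = 0x7f1234500000UL;  /* vma start         */

        unsigned long nr_pages = fault_around_bytes >> PAGE_SHIFT;
        unsigned long mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
        unsigned long aligned = address & mask;
        unsigned long start = aligned > vm_start ? aligned : vm_start;

        /* 0x7f1234567000 & ~0xffff == 0x7f1234560000: the 16-page window
         * covers ...560000-...56ffff.  Because 64 KiB divides the 2 MiB
         * span of one page table, the window never crosses a page-table
         * boundary, which is what the comment above guarantees. */
        printf("window starts at %#lx, %lu pages\n", start, nr_pages);
        return 0;
}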
    3675                 :            : 
    3676                 :     597213 : static vm_fault_t do_read_fault(struct vm_fault *vmf)
    3677                 :            : {
    3678                 :     597213 :         struct vm_area_struct *vma = vmf->vma;
    3679                 :     597213 :         vm_fault_t ret = 0;
    3680                 :            : 
    3681                 :            :         /*
    3682                 :            :          * Let's call ->map_pages() first and use ->fault() as fallback
    3683                 :            :          * if page by the offset is not ready to be mapped (cold cache or
    3684                 :            :          * something).
    3685                 :            :          */
    3686   [ +  +  +  - ]:     597213 :         if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
    3687                 :     587363 :                 ret = do_fault_around(vmf);
    3688         [ +  + ]:     587363 :                 if (ret)
    3689                 :            :                         return ret;
    3690                 :            :         }
    3691                 :            : 
    3692                 :      13874 :         ret = __do_fault(vmf);
    3693         [ +  + ]:      13874 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
    3694                 :            :                 return ret;
    3695                 :            : 
    3696                 :       9093 :         ret |= finish_fault(vmf);
    3697                 :       9093 :         unlock_page(vmf->page);
    3698         [ -  + ]:       9093 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
    3699                 :          0 :                 put_page(vmf->page);
    3700                 :            :         return ret;
    3701                 :            : }
    3702                 :            : 
    3703                 :      76945 : static vm_fault_t do_cow_fault(struct vm_fault *vmf)
    3704                 :            : {
    3705                 :      76945 :         struct vm_area_struct *vma = vmf->vma;
    3706                 :      76945 :         vm_fault_t ret;
    3707                 :            : 
    3708   [ +  +  +  - ]:     125719 :         if (unlikely(anon_vma_prepare(vma)))
    3709                 :            :                 return VM_FAULT_OOM;
    3710                 :            : 
    3711                 :      76945 :         vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
    3712         [ +  - ]:      76945 :         if (!vmf->cow_page)
    3713                 :            :                 return VM_FAULT_OOM;
    3714                 :            : 
    3715                 :      76945 :         if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
    3716                 :            :                                 &vmf->memcg, false)) {
    3717                 :            :                 put_page(vmf->cow_page);
    3718                 :            :                 return VM_FAULT_OOM;
    3719                 :            :         }
    3720                 :            : 
    3721                 :      76945 :         ret = __do_fault(vmf);
    3722         [ +  + ]:      76945 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
    3723                 :       1200 :                 goto uncharge_out;
    3724         [ +  - ]:      75745 :         if (ret & VM_FAULT_DONE_COW)
    3725                 :            :                 return ret;
    3726                 :            : 
    3727                 :      75745 :         copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
    3728                 :      75745 :         __SetPageUptodate(vmf->cow_page);
    3729                 :            : 
    3730                 :      75745 :         ret |= finish_fault(vmf);
    3731                 :      75745 :         unlock_page(vmf->page);
    3732                 :      75745 :         put_page(vmf->page);
    3733         [ -  + ]:      75745 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
    3734                 :          0 :                 goto uncharge_out;
    3735                 :            :         return ret;
    3736                 :       1200 : uncharge_out:
    3737                 :       1200 :         mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
    3738                 :       1200 :         put_page(vmf->cow_page);
    3739                 :       1200 :         return ret;
    3740                 :            : }
    3741                 :            : 
    3742                 :       1471 : static vm_fault_t do_shared_fault(struct vm_fault *vmf)
    3743                 :            : {
    3744                 :       1471 :         struct vm_area_struct *vma = vmf->vma;
    3745                 :       1471 :         vm_fault_t ret, tmp;
    3746                 :            : 
    3747                 :       1471 :         ret = __do_fault(vmf);
    3748         [ +  - ]:       1471 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
    3749                 :            :                 return ret;
    3750                 :            : 
    3751                 :            :         /*
    3752                 :            :          * Check if the backing address space wants to know that the page is
    3753                 :            :          * about to become writable
    3754                 :            :          */
    3755         [ -  + ]:       1471 :         if (vma->vm_ops->page_mkwrite) {
    3756                 :          0 :                 unlock_page(vmf->page);
    3757                 :          0 :                 tmp = do_page_mkwrite(vmf);
    3758   [ #  #  #  # ]:          0 :                 if (unlikely(!tmp ||
    3759                 :            :                                 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
    3760                 :          0 :                         put_page(vmf->page);
    3761                 :          0 :                         return tmp;
    3762                 :            :                 }
    3763                 :            :         }
    3764                 :            : 
    3765                 :       1471 :         ret |= finish_fault(vmf);
    3766         [ -  + ]:       1471 :         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
    3767                 :            :                                         VM_FAULT_RETRY))) {
    3768                 :          0 :                 unlock_page(vmf->page);
    3769                 :          0 :                 put_page(vmf->page);
    3770                 :          0 :                 return ret;
    3771                 :            :         }
    3772                 :            : 
    3773                 :       1471 :         ret |= fault_dirty_shared_page(vmf);
    3774                 :       1471 :         return ret;
    3775                 :            : }
    3776                 :            : 
    3777                 :            : /*
    3778                 :            :  * We enter with non-exclusive mmap_sem (to exclude vma changes,
    3779                 :            :  * but allow concurrent faults).
    3780                 :            :  * The mmap_sem may have been released depending on flags and our
    3781                 :            :  * return value.  See filemap_fault() and __lock_page_or_retry().
    3782                 :            :  * If mmap_sem is released, vma may become invalid (for example
    3783                 :            :  * by other thread calling munmap()).
    3784                 :            :  */
    3785                 :     675629 : static vm_fault_t do_fault(struct vm_fault *vmf)
    3786                 :            : {
    3787                 :     675629 :         struct vm_area_struct *vma = vmf->vma;
    3788                 :     675629 :         struct mm_struct *vm_mm = vma->vm_mm;
    3789                 :     675629 :         vm_fault_t ret;
    3790                 :            : 
    3791                 :            :         /*
    3792                 :            :          * The VMA was not fully populated on mmap(), or is missing VM_DONTEXPAND
    3793                 :            :          */
    3794         [ -  + ]:     675629 :         if (!vma->vm_ops->fault) {
    3795                 :            :                 /*
    3796                 :            :                  * If we find a migration pmd entry or a none pmd entry, which
    3797                 :            :                  * should never happen, return SIGBUS
    3798                 :            :                  */
    3799   [ #  #  #  # ]:          0 :                 if (unlikely(!pmd_present(*vmf->pmd)))
    3800                 :            :                         ret = VM_FAULT_SIGBUS;
    3801                 :            :                 else {
    3802         [ #  # ]:          0 :                         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
    3803                 :            :                                                        vmf->pmd,
    3804                 :            :                                                        vmf->address,
    3805                 :            :                                                        &vmf->ptl);
    3806                 :            :                         /*
    3807                 :            :                          * Make sure this is not a temporary clearing of pte
    3808                 :            :                          * by holding ptl and checking again. A R/M/W update
    3809                 :            :                          * of pte involves: take ptl, clearing the pte so that
    3810                 :            :                          * we don't have concurrent modification by hardware
    3811                 :            :                          * followed by an update.
    3812                 :            :                          */
    3813         [ #  # ]:          0 :                         if (unlikely(pte_none(*vmf->pte)))
    3814                 :            :                                 ret = VM_FAULT_SIGBUS;
    3815                 :            :                         else
    3816                 :          0 :                                 ret = VM_FAULT_NOPAGE;
    3817                 :            : 
    3818                 :          0 :                         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3819                 :            :                 }
    3820         [ +  + ]:     675629 :         } else if (!(vmf->flags & FAULT_FLAG_WRITE))
    3821                 :     597213 :                 ret = do_read_fault(vmf);
    3822         [ +  + ]:      78416 :         else if (!(vma->vm_flags & VM_SHARED))
    3823                 :      76945 :                 ret = do_cow_fault(vmf);
    3824                 :            :         else
    3825                 :       1471 :                 ret = do_shared_fault(vmf);
    3826                 :            : 
    3827                 :            :         /* preallocated pagetable is unused: free it */
    3828         [ +  + ]:     675629 :         if (vmf->prealloc_pte) {
    3829                 :        532 :                 pte_free(vm_mm, vmf->prealloc_pte);
    3830                 :        532 :                 vmf->prealloc_pte = NULL;
    3831                 :            :         }
    3832                 :     675629 :         return ret;
    3833                 :            : }
    3834                 :            : 
    3835                 :            : static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
    3836                 :            :                                 unsigned long addr, int page_nid,
    3837                 :            :                                 int *flags)
    3838                 :            : {
    3839                 :            :         get_page(page);
    3840                 :            : 
    3841                 :            :         count_vm_numa_event(NUMA_HINT_FAULTS);
    3842                 :            :         if (page_nid == numa_node_id()) {
    3843                 :            :                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
    3844                 :            :                 *flags |= TNF_FAULT_LOCAL;
    3845                 :            :         }
    3846                 :            : 
    3847                 :            :         return mpol_misplaced(page, vma, addr);
    3848                 :            : }
    3849                 :            : 
    3850                 :            : static vm_fault_t do_numa_page(struct vm_fault *vmf)
    3851                 :            : {
    3852                 :            :         struct vm_area_struct *vma = vmf->vma;
    3853                 :            :         struct page *page = NULL;
    3854                 :            :         int page_nid = NUMA_NO_NODE;
    3855                 :            :         int last_cpupid;
    3856                 :            :         int target_nid;
    3857                 :            :         bool migrated = false;
    3858                 :            :         pte_t pte, old_pte;
    3859                 :            :         bool was_writable = pte_savedwrite(vmf->orig_pte);
    3860                 :            :         int flags = 0;
    3861                 :            : 
    3862                 :            :         /*
    3863                 :            :          * The "pte" at this point cannot be used safely without
    3864                 :            :          * validation through pte_unmap_same(). It's of NUMA type but
    3865                 :            :          * the pfn may be bogus if the read is not atomic.
    3866                 :            :          */
    3867                 :            :         vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
    3868                 :            :         spin_lock(vmf->ptl);
    3869                 :            :         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
    3870                 :            :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    3871                 :            :                 goto out;
    3872                 :            :         }
    3873                 :            : 
    3874                 :            :         /*
    3875                 :            :          * Make it present again. Depending on how the arch implements
    3876                 :            :          * non-accessible ptes, some may still allow access from kernel mode.
    3877                 :            :          */
    3878                 :            :         old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
    3879                 :            :         pte = pte_modify(old_pte, vma->vm_page_prot);
    3880                 :            :         pte = pte_mkyoung(pte);
    3881                 :            :         if (was_writable)
    3882                 :            :                 pte = pte_mkwrite(pte);
    3883                 :            :         ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
    3884                 :            :         update_mmu_cache(vma, vmf->address, vmf->pte);
    3885                 :            : 
    3886                 :            :         page = vm_normal_page(vma, vmf->address, pte);
    3887                 :            :         if (!page) {
    3888                 :            :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    3889                 :            :                 return 0;
    3890                 :            :         }
    3891                 :            : 
    3892                 :            :         /* TODO: handle PTE-mapped THP */
    3893                 :            :         if (PageCompound(page)) {
    3894                 :            :                 pte_unmap_unlock(vmf->pte, vmf->ptl);
    3895                 :            :                 return 0;
    3896                 :            :         }
    3897                 :            : 
    3898                 :            :         /*
    3899                 :            :          * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
    3900                 :            :          * much anyway since they can be in shared cache state. This misses
    3901                 :            :          * the case where a mapping is writable but the process never writes
    3902                 :            :          * to it: pte_write gets cleared during protection updates, and
    3903                 :            :          * pte_dirty behaves unpredictably across PTE scan updates,
    3904                 :            :          * background writeback, dirty balancing and application behaviour.
    3905                 :            :          */
    3906                 :            :         if (!pte_write(pte))
    3907                 :            :                 flags |= TNF_NO_GROUP;
    3908                 :            : 
    3909                 :            :         /*
    3910                 :            :          * Flag if the page is shared between multiple address spaces. This
    3911                 :            :          * is later used when determining whether to group tasks together
    3912                 :            :          */
    3913                 :            :         if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
    3914                 :            :                 flags |= TNF_SHARED;
    3915                 :            : 
    3916                 :            :         last_cpupid = page_cpupid_last(page);
    3917                 :            :         page_nid = page_to_nid(page);
    3918                 :            :         target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
    3919                 :            :                         &flags);
    3920                 :            :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    3921                 :            :         if (target_nid == NUMA_NO_NODE) {
    3922                 :            :                 put_page(page);
    3923                 :            :                 goto out;
    3924                 :            :         }
    3925                 :            : 
    3926                 :            :         /* Migrate to the requested node */
    3927                 :            :         migrated = migrate_misplaced_page(page, vma, target_nid);
    3928                 :            :         if (migrated) {
    3929                 :            :                 page_nid = target_nid;
    3930                 :            :                 flags |= TNF_MIGRATED;
    3931                 :            :         } else
    3932                 :            :                 flags |= TNF_MIGRATE_FAIL;
    3933                 :            : 
    3934                 :            : out:
    3935                 :            :         if (page_nid != NUMA_NO_NODE)
    3936                 :            :                 task_numa_fault(last_cpupid, page_nid, 1, flags);
    3937                 :            :         return 0;
    3938                 :            : }
    3939                 :            : 
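do_numa_page() only runs for hinting faults that the NUMA-balancing scanner
installs as pte_protnone entries; whether the faulting page is "misplaced"
is a policy question answered by mpol_misplaced(). One way userspace shapes
that answer is mbind(2). A hedged sketch, assuming a NUMA machine with node
0 online (requires the libnuma headers for numaif.h):

    #include <numaif.h>             /* mbind(), MPOL_BIND */
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1UL << 20;
            unsigned long nodemask = 1UL << 0;      /* node 0 only */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            /* mpol_misplaced() consults this policy on a hinting fault:
             * pages of this range found off node 0 become candidates for
             * migrate_misplaced_page(). */
            return mbind(p, len, MPOL_BIND, &nodemask,
                         sizeof(nodemask) * 8, 0) ? 1 : 0;
    }
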
    3940                 :            : static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
    3941                 :            : {
    3942                 :            :         if (vma_is_anonymous(vmf->vma))
    3943                 :            :                 return do_huge_pmd_anonymous_page(vmf);
    3944                 :            :         if (vmf->vma->vm_ops->huge_fault)
    3945                 :            :                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
    3946                 :            :         return VM_FAULT_FALLBACK;
    3947                 :            : }
    3948                 :            : 
    3949                 :            : /* `inline' is required to avoid gcc 4.1.2 build error */
    3950                 :            : static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
    3951                 :            : {
    3952                 :            :         if (vma_is_anonymous(vmf->vma))
    3953                 :            :                 return do_huge_pmd_wp_page(vmf, orig_pmd);
    3954                 :            :         if (vmf->vma->vm_ops->huge_fault)
    3955                 :            :                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
    3956                 :            : 
    3957                 :            :         /* COW handled on pte level: split pmd */
    3958                 :            :         VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
    3959                 :            :         __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
    3960                 :            : 
    3961                 :            :         return VM_FAULT_FALLBACK;
    3962                 :            : }
    3963                 :            : 
    3964                 :            : static inline bool vma_is_accessible(struct vm_area_struct *vma)
    3965                 :            : {
    3966                 :            :         return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
    3967                 :            : }
    3968                 :            : 
    3969                 :            : static vm_fault_t create_huge_pud(struct vm_fault *vmf)
    3970                 :            : {
    3971                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    3972                 :            :         /* No support for anonymous transparent PUD pages yet */
    3973                 :            :         if (vma_is_anonymous(vmf->vma))
    3974                 :            :                 return VM_FAULT_FALLBACK;
    3975                 :            :         if (vmf->vma->vm_ops->huge_fault)
    3976                 :            :                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
    3977                 :            : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
    3978                 :            :         return VM_FAULT_FALLBACK;
    3979                 :            : }
    3980                 :            : 
    3981                 :            : static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
    3982                 :            : {
    3983                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    3984                 :            :         /* No support for anonymous transparent PUD pages yet */
    3985                 :            :         if (vma_is_anonymous(vmf->vma))
    3986                 :            :                 return VM_FAULT_FALLBACK;
    3987                 :            :         if (vmf->vma->vm_ops->huge_fault)
    3988                 :            :                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
    3989                 :            : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
    3990                 :            :         return VM_FAULT_FALLBACK;
    3991                 :            : }
    3992                 :            : 
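All four helpers above defer to the VMA's ->huge_fault() for file-backed
mappings; in this era that hook is supplied mainly by DAX filesystems. A
minimal sketch of the wiring ("myfs" and myfs_iomap_ops are illustrative
placeholders, and myfs_fault would be the corresponding PTE-level handler):

    #include <linux/dax.h>
    #include <linux/mm.h>

    static vm_fault_t myfs_huge_fault(struct vm_fault *vmf,
                                      enum page_entry_size pe_size)
    {
            /* myfs_iomap_ops would describe the file's block mapping */
            return dax_iomap_fault(vmf, pe_size, NULL, NULL, &myfs_iomap_ops);
    }

    static const struct vm_operations_struct myfs_dax_vm_ops = {
            .fault          = myfs_fault,           /* PE_SIZE_PTE case */
            .huge_fault     = myfs_huge_fault,      /* PMD and PUD cases */
    };
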
    3993                 :            : /*
    3994                 :            :  * These routines also need to handle stuff like marking pages dirty
    3995                 :            :  * and/or accessed for architectures that don't do it in hardware (most
    3996                 :            :  * RISC architectures).  The early dirtying is also good on the i386.
    3997                 :            :  *
    3998                 :            :  * There is also a hook called "update_mmu_cache()" that architectures
    3999                 :            :  * with external mmu caches can use to update those (i.e. the Sparc or
    4000                 :            :  * PowerPC hashed page tables that act as extended TLBs).
    4001                 :            :  *
    4002                 :            :  * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
    4003                 :            :  * concurrent faults).
    4004                 :            :  *
    4005                 :            :  * The mmap_sem may have been released depending on flags and our return value.
    4006                 :            :  * See filemap_fault() and __lock_page_or_retry().
    4007                 :            :  */
    4008                 :    1155182 : static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
    4009                 :            : {
    4010                 :    1155182 :         pte_t entry;
    4011                 :            : 
    4012         [ +  + ]:    1155182 :         if (unlikely(pmd_none(*vmf->pmd))) {
    4013                 :            :                 /*
    4014                 :            :                  * Leave __pte_alloc() until later: vm_ops->fault may want to
    4015                 :            :                  * allocate a huge page, and if we exposed the page table for
    4016                 :            :                  * an instant, it would be difficult to retract it from
    4017                 :            :                  * concurrent faults and from rmap lookups.
    4018                 :            :                  */
    4019                 :      59770 :                 vmf->pte = NULL;
    4020                 :            :         } else {
    4021                 :            :                 /* See comment in pte_alloc_one_map() */
    4022         [ +  - ]:    1095412 :                 if (pmd_devmap_trans_unstable(vmf->pmd))
    4023                 :            :                         return 0;
    4024                 :            :                 /*
    4025                 :            :                  * A regular pmd is established and it can't morph into a huge
    4026                 :            :                  * pmd from under us anymore at this point, because we hold
    4027                 :            :                  * mmap_sem in read mode and khugepaged takes it in write mode.
    4028                 :            :                  * So now it's safe to run pte_offset_map().
    4029                 :            :                  */
    4030         [ +  - ]:    1095412 :                 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
    4031                 :    1095412 :                 vmf->orig_pte = *vmf->pte;
    4032                 :            : 
    4033                 :            :                 /*
    4034                 :            :                  * Some architectures have ptes larger than the word size,
    4035                 :            :                  * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
    4036                 :            :                  * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
    4037                 :            :                  * accesses.  The code below just needs a consistent view
    4038                 :            :                  * for the ifs, and we double-check later anyway with the
    4039                 :            :                  * ptl lock held.  So here a barrier will do.
    4040                 :            :                  */
    4041                 :    1095412 :                 barrier();
    4042         [ +  + ]:    1095412 :                 if (pte_none(vmf->orig_pte)) {
    4043                 :     768723 :                         pte_unmap(vmf->pte);
    4044                 :     768723 :                         vmf->pte = NULL;
    4045                 :            :                 }
    4046                 :            :         }
    4047                 :            : 
    4048         [ +  + ]:    1155182 :         if (!vmf->pte) {
    4049         [ +  + ]:     828493 :                 if (vma_is_anonymous(vmf->vma))
    4050                 :     152864 :                         return do_anonymous_page(vmf);
    4051                 :            :                 else
    4052                 :     675629 :                         return do_fault(vmf);
    4053                 :            :         }
    4054                 :            : 
    4055         [ -  + ]:     326689 :         if (!pte_present(vmf->orig_pte))
    4056                 :          0 :                 return do_swap_page(vmf);
    4057                 :            : 
    4058         [ +  - ]:     326689 :         if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
    4059                 :            :                 return do_numa_page(vmf);
    4060                 :            : 
    4061         [ +  - ]:     326689 :         vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
    4062                 :     326689 :         spin_lock(vmf->ptl);
    4063                 :     326689 :         entry = vmf->orig_pte;
    4064         [ -  + ]:     326689 :         if (unlikely(!pte_same(*vmf->pte, entry)))
    4065                 :          0 :                 goto unlock;
    4066         [ +  - ]:     326689 :         if (vmf->flags & FAULT_FLAG_WRITE) {
    4067         [ +  - ]:     326689 :                 if (!pte_write(entry))
    4068                 :     326689 :                         return do_wp_page(vmf);
    4069                 :          0 :                 entry = pte_mkdirty(entry);
    4070                 :            :         }
    4071                 :          0 :         entry = pte_mkyoung(entry);
    4072                 :          0 :         if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
    4073                 :          0 :                                 vmf->flags & FAULT_FLAG_WRITE)) {
    4074                 :            :                 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
    4075                 :            :         } else {
    4076                 :            :                 /*
    4077                 :            :                  * This is needed only for protection faults but the arch code
    4078                 :            :                  * is not yet telling us if this is a protection fault or not.
    4079                 :            :                  * This still avoids useless tlb flushes for .text page faults
    4080                 :            :                  * with threads.
    4081                 :            :                  */
    4082                 :          0 :                 if (vmf->flags & FAULT_FLAG_WRITE)
    4083                 :          0 :                         flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
    4084                 :            :         }
    4085                 :          0 : unlock:
    4086                 :          0 :         pte_unmap_unlock(vmf->pte, vmf->ptl);
    4087                 :          0 :         return 0;
    4088                 :            : }
    4089                 :            : 
    4090                 :            : /*
    4091                 :            :  * By the time we get here, we already hold the mm semaphore
    4092                 :            :  *
    4093                 :            :  * The mmap_sem may have been released depending on flags and our
    4094                 :            :  * return value.  See filemap_fault() and __lock_page_or_retry().
    4095                 :            :  */
    4096                 :    1155182 : static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
    4097                 :            :                 unsigned long address, unsigned int flags)
    4098                 :            : {
    4099                 :    3465546 :         struct vm_fault vmf = {
    4100                 :            :                 .vma = vma,
    4101                 :    1155182 :                 .address = address & PAGE_MASK,
    4102                 :            :                 .flags = flags,
    4103                 :    1155182 :                 .pgoff = linear_page_index(vma, address),
    4104         [ +  + ]:    1155182 :                 .gfp_mask = __get_fault_gfp_mask(vma),
    4105                 :            :         };
    4106                 :    1155182 :         unsigned int dirty = flags & FAULT_FLAG_WRITE;
    4107                 :    1155182 :         struct mm_struct *mm = vma->vm_mm;
    4108                 :    1155182 :         pgd_t *pgd;
    4109                 :    1155182 :         p4d_t *p4d;
    4110                 :    1155182 :         vm_fault_t ret;
    4111                 :            : 
    4112                 :    1155182 :         pgd = pgd_offset(mm, address);
    4113                 :    1155182 :         p4d = p4d_alloc(mm, pgd, address);
    4114         [ +  - ]:    1155182 :         if (!p4d)
    4115                 :            :                 return VM_FAULT_OOM;
    4116                 :            : 
    4117                 :    1155182 :         vmf.pud = pud_alloc(mm, p4d, address);
    4118         [ +  - ]:    1155182 :         if (!vmf.pud)
    4119                 :            :                 return VM_FAULT_OOM;
    4120                 :    1155182 : retry_pud:
    4121                 :    1155182 :         if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
    4122                 :            :                 ret = create_huge_pud(&vmf);
    4123                 :            :                 if (!(ret & VM_FAULT_FALLBACK))
    4124                 :            :                         return ret;
    4125                 :            :         } else {
    4126                 :    1155182 :                 pud_t orig_pud = *vmf.pud;
    4127                 :            : 
    4128                 :    1155182 :                 barrier();
    4129                 :    1155182 :                 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
    4130                 :            : 
    4131                 :            :                         /* NUMA case for anonymous PUDs would go here */
    4132                 :            : 
    4133                 :            :                         if (dirty && !pud_write(orig_pud)) {
    4134                 :            :                                 ret = wp_huge_pud(&vmf, orig_pud);
    4135                 :            :                                 if (!(ret & VM_FAULT_FALLBACK))
    4136                 :            :                                         return ret;
    4137                 :            :                         } else {
    4138                 :            :                                 huge_pud_set_accessed(&vmf, orig_pud);
    4139                 :            :                                 return 0;
    4140                 :            :                         }
    4141                 :            :                 }
    4142                 :            :         }
    4143                 :            : 
    4144                 :    1155182 :         vmf.pmd = pmd_alloc(mm, vmf.pud, address);
    4145         [ +  - ]:    1155182 :         if (!vmf.pmd)
    4146                 :            :                 return VM_FAULT_OOM;
    4147                 :            : 
    4148                 :            :         /* Huge pud page fault raced with pmd_alloc? */
    4149                 :    1155182 :         if (pud_trans_unstable(vmf.pud))
    4150                 :            :                 goto retry_pud;
    4151                 :            : 
    4152                 :    1155182 :         if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
    4153                 :            :                 ret = create_huge_pmd(&vmf);
    4154                 :            :                 if (!(ret & VM_FAULT_FALLBACK))
    4155                 :            :                         return ret;
    4156                 :            :         } else {
    4157                 :    1155182 :                 pmd_t orig_pmd = *vmf.pmd;
    4158                 :            : 
    4159                 :    1155182 :                 barrier();
    4160                 :    1155182 :                 if (unlikely(is_swap_pmd(orig_pmd))) {
    4161                 :            :                         VM_BUG_ON(thp_migration_supported() &&
    4162                 :            :                                           !is_pmd_migration_entry(orig_pmd));
    4163                 :            :                         if (is_pmd_migration_entry(orig_pmd))
    4164                 :            :                                 pmd_migration_entry_wait(mm, vmf.pmd);
    4165                 :            :                         return 0;
    4166                 :            :                 }
    4167                 :    1155182 :                 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
    4168                 :            :                         if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
    4169                 :            :                                 return do_huge_pmd_numa_page(&vmf, orig_pmd);
    4170                 :            : 
    4171                 :            :                         if (dirty && !pmd_write(orig_pmd)) {
    4172                 :            :                                 ret = wp_huge_pmd(&vmf, orig_pmd);
    4173                 :            :                                 if (!(ret & VM_FAULT_FALLBACK))
    4174                 :            :                                         return ret;
    4175                 :            :                         } else {
    4176                 :            :                                 huge_pmd_set_accessed(&vmf, orig_pmd);
    4177                 :            :                                 return 0;
    4178                 :            :                         }
    4179                 :            :                 }
    4180                 :            :         }
    4181                 :            : 
    4182                 :    1155182 :         return handle_pte_fault(&vmf);
    4183                 :            : }
    4184                 :            : 
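In outline, the function above walks (and, where necessary, allocates) the
upper page-table levels before handing the last level to handle_pte_fault().
A condensed view of the walk, with the huge-page, swap and retry handling
stripped out (purely illustrative):

    pgd = pgd_offset(mm, address);          /* top level always exists */
    p4d = p4d_alloc(mm, pgd, address);      /* folded away on 4-level configs */
    pud = pud_alloc(mm, p4d, address);      /* may call __pud_alloc() below */
    pmd = pmd_alloc(mm, pud, address);      /* may call __pmd_alloc() below */
    /* handle_pte_fault() takes over at the PTE level */
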
    4185                 :            : /*
    4186                 :            :  * By the time we get here, we already hold the mm semaphore
    4187                 :            :  *
    4188                 :            :  * The mmap_sem may have been released depending on flags and our
    4189                 :            :  * return value.  See filemap_fault() and __lock_page_or_retry().
    4190                 :            :  */
    4191                 :    1155182 : vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
    4192                 :            :                 unsigned int flags)
    4193                 :            : {
    4194                 :    1155182 :         vm_fault_t ret;
    4195                 :            : 
    4196                 :    1155182 :         __set_current_state(TASK_RUNNING);
    4197                 :            : 
    4198                 :    1155182 :         count_vm_event(PGFAULT);
    4199                 :    1155182 :         count_memcg_event_mm(vma->vm_mm, PGFAULT);
    4200                 :            : 
    4201                 :            :         /* do counter updates before entering really critical section. */
    4202                 :    1155182 :         check_sync_rss_stat(current);
    4203                 :            : 
    4204         [ +  - ]:    1155182 :         if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
    4205                 :    1155182 :                                             flags & FAULT_FLAG_INSTRUCTION,
    4206                 :    1155182 :                                             flags & FAULT_FLAG_REMOTE))
    4207                 :            :                 return VM_FAULT_SIGSEGV;
    4208                 :            : 
    4209                 :            :         /*
    4210                 :            :          * Enable the memcg OOM handling for faults triggered in user
    4211                 :            :          * space.  Kernel faults are handled more gracefully.
    4212                 :            :          */
    4213                 :    1155182 :         if (flags & FAULT_FLAG_USER)
    4214                 :            :                 mem_cgroup_enter_user_fault();
    4215                 :            : 
    4216         [ -  + ]:    1155182 :         if (unlikely(is_vm_hugetlb_page(vma)))
    4217                 :          0 :                 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
    4218                 :            :         else
    4219                 :    1155182 :                 ret = __handle_mm_fault(vma, address, flags);
    4220                 :            : 
    4221         [ +  + ]:    1155182 :         if (flags & FAULT_FLAG_USER) {
    4222                 :    1088055 :                 mem_cgroup_exit_user_fault();
    4223                 :            :                 /*
    4224                 :            :                  * The task may have entered a memcg OOM situation but
    4225                 :            :                  * if the allocation error was handled gracefully (no
    4226                 :            :                  * VM_FAULT_OOM), there is no need to kill anything.
    4227                 :            :                  * Just clean up the OOM state peacefully.
    4228                 :            :                  */
    4229                 :    1088055 :                 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
    4230                 :            :                         mem_cgroup_oom_synchronize(false);
    4231                 :            :         }
    4232                 :            : 
    4233                 :            :         return ret;
    4234                 :            : }
    4235                 :            : EXPORT_SYMBOL_GPL(handle_mm_fault);
    4236                 :            : 
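handle_mm_fault() is the entry point that every architecture's page-fault
handler funnels into. A heavily trimmed sketch of the usual calling shape,
modelled loosely on the arch fault handlers of this era (vma validation and
the retry loop are elided):

    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
    vm_fault_t fault;

    if (user_mode(regs))
            flags |= FAULT_FLAG_USER;
    if (is_write_fault)
            flags |= FAULT_FLAG_WRITE;

    down_read(&mm->mmap_sem);
    vma = find_vma(mm, address);
    /* ... check vma bounds and access permissions ... */
    fault = handle_mm_fault(vma, address, flags);
    if (fault & VM_FAULT_RETRY) {
            /* mmap_sem was dropped for us; retry once, without ALLOW_RETRY */
            flags &= ~FAULT_FLAG_ALLOW_RETRY;
            flags |= FAULT_FLAG_TRIED;
            /* ... take mmap_sem again and retry ... */
    }
    up_read(&mm->mmap_sem);
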
    4237                 :            : #ifndef __PAGETABLE_P4D_FOLDED
    4238                 :            : /*
    4239                 :            :  * Allocate p4d page table.
    4240                 :            :  * We've already handled the fast-path in-line.
    4241                 :            :  */
    4242                 :          0 : int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
    4243                 :            : {
    4244         [ #  # ]:          0 :         p4d_t *new = p4d_alloc_one(mm, address);
    4245         [ #  # ]:          0 :         if (!new)
    4246                 :            :                 return -ENOMEM;
    4247                 :            : 
    4248                 :          0 :         smp_wmb(); /* See comment in __pte_alloc */
    4249                 :            : 
    4250                 :          0 :         spin_lock(&mm->page_table_lock);
    4251         [ #  # ]:          0 :         if (pgd_present(*pgd))          /* Another has populated it */
    4252                 :          0 :                 p4d_free(mm, new);
    4253                 :            :         else
    4254                 :          0 :                 pgd_populate(mm, pgd, new);
    4255                 :          0 :         spin_unlock(&mm->page_table_lock);
    4256                 :          0 :         return 0;
    4257                 :            : }
    4258                 :            : #endif /* __PAGETABLE_P4D_FOLDED */
    4259                 :            : 
    4260                 :            : #ifndef __PAGETABLE_PUD_FOLDED
    4261                 :            : /*
    4262                 :            :  * Allocate page upper directory.
    4263                 :            :  * We've already handled the fast-path in-line.
    4264                 :            :  */
    4265                 :      45394 : int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
    4266                 :            : {
    4267         [ +  + ]:      45394 :         pud_t *new = pud_alloc_one(mm, address);
    4268         [ +  - ]:      45394 :         if (!new)
    4269                 :            :                 return -ENOMEM;
    4270                 :            : 
    4271                 :      45394 :         smp_wmb(); /* See comment in __pte_alloc */
    4272                 :            : 
    4273                 :      45394 :         spin_lock(&mm->page_table_lock);
    4274                 :            : #ifndef __ARCH_HAS_5LEVEL_HACK
    4275         [ +  - ]:      45394 :         if (!p4d_present(*p4d)) {
    4276                 :      45394 :                 mm_inc_nr_puds(mm);
    4277                 :      45394 :                 p4d_populate(mm, p4d, new);
    4278                 :            :         } else  /* Another has populated it */
    4279                 :          0 :                 pud_free(mm, new);
    4280                 :            : #else
    4281                 :            :         if (!pgd_present(*p4d)) {
    4282                 :            :                 mm_inc_nr_puds(mm);
    4283                 :            :                 pgd_populate(mm, p4d, new);
    4284                 :            :         } else  /* Another has populated it */
    4285                 :            :                 pud_free(mm, new);
    4286                 :            : #endif /* __ARCH_HAS_5LEVEL_HACK */
    4287                 :      45394 :         spin_unlock(&mm->page_table_lock);
    4288                 :      45394 :         return 0;
    4289                 :            : }
    4290                 :            : #endif /* __PAGETABLE_PUD_FOLDED */
    4291                 :            : 
    4292                 :            : #ifndef __PAGETABLE_PMD_FOLDED
    4293                 :            : /*
    4294                 :            :  * Allocate page middle directory.
    4295                 :            :  * We've already handled the fast-path in-line.
    4296                 :            :  */
    4297                 :      62652 : int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
    4298                 :            : {
    4299                 :      62652 :         spinlock_t *ptl;
    4300                 :      62652 :         pmd_t *new = pmd_alloc_one(mm, address);
    4301         [ +  - ]:      62652 :         if (!new)
    4302                 :            :                 return -ENOMEM;
    4303                 :            : 
    4304                 :      62652 :         smp_wmb(); /* See comment in __pte_alloc */
    4305                 :            : 
    4306                 :      62652 :         ptl = pud_lock(mm, pud);
    4307   [ +  -  +  - ]:     125304 :         if (!pud_present(*pud)) {
    4308                 :      62652 :                 mm_inc_nr_pmds(mm);
    4309         [ +  - ]:      62652 :                 pud_populate(mm, pud, new);
    4310                 :            :         } else  /* Another has populated it */
    4311                 :          0 :                 pmd_free(mm, new);
    4312                 :      62652 :         spin_unlock(ptl);
    4313                 :      62652 :         return 0;
    4314                 :            : }
    4315                 :            : #endif /* __PAGETABLE_PMD_FOLDED */
    4316                 :            : 
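__p4d_alloc(), __pud_alloc() and __pmd_alloc() above all follow the same
optimistic pattern: allocate the new table with no locks held, publish its
contents with a write barrier, then recheck under page_table_lock in case a
concurrent fault already populated the entry. Distilled into pseudo-C
(alloc_table/link_table stand in for the level-specific helpers):

    new = alloc_table(mm, address);         /* may sleep; no locks held */
    if (!new)
            return -ENOMEM;

    smp_wmb();                              /* order table init before link */

    spin_lock(&mm->page_table_lock);
    if (upper_entry_present(*upper))        /* lost the race */
            free_table(mm, new);            /* another thread populated it */
    else
            link_table(mm, upper, new);     /* make the new level visible */
    spin_unlock(&mm->page_table_lock);
    return 0;
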
    4317                 :          0 : static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
    4318                 :            :                             struct mmu_notifier_range *range,
    4319                 :            :                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
    4320                 :            : {
    4321                 :          0 :         pgd_t *pgd;
    4322                 :          0 :         p4d_t *p4d;
    4323                 :          0 :         pud_t *pud;
    4324                 :          0 :         pmd_t *pmd;
    4325                 :          0 :         pte_t *ptep;
    4326                 :            : 
    4327                 :          0 :         pgd = pgd_offset(mm, address);
    4328   [ #  #  #  # ]:          0 :         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
    4329                 :          0 :                 goto out;
    4330                 :            : 
    4331                 :          0 :         p4d = p4d_offset(pgd, address);
    4332   [ #  #  #  # ]:          0 :         if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
    4333                 :          0 :                 goto out;
    4334                 :            : 
    4335         [ #  # ]:          0 :         pud = pud_offset(p4d, address);
    4336   [ #  #  #  # ]:          0 :         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
    4337                 :          0 :                 goto out;
    4338                 :            : 
    4339                 :          0 :         pmd = pmd_offset(pud, address);
    4340                 :          0 :         VM_BUG_ON(pmd_trans_huge(*pmd));
    4341                 :            : 
    4342         [ #  # ]:          0 :         if (pmd_huge(*pmd)) {
    4343         [ #  # ]:          0 :                 if (!pmdpp)
    4344                 :          0 :                         goto out;
    4345                 :            : 
    4346         [ #  # ]:          0 :                 if (range) {
    4347                 :          0 :                         mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
    4348                 :            :                                                 NULL, mm, address & PMD_MASK,
    4349                 :          0 :                                                 (address & PMD_MASK) + PMD_SIZE);
    4350                 :          0 :                         mmu_notifier_invalidate_range_start(range);
    4351                 :            :                 }
    4352                 :          0 :                 *ptlp = pmd_lock(mm, pmd);
    4353         [ #  # ]:          0 :                 if (pmd_huge(*pmd)) {
    4354                 :          0 :                         *pmdpp = pmd;
    4355                 :          0 :                         return 0;
    4356                 :            :                 }
    4357                 :          0 :                 spin_unlock(*ptlp);
    4358         [ #  # ]:          0 :                 if (range)
    4359                 :          0 :                         mmu_notifier_invalidate_range_end(range);
    4360                 :            :         }
    4361                 :            : 
    4362   [ #  #  #  # ]:          0 :         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
    4363                 :          0 :                 goto out;
    4364                 :            : 
    4365         [ #  # ]:          0 :         if (range) {
    4366                 :          0 :                 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
    4367                 :            :                                         address & PAGE_MASK,
    4368                 :          0 :                                         (address & PAGE_MASK) + PAGE_SIZE);
    4369                 :          0 :                 mmu_notifier_invalidate_range_start(range);
    4370                 :            :         }
    4371         [ #  # ]:          0 :         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
    4372         [ #  # ]:          0 :         if (!pte_present(*ptep))
    4373                 :          0 :                 goto unlock;
    4374                 :          0 :         *ptepp = ptep;
    4375                 :          0 :         return 0;
    4376                 :            : unlock:
    4377                 :          0 :         pte_unmap_unlock(ptep, *ptlp);
    4378         [ #  # ]:          0 :         if (range)
    4379                 :          0 :                 mmu_notifier_invalidate_range_end(range);
    4380                 :          0 : out:
    4381                 :            :         return -EINVAL;
    4382                 :            : }
    4383                 :            : 
    4384                 :          0 : static inline int follow_pte(struct mm_struct *mm, unsigned long address,
    4385                 :            :                              pte_t **ptepp, spinlock_t **ptlp)
    4386                 :            : {
    4387                 :          0 :         int res;
    4388                 :            : 
    4389                 :            :         /* (void) is needed to make gcc happy */
    4390                 :          0 :         (void) __cond_lock(*ptlp,
    4391                 :            :                            !(res = __follow_pte_pmd(mm, address, NULL,
    4392                 :            :                                                     ptepp, NULL, ptlp)));
    4393                 :          0 :         return res;
    4394                 :            : }
    4395                 :            : 
    4396                 :          0 : int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
    4397                 :            :                    struct mmu_notifier_range *range,
    4398                 :            :                    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
    4399                 :            : {
    4400                 :          0 :         int res;
    4401                 :            : 
    4402                 :            :         /* (void) is needed to make gcc happy */
    4403                 :          0 :         (void) __cond_lock(*ptlp,
    4404                 :            :                            !(res = __follow_pte_pmd(mm, address, range,
    4405                 :            :                                                     ptepp, pmdpp, ptlp)));
    4406                 :          0 :         return res;
    4407                 :            : }
    4408                 :            : EXPORT_SYMBOL(follow_pte_pmd);
    4409                 :            : 
    4410                 :            : /**
    4411                 :            :  * follow_pfn - look up PFN at a user virtual address
    4412                 :            :  * @vma: memory mapping
    4413                 :            :  * @address: user virtual address
    4414                 :            :  * @pfn: location to store found PFN
    4415                 :            :  *
    4416                 :            :  * Only IO mappings and raw PFN mappings are allowed.
    4417                 :            :  *
    4418                 :            :  * Return: zero and the pfn at @pfn on success, -ve otherwise.
    4419                 :            :  */
    4420                 :          0 : int follow_pfn(struct vm_area_struct *vma, unsigned long address,
    4421                 :            :         unsigned long *pfn)
    4422                 :            : {
    4423                 :          0 :         int ret = -EINVAL;
    4424                 :          0 :         spinlock_t *ptl;
    4425                 :          0 :         pte_t *ptep;
    4426                 :            : 
    4427         [ #  # ]:          0 :         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
    4428                 :            :                 return ret;
    4429                 :            : 
    4430                 :          0 :         ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
    4431         [ #  # ]:          0 :         if (ret)
    4432                 :            :                 return ret;
    4433         [ #  # ]:          0 :         *pfn = pte_pfn(*ptep);
    4434                 :          0 :         pte_unmap_unlock(ptep, ptl);
    4435                 :          0 :         return 0;
    4436                 :            : }
    4437                 :            : EXPORT_SYMBOL(follow_pfn);
    4438                 :            : 
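follow_pfn() is a driver-facing helper: given a VM_IO or VM_PFNMAP VMA it
resolves a user virtual address to the raw page frame number. A hedged usage
sketch (vma and addr are assumed to come from the caller; error handling
trimmed):

    unsigned long pfn;
    phys_addr_t phys;

    if (follow_pfn(vma, addr, &pfn))
            return -EFAULT;
    phys = (phys_addr_t)pfn << PAGE_SHIFT;  /* physical address of the page */
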
    4439                 :            : #ifdef CONFIG_HAVE_IOREMAP_PROT
    4440                 :          0 : int follow_phys(struct vm_area_struct *vma,
    4441                 :            :                 unsigned long address, unsigned int flags,
    4442                 :            :                 unsigned long *prot, resource_size_t *phys)
    4443                 :            : {
    4444                 :          0 :         int ret = -EINVAL;
    4445                 :          0 :         pte_t *ptep, pte;
    4446                 :          0 :         spinlock_t *ptl;
    4447                 :            : 
    4448         [ #  # ]:          0 :         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
    4449                 :          0 :                 goto out;
    4450                 :            : 
    4451         [ #  # ]:          0 :         if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
    4452                 :          0 :                 goto out;
    4453                 :          0 :         pte = *ptep;
    4454                 :            : 
    4455   [ #  #  #  # ]:          0 :         if ((flags & FOLL_WRITE) && !pte_write(pte))
    4456                 :          0 :                 goto unlock;
    4457                 :            : 
    4458         [ #  # ]:          0 :         *prot = pgprot_val(pte_pgprot(pte));
    4459         [ #  # ]:          0 :         *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
    4460                 :            : 
    4461                 :          0 :         ret = 0;
    4462                 :          0 : unlock:
    4463                 :          0 :         pte_unmap_unlock(ptep, ptl);
    4464                 :          0 : out:
    4465                 :          0 :         return ret;
    4466                 :            : }
    4467                 :            : 
    4468                 :          0 : int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
    4469                 :            :                         void *buf, int len, int write)
    4470                 :            : {
    4471                 :          0 :         resource_size_t phys_addr;
    4472                 :          0 :         unsigned long prot = 0;
    4473                 :          0 :         void __iomem *maddr;
    4474                 :          0 :         int offset = addr & (PAGE_SIZE-1);
    4475                 :            : 
    4476         [ #  # ]:          0 :         if (follow_phys(vma, addr, write, &prot, &phys_addr))
    4477                 :            :                 return -EINVAL;
    4478                 :            : 
    4479                 :          0 :         maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
    4480         [ #  # ]:          0 :         if (!maddr)
    4481                 :            :                 return -ENOMEM;
    4482                 :            : 
    4483         [ #  # ]:          0 :         if (write)
    4484                 :          0 :                 memcpy_toio(maddr + offset, buf, len);
    4485                 :            :         else
    4486                 :          0 :                 memcpy_fromio(buf, maddr + offset, len);
    4487                 :          0 :         iounmap(maddr);
    4488                 :            : 
    4489                 :          0 :         return len;
    4490                 :            : }
    4491                 :            : EXPORT_SYMBOL_GPL(generic_access_phys);
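/*
 * A minimal usage sketch (illustrative, not part of memory.c):
 * generic_access_phys() is written to serve as a vm_operations_struct
 * ->access handler for MMIO mappings, which is the hook that
 * __access_remote_vm() below falls back on when get_user_pages fails
 * on a VM_IO | VM_PFNMAP area.  /dev/mem wires it up this way; the
 * structure name example_mmio_vm_ops is hypothetical.
 */
static const struct vm_operations_struct example_mmio_vm_ops = {
        .access = generic_access_phys,
};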
    4492                 :            : #endif
    4493                 :            : 
    4494                 :            : /*
    4495                 :            :  * Access another process' address space as given in @mm.  If @tsk is
    4496                 :            :  * non-NULL, use it for page fault accounting.
    4497                 :            :  */
    4498                 :        748 : int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
    4499                 :            :                 unsigned long addr, void *buf, int len, unsigned int gup_flags)
    4500                 :            : {
    4501                 :        748 :         struct vm_area_struct *vma;
    4502                 :        748 :         void *old_buf = buf;
    4503                 :        748 :         int write = gup_flags & FOLL_WRITE;
    4504                 :            : 
    4505         [ +  - ]:        748 :         if (down_read_killable(&mm->mmap_sem))
    4506                 :            :                 return 0;
    4507                 :            : 
    4508                 :            :         /* ignore errors, just check how much was successfully transferred */
    4509         [ +  + ]:       1496 :         while (len) {
    4510                 :        748 :                 int bytes, ret, offset;
    4511                 :        748 :                 void *maddr;
    4512                 :        748 :                 struct page *page = NULL;
    4513                 :            : 
    4514                 :        748 :                 ret = get_user_pages_remote(tsk, mm, addr, 1,
    4515                 :            :                                 gup_flags, &page, &vma, NULL);
    4516         [ -  + ]:        748 :                 if (ret <= 0) {
    4517                 :            : #ifndef CONFIG_HAVE_IOREMAP_PROT
    4518                 :            :                         break;
    4519                 :            : #else
    4520                 :            :                         /*
    4521                 :            :                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
    4522                 :            :                          * we can access using slightly different code.
    4523                 :            :                          */
    4524                 :          0 :                         vma = find_vma(mm, addr);
    4525   [ #  #  #  # ]:          0 :                         if (!vma || vma->vm_start > addr)
    4526                 :            :                                 break;
    4527   [ #  #  #  # ]:          0 :                         if (vma->vm_ops && vma->vm_ops->access)
    4528                 :          0 :                                 ret = vma->vm_ops->access(vma, addr, buf,
    4529                 :            :                                                           len, write);
    4530         [ #  # ]:          0 :                         if (ret <= 0)
    4531                 :            :                                 break;
    4532                 :            :                         bytes = ret;
    4533                 :            : #endif
    4534                 :            :                 } else {
    4535                 :        748 :                         bytes = len;
    4536                 :        748 :                         offset = addr & (PAGE_SIZE-1);
    4537         [ -  + ]:        748 :                         if (bytes > PAGE_SIZE-offset)
    4538                 :          0 :                                 bytes = PAGE_SIZE-offset;
    4539                 :            : 
    4540                 :        748 :                         maddr = kmap(page);
    4541         [ -  + ]:        748 :                         if (write) {
    4542                 :          0 :                                 copy_to_user_page(vma, page, addr,
    4543                 :            :                                                   maddr + offset, buf, bytes);
    4544                 :          0 :                                 set_page_dirty_lock(page);
    4545                 :            :                         } else {
    4546                 :        748 :                                 copy_from_user_page(vma, page, addr,
    4547                 :            :                                                     buf, maddr + offset, bytes);
    4548                 :            :                         }
    4549                 :        748 :                         kunmap(page);
    4550                 :        748 :                         put_page(page);
    4551                 :            :                 }
    4552                 :        748 :                 len -= bytes;
    4553                 :        748 :                 buf += bytes;
    4554                 :        748 :                 addr += bytes;
    4555                 :            :         }
    4556                 :        748 :         up_read(&mm->mmap_sem);
    4557                 :            : 
    4558                 :        748 :         return buf - old_buf;
    4559                 :            : }
    4560                 :            : 
    4561                 :            : /**
    4562                 :            :  * access_remote_vm - access another process' address space
    4563                 :            :  * @mm:         the mm_struct of the target address space
    4564                 :            :  * @addr:       start address to access
    4565                 :            :  * @buf:        source or destination buffer
    4566                 :            :  * @len:        number of bytes to transfer
    4567                 :            :  * @gup_flags:  flags modifying lookup behaviour
    4568                 :            :  *
    4569                 :            :  * The caller must hold a reference on @mm.
    4570                 :            :  *
    4571                 :            :  * Return: number of bytes copied from source to destination.
    4572                 :            :  */
    4573                 :        748 : int access_remote_vm(struct mm_struct *mm, unsigned long addr,
    4574                 :            :                 void *buf, int len, unsigned int gup_flags)
    4575                 :            : {
    4576                 :        748 :         return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
    4577                 :            : }
    4578                 :            : 
    4579                 :            : /*
    4580                 :            :  * Access another process' address space.
    4581                 :            :  * The source/target buffer must be in kernel space.
    4582                 :            :  * Do not walk the page tables directly; use get_user_pages().
    4583                 :            :  */
    4584                 :          0 : int access_process_vm(struct task_struct *tsk, unsigned long addr,
    4585                 :            :                 void *buf, int len, unsigned int gup_flags)
    4586                 :            : {
    4587                 :          0 :         struct mm_struct *mm;
    4588                 :          0 :         int ret;
    4589                 :            : 
    4590                 :          0 :         mm = get_task_mm(tsk);
    4591         [ #  # ]:          0 :         if (!mm)
    4592                 :            :                 return 0;
    4593                 :            : 
    4594                 :          0 :         ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
    4595                 :            : 
    4596                 :          0 :         mmput(mm);
    4597                 :            : 
    4598                 :          0 :         return ret;
    4599                 :            : }
    4600                 :            : EXPORT_SYMBOL_GPL(access_process_vm);
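/*
 * A minimal usage sketch (illustrative, not part of memory.c) modelled
 * on the ptrace peek path: read one word from another task's address
 * space.  FOLL_FORCE mirrors what ptrace passes so that pages the
 * tracee cannot read itself are still accessible to the tracer.  The
 * helper name example_peek_word is hypothetical.
 */
static int example_peek_word(struct task_struct *child, unsigned long addr,
                             unsigned long *word)
{
        int copied = access_process_vm(child, addr, word, sizeof(*word),
                                       FOLL_FORCE);

        return copied == sizeof(*word) ? 0 : -EIO;
}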
    4601                 :            : 
    4602                 :            : /*
    4603                 :            :  * Print the name of the VMA containing the address ip.
    4604                 :            :  */
    4605                 :          0 : void print_vma_addr(char *prefix, unsigned long ip)
    4606                 :            : {
    4607                 :          0 :         struct mm_struct *mm = current->mm;
    4608                 :          0 :         struct vm_area_struct *vma;
    4609                 :            : 
    4610                 :            :         /*
    4611                 :            :          * We might be running from an atomic context, so we cannot sleep.
    4612                 :            :          */
    4613         [ #  # ]:          0 :         if (!down_read_trylock(&mm->mmap_sem))
    4614                 :            :                 return;
    4615                 :            : 
    4616                 :          0 :         vma = find_vma(mm, ip);
    4617   [ #  #  #  # ]:          0 :         if (vma && vma->vm_file) {
    4618                 :          0 :                 struct file *f = vma->vm_file;
    4619                 :          0 :                 char *buf = (char *)__get_free_page(GFP_NOWAIT);
    4620         [ #  # ]:          0 :                 if (buf) {
    4621                 :          0 :                         char *p;
    4622                 :            : 
    4623                 :          0 :                         p = file_path(f, buf, PAGE_SIZE);
    4624         [ #  # ]:          0 :                         if (IS_ERR(p))
    4625                 :          0 :                                 p = "?";
    4626                 :          0 :                         printk("%s%s[%lx+%lx]", prefix, kbasename(p),
    4627                 :            :                                         vma->vm_start,
    4628         [ #  # ]:          0 :                                         vma->vm_end - vma->vm_start);
    4629                 :          0 :                         free_page((unsigned long)buf);
    4630                 :            :                 }
    4631                 :            :         }
    4632                 :          0 :         up_read(&mm->mmap_sem);
    4633                 :            : }
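/*
 * For illustration (not part of memory.c): architecture fault handlers
 * call print_vma_addr() when logging user-space faults, along the lines
 * of x86's show_signal_msg():
 *
 *         printk("%s[%d]: segfault at %lx", tsk->comm,
 *                task_pid_nr(tsk), address);
 *         print_vma_addr(KERN_CONT " in ", regs->ip);
 *
 * which appends something like " in libfoo.so[7f3a00000000+22000]".
 */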
    4634                 :            : 
    4635                 :            : #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
    4636                 :            : void __might_fault(const char *file, int line)
    4637                 :            : {
    4638                 :            :         /*
    4639                 :            :          * Some code (nfs/sunrpc) uses socket ops on kernel memory while
    4640                 :            :          * holding the mmap_sem; this is safe because kernel memory doesn't
    4641                 :            :          * get paged out, so we'll never actually fault, and the annotations
    4642                 :            :          * below would only generate false positives.
    4643                 :            :          */
    4644                 :            :         if (uaccess_kernel())
    4645                 :            :                 return;
    4646                 :            :         if (pagefault_disabled())
    4647                 :            :                 return;
    4648                 :            :         __might_sleep(file, line, 0);
    4649                 :            : #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
    4650                 :            :         if (current->mm)
    4651                 :            :                 might_lock_read(&current->mm->mmap_sem);
    4652                 :            : #endif
    4653                 :            : }
    4654                 :            : EXPORT_SYMBOL(__might_fault);
    4655                 :            : #endif
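/*
 * For illustration (not part of memory.c): might_fault(), which expands
 * to __might_fault() under the debug options above, is the annotation
 * the uaccess helpers place before touching user memory; conceptually a
 * copy_to_user() implementation does:
 *
 *         might_fault();          // may sleep: complain if atomic
 *         return raw_copy_to_user(dst, src, len);
 */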
    4656                 :            : 
    4657                 :            : #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
    4658                 :            : /*
    4659                 :            :  * Process all subpages of the specified huge page with the specified
    4660                 :            :  * operation.  The target subpage will be processed last to keep its
    4661                 :            :  * cache lines hot.
    4662                 :            :  */
    4663                 :          0 : static inline void process_huge_page(
    4664                 :            :         unsigned long addr_hint, unsigned int pages_per_huge_page,
    4665                 :            :         void (*process_subpage)(unsigned long addr, int idx, void *arg),
    4666                 :            :         void *arg)
    4667                 :            : {
    4668                 :          0 :         int i, n, base, l;
    4669                 :          0 :         unsigned long addr = addr_hint &
    4670                 :          0 :                 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
    4671                 :            : 
    4672                 :            :         /* Process target subpage last to keep its cache lines hot */
    4673                 :          0 :         might_sleep();
    4674                 :          0 :         n = (addr_hint - addr) / PAGE_SIZE;
    4675         [ #  # ]:          0 :         if (2 * n <= pages_per_huge_page) {
    4676                 :            :                 /* If target subpage in first half of huge page */
    4677                 :          0 :                 base = 0;
    4678                 :          0 :                 l = n;
    4679                 :            :                 /* Process subpages at the end of the huge page */
    4680         [ #  # ]:          0 :                 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
    4681                 :          0 :                         cond_resched();
    4682                 :          0 :                         process_subpage(addr + i * PAGE_SIZE, i, arg);
    4683                 :            :                 }
    4684                 :            :         } else {
    4685                 :            :                 /* If target subpage in second half of huge page */
    4686                 :          0 :                 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
    4687                 :          0 :                 l = pages_per_huge_page - n;
    4688                 :            :                 /* Process subpages at the beginning of the huge page */
    4689         [ #  # ]:          0 :                 for (i = 0; i < base; i++) {
    4690                 :          0 :                         cond_resched();
    4691                 :          0 :                         process_subpage(addr + i * PAGE_SIZE, i, arg);
    4692                 :            :                 }
    4693                 :            :         }
    4694                 :            :         /*
    4695                 :            :          * Process the remaining subpages in a left-right-left-right pattern
    4696                 :            :          * towards the target subpage
    4697                 :            :          */
    4698         [ #  # ]:          0 :         for (i = 0; i < l; i++) {
    4699                 :          0 :                 int left_idx = base + i;
    4700                 :          0 :                 int right_idx = base + 2 * l - 1 - i;
    4701                 :            : 
    4702                 :          0 :                 cond_resched();
    4703                 :          0 :                 process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
    4704                 :          0 :                 cond_resched();
    4705                 :          0 :                 process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
    4706                 :            :         }
    4707                 :          0 : }
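/*
 * Worked example of the ordering above (illustrative, not part of
 * memory.c): with pages_per_huge_page == 8 and the target subpage at
 * index n == 2 (first half), the tail loop runs first (7, 6, 5, 4),
 * then the left-right walk closes in on the target: 0, 3, 1, 2.  The
 * target subpage is always processed last, so its cache lines are hot
 * when the faulting task resumes.
 */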
    4708                 :            : 
    4709                 :            : static void clear_gigantic_page(struct page *page,
    4710                 :            :                                 unsigned long addr,
    4711                 :            :                                 unsigned int pages_per_huge_page)
    4712                 :            : {
    4713                 :            :         int i;
    4714                 :            :         struct page *p = page;
    4715                 :            : 
    4716                 :            :         might_sleep();
    4717                 :            :         for (i = 0; i < pages_per_huge_page;
    4718                 :            :              i++, p = mem_map_next(p, page, i)) {
    4719                 :            :                 cond_resched();
    4720                 :            :                 clear_user_highpage(p, addr + i * PAGE_SIZE);
    4721                 :            :         }
    4722                 :            : }
    4723                 :            : 
    4724                 :          0 : static void clear_subpage(unsigned long addr, int idx, void *arg)
    4725                 :            : {
    4726                 :          0 :         struct page *page = arg;
    4727                 :            : 
    4728                 :          0 :         clear_user_highpage(page + idx, addr);
    4729                 :          0 : }
    4730                 :            : 
    4731                 :          0 : void clear_huge_page(struct page *page,
    4732                 :            :                      unsigned long addr_hint, unsigned int pages_per_huge_page)
    4733                 :            : {
    4734                 :          0 :         unsigned long addr = addr_hint &
    4735                 :            :                 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
    4736                 :            : 
    4737         [ #  # ]:          0 :         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
    4738                 :          0 :                 clear_gigantic_page(page, addr, pages_per_huge_page);
    4739                 :          0 :                 return;
    4740                 :            :         }
    4741                 :            : 
    4742                 :          0 :         process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
    4743                 :            : }
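/*
 * For illustration (not part of memory.c): hugetlb's no-page fault path
 * invokes this roughly as
 *
 *         clear_huge_page(page, address, pages_per_huge_page(h));
 *
 * passing the faulting address as addr_hint so that the subpage the
 * task actually touched is cleared last.
 */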
    4744                 :            : 
    4745                 :            : static void copy_user_gigantic_page(struct page *dst, struct page *src,
    4746                 :            :                                     unsigned long addr,
    4747                 :            :                                     struct vm_area_struct *vma,
    4748                 :            :                                     unsigned int pages_per_huge_page)
    4749                 :            : {
    4750                 :            :         int i;
    4751                 :            :         struct page *dst_base = dst;
    4752                 :            :         struct page *src_base = src;
    4753                 :            : 
    4754                 :            :         for (i = 0; i < pages_per_huge_page; ) {
    4755                 :            :                 cond_resched();
    4756                 :            :                 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
    4757                 :            : 
    4758                 :            :                 i++;
    4759                 :            :                 dst = mem_map_next(dst, dst_base, i);
    4760                 :            :                 src = mem_map_next(src, src_base, i);
    4761                 :            :         }
    4762                 :            : }
    4763                 :            : 
    4764                 :            : struct copy_subpage_arg {
    4765                 :            :         struct page *dst;
    4766                 :            :         struct page *src;
    4767                 :            :         struct vm_area_struct *vma;
    4768                 :            : };
    4769                 :            : 
    4770                 :          0 : static void copy_subpage(unsigned long addr, int idx, void *arg)
    4771                 :            : {
    4772                 :          0 :         struct copy_subpage_arg *copy_arg = arg;
    4773                 :            : 
    4774                 :          0 :         copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
    4775                 :            :                            addr, copy_arg->vma);
    4776                 :          0 : }
    4777                 :            : 
    4778                 :          0 : void copy_user_huge_page(struct page *dst, struct page *src,
    4779                 :            :                          unsigned long addr_hint, struct vm_area_struct *vma,
    4780                 :            :                          unsigned int pages_per_huge_page)
    4781                 :            : {
    4782                 :          0 :         unsigned long addr = addr_hint &
    4783                 :            :                 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
    4784                 :          0 :         struct copy_subpage_arg arg = {
    4785                 :            :                 .dst = dst,
    4786                 :            :                 .src = src,
    4787                 :            :                 .vma = vma,
    4788                 :            :         };
    4789                 :            : 
    4790         [ #  # ]:          0 :         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
    4791                 :          0 :                 copy_user_gigantic_page(dst, src, addr, vma,
    4792                 :            :                                         pages_per_huge_page);
    4793                 :          0 :                 return;
    4794                 :            :         }
    4795                 :            : 
    4796                 :          0 :         process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
    4797                 :            : }
    4798                 :            : 
    4799                 :          0 : long copy_huge_page_from_user(struct page *dst_page,
    4800                 :            :                                 const void __user *usr_src,
    4801                 :            :                                 unsigned int pages_per_huge_page,
    4802                 :            :                                 bool allow_pagefault)
    4803                 :            : {
    4804                 :          0 :         void *src = (void *)usr_src;
    4805                 :          0 :         void *page_kaddr;
    4806                 :          0 :         unsigned long i, rc = 0;
    4807                 :          0 :         unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
    4808                 :            : 
    4809         [ #  # ]:          0 :         for (i = 0; i < pages_per_huge_page; i++) {
    4810         [ #  # ]:          0 :                 if (allow_pagefault)
    4811                 :          0 :                         page_kaddr = kmap(dst_page + i);
    4812                 :            :                 else
    4813                 :          0 :                         page_kaddr = kmap_atomic(dst_page + i);
    4814                 :          0 :                 rc = copy_from_user(page_kaddr,
    4815         [ #  # ]:          0 :                                 (const void __user *)(src + i * PAGE_SIZE),
    4816                 :            :                                 PAGE_SIZE);
    4817         [ #  # ]:          0 :                 if (allow_pagefault)
    4818                 :            :                         kunmap(dst_page + i);
    4819                 :            :                 else
    4820                 :          0 :                         kunmap_atomic(page_kaddr);
    4821                 :            : 
    4822                 :          0 :                 ret_val -= (PAGE_SIZE - rc);
    4823         [ #  # ]:          0 :                 if (rc)
    4824                 :            :                         break;
    4825                 :            : 
    4826                 :          0 :                 cond_resched();
    4827                 :            :         }
    4828                 :          0 :         return ret_val;
    4829                 :            : }
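/*
 * Note (editorial, not part of memory.c): the value returned above is
 * the number of bytes left uncopied (0 on complete success), not an
 * errno.  The userfaultfd UFFDIO_COPY path for hugetlb relies on this:
 * it first attempts the copy with allow_pagefault == false while
 * holding its locks and, on a short copy, drops them and retries with
 * allow_pagefault == true.
 */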
    4830                 :            : #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
    4831                 :            : 
    4832                 :            : #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
    4833                 :            : 
    4834                 :            : static struct kmem_cache *page_ptl_cachep;
    4835                 :            : 
    4836                 :            : void __init ptlock_cache_init(void)
    4837                 :            : {
    4838                 :            :         page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
    4839                 :            :                         SLAB_PANIC, NULL);
    4840                 :            : }
    4841                 :            : 
    4842                 :            : bool ptlock_alloc(struct page *page)
    4843                 :            : {
    4844                 :            :         spinlock_t *ptl;
    4845                 :            : 
    4846                 :            :         ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
    4847                 :            :         if (!ptl)
    4848                 :            :                 return false;
    4849                 :            :         page->ptl = ptl;
    4850                 :            :         return true;
    4851                 :            : }
    4852                 :            : 
    4853                 :            : void ptlock_free(struct page *page)
    4854                 :            : {
    4855                 :            :         kmem_cache_free(page_ptl_cachep, page->ptl);
    4856                 :            : }
    4857                 :            : #endif
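/*
 * For illustration (not part of memory.c): ALLOC_SPLIT_PTLOCKS is
 * selected when spinlock_t is too large to embed directly in struct
 * page (e.g. with lockdep enabled), so each page-table page draws its
 * lock from the cache above instead: ptlock_init() calls ptlock_alloc()
 * when a page table is set up, and ptlock_free() returns the lock when
 * it is torn down.
 */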

Generated by: LCOV version 1.14