LCOV - code coverage report
Current view: top level - mm - mlock.c (source / functions)
Test:         combined.info
Date:         2022-04-01 13:59:58

              Hit    Total    Coverage
Lines:          0      346       0.0 %
Functions:      0       30       0.0 %
Branches:       0      262       0.0 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  *      linux/mm/mlock.c
       4                 :            :  *
       5                 :            :  *  (C) Copyright 1995 Linus Torvalds
       6                 :            :  *  (C) Copyright 2002 Christoph Hellwig
       7                 :            :  */
       8                 :            : 
       9                 :            : #include <linux/capability.h>
      10                 :            : #include <linux/mman.h>
      11                 :            : #include <linux/mm.h>
      12                 :            : #include <linux/sched/user.h>
      13                 :            : #include <linux/swap.h>
      14                 :            : #include <linux/swapops.h>
      15                 :            : #include <linux/pagemap.h>
      16                 :            : #include <linux/pagevec.h>
      17                 :            : #include <linux/mempolicy.h>
      18                 :            : #include <linux/syscalls.h>
      19                 :            : #include <linux/sched.h>
      20                 :            : #include <linux/export.h>
      21                 :            : #include <linux/rmap.h>
      22                 :            : #include <linux/mmzone.h>
      23                 :            : #include <linux/hugetlb.h>
      24                 :            : #include <linux/memcontrol.h>
      25                 :            : #include <linux/mm_inline.h>
      26                 :            : 
      27                 :            : #include "internal.h"
      28                 :            : 
      29                 :          0 : bool can_do_mlock(void)
      30                 :            : {
      31         [ #  # ]:          0 :         if (rlimit(RLIMIT_MEMLOCK) != 0)
      32                 :            :                 return true;
      33         [ #  # ]:          0 :         if (capable(CAP_IPC_LOCK))
      34                 :          0 :                 return true;
      35                 :            :         return false;
      36                 :            : }
      37                 :            : EXPORT_SYMBOL(can_do_mlock);
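
can_do_mlock() is the gate for every mlock-family syscall below: locking is permitted
when RLIMIT_MEMLOCK is non-zero or the caller holds CAP_IPC_LOCK. A minimal userspace
sketch of the same probe, using only the standard getrlimit() API (illustrative; not
part of this file):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl;

            if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
                    perror("getrlimit");
                    return 1;
            }
            /* A soft limit of 0 means mlock() fails with EPERM
             * unless the process has CAP_IPC_LOCK. */
            printf("RLIMIT_MEMLOCK: soft=%llu hard=%llu\n",
                   (unsigned long long)rl.rlim_cur,
                   (unsigned long long)rl.rlim_max);
            return 0;
    }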
      38                 :            : 
      39                 :            : /*
       40                 :            :  * Mlocked pages are marked with the PageMlocked() flag for efficient testing
      41                 :            :  * in vmscan and, possibly, the fault path; and to support semi-accurate
      42                 :            :  * statistics.
      43                 :            :  *
      44                 :            :  * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
      45                 :            :  * be placed on the LRU "unevictable" list, rather than the [in]active lists.
      46                 :            :  * The unevictable list is an LRU sibling list to the [in]active lists.
      47                 :            :  * PageUnevictable is set to indicate the unevictable state.
      48                 :            :  *
      49                 :            :  * When lazy mlocking via vmscan, it is important to ensure that the
      50                 :            :  * vma's VM_LOCKED status is not concurrently being modified, otherwise we
      51                 :            :  * may have mlocked a page that is being munlocked. So lazy mlock must take
      52                 :            :  * the mmap_sem for read, and verify that the vma really is locked
      53                 :            :  * (see mm/rmap.c).
      54                 :            :  */
      55                 :            : 
      56                 :            : /*
      57                 :            :  *  LRU accounting for clear_page_mlock()
      58                 :            :  */
      59                 :          0 : void clear_page_mlock(struct page *page)
      60                 :            : {
      61   [ #  #  #  # ]:          0 :         if (!TestClearPageMlocked(page))
      62                 :            :                 return;
      63                 :            : 
      64                 :          0 :         mod_zone_page_state(page_zone(page), NR_MLOCK,
      65                 :            :                             -hpage_nr_pages(page));
      66                 :          0 :         count_vm_event(UNEVICTABLE_PGCLEARED);
      67                 :            :         /*
      68                 :            :          * The previous TestClearPageMlocked() corresponds to the smp_mb()
      69                 :            :          * in __pagevec_lru_add_fn().
      70                 :            :          *
      71                 :            :          * See __pagevec_lru_add_fn for more explanation.
      72                 :            :          */
      73         [ #  # ]:          0 :         if (!isolate_lru_page(page)) {
      74                 :          0 :                 putback_lru_page(page);
      75                 :            :         } else {
      76                 :            :                 /*
       77                 :            :                  * We lost the race. The page has already moved to the evictable list.
      78                 :            :                  */
      79   [ #  #  #  # ]:          0 :                 if (PageUnevictable(page))
      80                 :          0 :                         count_vm_event(UNEVICTABLE_PGSTRANDED);
      81                 :            :         }
      82                 :            : }
      83                 :            : 
      84                 :            : /*
      85                 :            :  * Mark page as mlocked if not already.
      86                 :            :  * If page on LRU, isolate and putback to move to unevictable list.
      87                 :            :  */
      88                 :          0 : void mlock_vma_page(struct page *page)
      89                 :            : {
      90                 :            :         /* Serialize with page migration */
      91   [ #  #  #  # ]:          0 :         BUG_ON(!PageLocked(page));
      92                 :            : 
      93                 :          0 :         VM_BUG_ON_PAGE(PageTail(page), page);
      94                 :          0 :         VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
      95                 :            : 
      96   [ #  #  #  # ]:          0 :         if (!TestSetPageMlocked(page)) {
      97                 :          0 :                 mod_zone_page_state(page_zone(page), NR_MLOCK,
      98                 :            :                                     hpage_nr_pages(page));
      99                 :          0 :                 count_vm_event(UNEVICTABLE_PGMLOCKED);
     100         [ #  # ]:          0 :                 if (!isolate_lru_page(page))
     101                 :          0 :                         putback_lru_page(page);
     102                 :            :         }
     103                 :          0 : }
     104                 :            : 
     105                 :            : /*
     106                 :            :  * Isolate a page from LRU with optional get_page() pin.
     107                 :            :  * Assumes lru_lock already held and page already pinned.
     108                 :            :  */
     109                 :          0 : static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
     110                 :            : {
     111   [ #  #  #  # ]:          0 :         if (PageLRU(page)) {
     112                 :          0 :                 struct lruvec *lruvec;
     113                 :            : 
     114         [ #  # ]:          0 :                 lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
     115         [ #  # ]:          0 :                 if (getpage)
     116         [ #  # ]:          0 :                         get_page(page);
     117         [ #  # ]:          0 :                 ClearPageLRU(page);
     118         [ #  # ]:          0 :                 del_page_from_lru_list(page, lruvec, page_lru(page));
     119                 :          0 :                 return true;
     120                 :            :         }
     121                 :            : 
     122                 :            :         return false;
     123                 :            : }
     124                 :            : 
     125                 :            : /*
     126                 :            :  * Finish munlock after successful page isolation
     127                 :            :  *
     128                 :            :  * Page must be locked. This is a wrapper for try_to_munlock()
     129                 :            :  * and putback_lru_page() with munlock accounting.
     130                 :            :  */
     131                 :          0 : static void __munlock_isolated_page(struct page *page)
     132                 :            : {
     133                 :            :         /*
     134                 :            :          * Optimization: if the page was mapped just once, that's our mapping
     135                 :            :          * and we don't need to check all the other vmas.
     136                 :            :          */
     137         [ #  # ]:          0 :         if (page_mapcount(page) > 1)
     138                 :          0 :                 try_to_munlock(page);
     139                 :            : 
      140                 :            :         /* Did try_to_munlock() succeed or punt? */
     141   [ #  #  #  # ]:          0 :         if (!PageMlocked(page))
     142                 :          0 :                 count_vm_event(UNEVICTABLE_PGMUNLOCKED);
     143                 :            : 
     144                 :          0 :         putback_lru_page(page);
     145                 :          0 : }
     146                 :            : 
     147                 :            : /*
     148                 :            :  * Accounting for page isolation fail during munlock
     149                 :            :  *
     150                 :            :  * Performs accounting when page isolation fails in munlock. There is nothing
     151                 :            :  * else to do because it means some other task has already removed the page
     152                 :            :  * from the LRU. putback_lru_page() will take care of removing the page from
     153                 :            :  * the unevictable list, if necessary. vmscan [page_referenced()] will move
     154                 :            :  * the page back to the unevictable list if some other vma has it mlocked.
     155                 :            :  */
     156                 :          0 : static void __munlock_isolation_failed(struct page *page)
     157                 :            : {
     158         [ #  # ]:          0 :         if (PageUnevictable(page))
     159                 :          0 :                 __count_vm_event(UNEVICTABLE_PGSTRANDED);
     160                 :            :         else
     161                 :          0 :                 __count_vm_event(UNEVICTABLE_PGMUNLOCKED);
     162                 :          0 : }
     163                 :            : 
     164                 :            : /**
     165                 :            :  * munlock_vma_page - munlock a vma page
     166                 :            :  * @page: page to be unlocked, either a normal page or THP page head
     167                 :            :  *
     168                 :            :  * returns the size of the page as a page mask (0 for normal page,
     169                 :            :  *         HPAGE_PMD_NR - 1 for THP head page)
     170                 :            :  *
     171                 :            :  * called from munlock()/munmap() path with page supposedly on the LRU.
     172                 :            :  * When we munlock a page, because the vma where we found the page is being
     173                 :            :  * munlock()ed or munmap()ed, we want to check whether other vmas hold the
     174                 :            :  * page locked so that we can leave it on the unevictable lru list and not
     175                 :            :  * bother vmscan with it.  However, to walk the page's rmap list in
     176                 :            :  * try_to_munlock() we must isolate the page from the LRU.  If some other
     177                 :            :  * task has removed the page from the LRU, we won't be able to do that.
     178                 :            :  * So we clear the PageMlocked as we might not get another chance.  If we
     179                 :            :  * can't isolate the page, we leave it for putback_lru_page() and vmscan
     180                 :            :  * [page_referenced()/try_to_unmap()] to deal with.
     181                 :            :  */
     182                 :          0 : unsigned int munlock_vma_page(struct page *page)
     183                 :            : {
     184                 :          0 :         int nr_pages;
     185         [ #  # ]:          0 :         pg_data_t *pgdat = page_pgdat(page);
     186                 :            : 
     187                 :            :         /* For try_to_munlock() and to serialize with page migration */
     188   [ #  #  #  # ]:          0 :         BUG_ON(!PageLocked(page));
     189                 :            : 
     190                 :          0 :         VM_BUG_ON_PAGE(PageTail(page), page);
     191                 :            : 
     192                 :            :         /*
     193                 :            :          * Serialize with any parallel __split_huge_page_refcount() which
     194                 :            :          * might otherwise copy PageMlocked to part of the tail pages before
     195                 :            :          * we clear it in the head page. It also stabilizes hpage_nr_pages().
     196                 :            :          */
     197                 :          0 :         spin_lock_irq(&pgdat->lru_lock);
     198                 :            : 
     199   [ #  #  #  # ]:          0 :         if (!TestClearPageMlocked(page)) {
     200                 :            :                 /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
     201                 :          0 :                 nr_pages = 1;
     202                 :          0 :                 goto unlock_out;
     203                 :            :         }
     204                 :            : 
     205                 :          0 :         nr_pages = hpage_nr_pages(page);
     206                 :          0 :         __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
     207                 :            : 
     208         [ #  # ]:          0 :         if (__munlock_isolate_lru_page(page, true)) {
     209                 :          0 :                 spin_unlock_irq(&pgdat->lru_lock);
     210                 :          0 :                 __munlock_isolated_page(page);
     211                 :          0 :                 goto out;
     212                 :            :         }
     213                 :          0 :         __munlock_isolation_failed(page);
     214                 :            : 
     215                 :          0 : unlock_out:
     216                 :          0 :         spin_unlock_irq(&pgdat->lru_lock);
     217                 :            : 
     218                 :          0 : out:
     219                 :          0 :         return nr_pages - 1;
     220                 :            : }
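
The page-mask return value feeds the caller's stride: munlock_vma_pages_range()
computes page_increm = 1 + page_mask. A worked example, assuming 4 KiB pages and
2 MiB THPs (HPAGE_PMD_NR == 512): for a THP head page nr_pages is 512, the function
returns 511, and the caller advances start by (1 + 511) * PAGE_SIZE = 2 MiB; for a
normal page it returns 0 and the scan advances by a single page.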
     221                 :            : 
     222                 :            : /*
      223                 :            :  * Convert a get_user_pages() return value to a POSIX mlock() error.
     224                 :            :  */
     225                 :          0 : static int __mlock_posix_error_return(long retval)
     226                 :            : {
     227                 :          0 :         if (retval == -EFAULT)
     228                 :            :                 retval = -ENOMEM;
     229         [ #  # ]:          0 :         else if (retval == -ENOMEM)
     230                 :          0 :                 retval = -EAGAIN;
     231                 :          0 :         return retval;
     232                 :            : }
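
The remapping reflects the POSIX-specified errno values for mlock(): an address range
that is not fully mapped must surface as ENOMEM rather than EFAULT, and inability to
lock as EAGAIN. A small demonstration of the user-visible effect (a sketch; assumes a
non-zero RLIMIT_MEMLOCK so the EPERM path in do_mlock() does not fire first):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* 0x1000 is page-aligned and (almost certainly) unmapped:
             * mlock() reports ENOMEM here, not EFAULT. */
            if (mlock((void *)0x1000, 4096) != 0)
                    printf("mlock: %s\n", strerror(errno));
            return 0;
    }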
     233                 :            : 
     234                 :            : /*
      235                 :            :  * Prepare page for fast batched LRU putback via __putback_lru_fast()
      236                 :            :  *
      237                 :            :  * The fast path is available only for evictable pages with a single mapping;
      238                 :            :  * then we can bypass the per-cpu pvec and get better performance.
      239                 :            :  * When mapcount > 1 we need try_to_munlock(), which can fail.
      240                 :            :  * When !page_evictable(), we need the full redo logic of putback_lru_page()
      241                 :            :  * to avoid leaving an evictable page on the unevictable list.
      242                 :            :  *
      243                 :            :  * In case of success, @page is added to @pvec and @pgrescued is incremented
      244                 :            :  * if the page was previously unevictable. @page is also unlocked.
     245                 :            :  */
     246                 :          0 : static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
     247                 :            :                 int *pgrescued)
     248                 :            : {
     249                 :          0 :         VM_BUG_ON_PAGE(PageLRU(page), page);
     250                 :          0 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     251                 :            : 
     252   [ #  #  #  # ]:          0 :         if (page_mapcount(page) <= 1 && page_evictable(page)) {
     253         [ #  # ]:          0 :                 pagevec_add(pvec, page);
     254   [ #  #  #  # ]:          0 :                 if (TestClearPageUnevictable(page))
     255                 :          0 :                         (*pgrescued)++;
     256                 :          0 :                 unlock_page(page);
     257                 :          0 :                 return true;
     258                 :            :         }
     259                 :            : 
     260                 :            :         return false;
     261                 :            : }
     262                 :            : 
     263                 :            : /*
     264                 :            :  * Putback multiple evictable pages to the LRU
     265                 :            :  *
     266                 :            :  * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
     267                 :            :  * the pages might have meanwhile become unevictable but that is OK.
     268                 :            :  */
     269                 :          0 : static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
     270                 :            : {
     271         [ #  # ]:          0 :         count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
     272                 :            :         /*
      273                 :            :          * __pagevec_lru_add() calls release_pages(), so we don't call
      274                 :            :          * put_page() explicitly.
     275                 :            :          */
     276                 :          0 :         __pagevec_lru_add(pvec);
     277         [ #  # ]:          0 :         count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
     278                 :          0 : }
     279                 :            : 
     280                 :            : /*
     281                 :            :  * Munlock a batch of pages from the same zone
     282                 :            :  *
      283                 :            :  * The work is split into two main phases. The first phase clears the Mlocked flag
     284                 :            :  * and attempts to isolate the pages, all under a single zone lru lock.
     285                 :            :  * The second phase finishes the munlock only for pages where isolation
     286                 :            :  * succeeded.
     287                 :            :  *
     288                 :            :  * Note that the pagevec may be modified during the process.
     289                 :            :  */
     290                 :          0 : static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
     291                 :            : {
     292                 :          0 :         int i;
     293                 :          0 :         int nr = pagevec_count(pvec);
     294                 :          0 :         int delta_munlocked = -nr;
     295                 :          0 :         struct pagevec pvec_putback;
     296                 :          0 :         int pgrescued = 0;
     297                 :            : 
     298                 :          0 :         pagevec_init(&pvec_putback);
     299                 :            : 
     300                 :            :         /* Phase 1: page isolation */
     301                 :          0 :         spin_lock_irq(&zone->zone_pgdat->lru_lock);
     302         [ #  # ]:          0 :         for (i = 0; i < nr; i++) {
     303                 :          0 :                 struct page *page = pvec->pages[i];
     304                 :            : 
     305   [ #  #  #  # ]:          0 :                 if (TestClearPageMlocked(page)) {
     306                 :            :                         /*
      307                 :            :                          * We already have a pin from follow_page_mask()
     308                 :            :                          * so we can spare the get_page() here.
     309                 :            :                          */
     310         [ #  # ]:          0 :                         if (__munlock_isolate_lru_page(page, false))
     311                 :          0 :                                 continue;
     312                 :            :                         else
     313                 :          0 :                                 __munlock_isolation_failed(page);
     314                 :            :                 } else {
     315                 :          0 :                         delta_munlocked++;
     316                 :            :                 }
     317                 :            : 
     318                 :            :                 /*
     319                 :            :                  * We won't be munlocking this page in the next phase
     320                 :            :                  * but we still need to release the follow_page_mask()
     321                 :            :                  * pin. We cannot do it under lru_lock however. If it's
     322                 :            :                  * the last pin, __page_cache_release() would deadlock.
     323                 :            :                  */
     324                 :          0 :                 pagevec_add(&pvec_putback, pvec->pages[i]);
     325                 :          0 :                 pvec->pages[i] = NULL;
     326                 :            :         }
     327                 :          0 :         __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
     328                 :          0 :         spin_unlock_irq(&zone->zone_pgdat->lru_lock);
     329                 :            : 
     330                 :            :         /* Now we can release pins of pages that we are not munlocking */
     331         [ #  # ]:          0 :         pagevec_release(&pvec_putback);
     332                 :            : 
     333                 :            :         /* Phase 2: page munlock */
     334         [ #  # ]:          0 :         for (i = 0; i < nr; i++) {
     335                 :          0 :                 struct page *page = pvec->pages[i];
     336                 :            : 
     337         [ #  # ]:          0 :                 if (page) {
     338                 :          0 :                         lock_page(page);
     339         [ #  # ]:          0 :                         if (!__putback_lru_fast_prepare(page, &pvec_putback,
     340                 :            :                                         &pgrescued)) {
     341                 :            :                                 /*
     342                 :            :                                  * Slow path. We don't want to lose the last
     343                 :            :                                  * pin before unlock_page()
     344                 :            :                                  */
     345         [ #  # ]:          0 :                                 get_page(page); /* for putback_lru_page() */
     346                 :          0 :                                 __munlock_isolated_page(page);
     347                 :          0 :                                 unlock_page(page);
     348                 :          0 :                                 put_page(page); /* from follow_page_mask() */
     349                 :            :                         }
     350                 :            :                 }
     351                 :            :         }
     352                 :            : 
     353                 :            :         /*
     354                 :            :          * Phase 3: page putback for pages that qualified for the fast path
     355                 :            :          * This will also call put_page() to return pin from follow_page_mask()
     356                 :            :          */
     357         [ #  # ]:          0 :         if (pagevec_count(&pvec_putback))
     358                 :          0 :                 __putback_lru_fast(&pvec_putback, pgrescued);
     359                 :          0 : }
     360                 :            : 
     361                 :            : /*
     362                 :            :  * Fill up pagevec for __munlock_pagevec using pte walk
     363                 :            :  *
      364                 :            :  * The function expects that the struct page corresponding to the @start address
      365                 :            :  * is a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
     366                 :            :  *
     367                 :            :  * The rest of @pvec is filled by subsequent pages within the same pmd and same
      368                 :            :  * zone, as long as the ptes are present and vm_normal_page() succeeds. These
     369                 :            :  * pages also get pinned.
     370                 :            :  *
     371                 :            :  * Returns the address of the next page that should be scanned. This equals
     372                 :            :  * @start + PAGE_SIZE when no page could be added by the pte walk.
     373                 :            :  */
     374                 :          0 : static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
     375                 :            :                         struct vm_area_struct *vma, struct zone *zone,
     376                 :            :                         unsigned long start, unsigned long end)
     377                 :            : {
     378                 :          0 :         pte_t *pte;
     379                 :          0 :         spinlock_t *ptl;
     380                 :            : 
     381                 :            :         /*
     382                 :            :          * Initialize pte walk starting at the already pinned page where we
     383                 :            :          * are sure that there is a pte, as it was pinned under the same
     384                 :            :          * mmap_sem write op.
     385                 :            :          */
      386                 :          0 :         pte = get_locked_pte(vma->vm_mm, start, &ptl);
     387                 :            :         /* Make sure we do not cross the page table boundary */
     388         [ #  # ]:          0 :         end = pgd_addr_end(start, end);
     389         [ #  # ]:          0 :         end = p4d_addr_end(start, end);
     390         [ #  # ]:          0 :         end = pud_addr_end(start, end);
     391         [ #  # ]:          0 :         end = pmd_addr_end(start, end);
     392                 :            : 
     393                 :            :         /* The page next to the pinned page is the first we will try to get */
     394                 :          0 :         start += PAGE_SIZE;
     395         [ #  # ]:          0 :         while (start < end) {
     396                 :          0 :                 struct page *page = NULL;
     397                 :          0 :                 pte++;
     398         [ #  # ]:          0 :                 if (pte_present(*pte))
     399                 :          0 :                         page = vm_normal_page(vma, start, *pte);
     400                 :            :                 /*
      401                 :            :                  * Break if the page could not be obtained or the page's
      402                 :            :                  * node+zone does not match.
     403                 :            :                  */
     404   [ #  #  #  # ]:          0 :                 if (!page || page_zone(page) != zone)
     405                 :            :                         break;
     406                 :            : 
     407                 :            :                 /*
     408                 :            :                  * Do not use pagevec for PTE-mapped THP,
     409                 :            :                  * munlock_vma_pages_range() will handle them.
     410                 :            :                  */
     411         [ #  # ]:          0 :                 if (PageTransCompound(page))
     412                 :            :                         break;
     413                 :            : 
     414         [ #  # ]:          0 :                 get_page(page);
     415                 :            :                 /*
      416                 :            :                  * Increase the address that will be returned *before* the
      417                 :            :                  * eventual break when adding the page fills up the pvec.
     418                 :            :                  */
     419                 :          0 :                 start += PAGE_SIZE;
     420         [ #  # ]:          0 :                 if (pagevec_add(pvec, page) == 0)
     421                 :            :                         break;
     422                 :            :         }
     423                 :          0 :         pte_unmap_unlock(pte, ptl);
     424                 :          0 :         return start;
     425                 :            : }
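
The pgd/p4d/pud/pmd_addr_end() calls above clamp @end so that the walk never crosses
out of the page table that @pte points into. A worked example, assuming x86-64 with
2 MiB pmds: for start = 0x200000 and end = 0x800000, pmd_addr_end() reduces end to
0x400000, so the pte walk can add at most the 511 pages remaining in that pmd before
it stops at the boundary.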
     426                 :            : 
     427                 :            : /*
      428                 :            :  * munlock_vma_pages_range() - munlock all pages in the vma range.
     429                 :            :  * @vma - vma containing range to be munlock()ed.
     430                 :            :  * @start - start address in @vma of the range
     431                 :            :  * @end - end of range in @vma.
     432                 :            :  *
     433                 :            :  *  For mremap(), munmap() and exit().
     434                 :            :  *
     435                 :            :  * Called with @vma VM_LOCKED.
     436                 :            :  *
     437                 :            :  * Returns with VM_LOCKED cleared.  Callers must be prepared to
     438                 :            :  * deal with this.
     439                 :            :  *
     440                 :            :  * We don't save and restore VM_LOCKED here because pages are
      441                 :            :  * still on the lru.  In the unmap path, pages might be scanned by reclaim
     442                 :            :  * and re-mlocked by try_to_{munlock|unmap} before we unmap and
     443                 :            :  * free them.  This will result in freeing mlocked pages.
     444                 :            :  */
     445                 :          0 : void munlock_vma_pages_range(struct vm_area_struct *vma,
     446                 :            :                              unsigned long start, unsigned long end)
     447                 :            : {
     448                 :          0 :         vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
     449                 :            : 
     450         [ #  # ]:          0 :         while (start < end) {
     451                 :          0 :                 struct page *page;
     452                 :          0 :                 unsigned int page_mask = 0;
     453                 :          0 :                 unsigned long page_increm;
     454                 :          0 :                 struct pagevec pvec;
     455                 :          0 :                 struct zone *zone;
     456                 :            : 
     457                 :          0 :                 pagevec_init(&pvec);
     458                 :            :                 /*
     459                 :            :                  * Although FOLL_DUMP is intended for get_dump_page(),
     460                 :            :                  * it just so happens that its special treatment of the
     461                 :            :                  * ZERO_PAGE (returning an error instead of doing get_page)
     462                 :            :                  * suits munlock very well (and if somehow an abnormal page
     463                 :            :                  * has sneaked into the range, we won't oops here: great).
     464                 :            :                  */
     465                 :          0 :                 page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
     466                 :            : 
     467   [ #  #  #  # ]:          0 :                 if (page && !IS_ERR(page)) {
     468                 :          0 :                         if (PageTransTail(page)) {
     469                 :            :                                 VM_BUG_ON_PAGE(PageMlocked(page), page);
     470                 :            :                                 put_page(page); /* follow_page_mask() */
     471                 :          0 :                         } else if (PageTransHuge(page)) {
     472                 :            :                                 lock_page(page);
     473                 :            :                                 /*
     474                 :            :                                  * Any THP page found by follow_page_mask() may
     475                 :            :                                  * have gotten split before reaching
     476                 :            :                                  * munlock_vma_page(), so we need to compute
     477                 :            :                                  * the page_mask here instead.
     478                 :            :                                  */
     479                 :            :                                 page_mask = munlock_vma_page(page);
     480                 :            :                                 unlock_page(page);
     481                 :            :                                 put_page(page); /* follow_page_mask() */
     482                 :            :                         } else {
     483                 :            :                                 /*
     484                 :            :                                  * Non-huge pages are handled in batches via
     485                 :            :                                  * pagevec. The pin from follow_page_mask()
      486                 :            :                                  * prevents them from being collapsed into a THP.
     487                 :            :                                  */
     488                 :          0 :                                 pagevec_add(&pvec, page);
     489                 :          0 :                                 zone = page_zone(page);
     490                 :            : 
     491                 :            :                                 /*
     492                 :            :                                  * Try to fill the rest of pagevec using fast
     493                 :            :                                  * pte walk. This will also update start to
     494                 :            :                                  * the next page to process. Then munlock the
     495                 :            :                                  * pagevec.
     496                 :            :                                  */
     497                 :          0 :                                 start = __munlock_pagevec_fill(&pvec, vma,
     498                 :            :                                                 zone, start, end);
     499                 :          0 :                                 __munlock_pagevec(&pvec, zone);
     500                 :          0 :                                 goto next;
     501                 :            :                         }
     502                 :            :                 }
     503                 :          0 :                 page_increm = 1 + page_mask;
     504                 :          0 :                 start += page_increm * PAGE_SIZE;
     505                 :          0 : next:
     506                 :          0 :                 cond_resched();
     507                 :            :         }
     508                 :          0 : }
     509                 :            : 
     510                 :            : /*
      511                 :            :  * mlock_fixup - handle mlock[all]/munlock[all] requests.
     512                 :            :  *
     513                 :            :  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
     514                 :            :  * munlock is a no-op.  However, for some special vmas, we go ahead and
     515                 :            :  * populate the ptes.
     516                 :            :  *
     517                 :            :  * For vmas that pass the filters, merge/split as appropriate.
     518                 :            :  */
     519                 :          0 : static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
     520                 :            :         unsigned long start, unsigned long end, vm_flags_t newflags)
     521                 :            : {
     522                 :          0 :         struct mm_struct *mm = vma->vm_mm;
     523                 :          0 :         pgoff_t pgoff;
     524                 :          0 :         int nr_pages;
     525                 :          0 :         int ret = 0;
     526                 :          0 :         int lock = !!(newflags & VM_LOCKED);
     527                 :          0 :         vm_flags_t old_flags = vma->vm_flags;
     528                 :            : 
     529   [ #  #  #  #  :          0 :         if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
                   #  # ]
     530         [ #  # ]:          0 :             is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
     531                 :            :             vma_is_dax(vma))
     532                 :            :                 /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
     533                 :          0 :                 goto out;
     534                 :            : 
     535                 :          0 :         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
     536                 :          0 :         *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
     537                 :            :                           vma->vm_file, pgoff, vma_policy(vma),
     538                 :            :                           vma->vm_userfaultfd_ctx);
     539         [ #  # ]:          0 :         if (*prev) {
     540                 :          0 :                 vma = *prev;
     541                 :          0 :                 goto success;
     542                 :            :         }
     543                 :            : 
     544         [ #  # ]:          0 :         if (start != vma->vm_start) {
     545                 :          0 :                 ret = split_vma(mm, vma, start, 1);
     546         [ #  # ]:          0 :                 if (ret)
     547                 :          0 :                         goto out;
     548                 :            :         }
     549                 :            : 
     550         [ #  # ]:          0 :         if (end != vma->vm_end) {
     551                 :          0 :                 ret = split_vma(mm, vma, end, 0);
     552         [ #  # ]:          0 :                 if (ret)
     553                 :          0 :                         goto out;
     554                 :            :         }
     555                 :            : 
     556                 :          0 : success:
     557                 :            :         /*
     558                 :            :          * Keep track of amount of locked VM.
     559                 :            :          */
     560                 :          0 :         nr_pages = (end - start) >> PAGE_SHIFT;
     561         [ #  # ]:          0 :         if (!lock)
     562                 :          0 :                 nr_pages = -nr_pages;
     563         [ #  # ]:          0 :         else if (old_flags & VM_LOCKED)
     564                 :          0 :                 nr_pages = 0;
     565                 :          0 :         mm->locked_vm += nr_pages;
     566                 :            : 
     567                 :            :         /*
     568                 :            :          * vm_flags is protected by the mmap_sem held in write mode.
     569                 :            :          * It's okay if try_to_unmap_one unmaps a page just after we
     570                 :            :          * set VM_LOCKED, populate_vma_page_range will bring it back.
     571                 :            :          */
     572                 :            : 
     573         [ #  # ]:          0 :         if (lock)
     574                 :          0 :                 vma->vm_flags = newflags;
     575                 :            :         else
     576                 :          0 :                 munlock_vma_pages_range(vma, start, end);
     577                 :            : 
     578                 :          0 : out:
     579                 :          0 :         *prev = vma;
     580                 :          0 :         return ret;
     581                 :            : }
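
A worked example of the split path, with hypothetical addresses: if a vma spans
[0x10000, 0x40000) and mlock() is applied to [0x20000, 0x30000), neither boundary
coincides with the vma's, so split_vma() runs twice, leaving three vmas of which only
the middle one gets VM_LOCKED; nr_pages is then (0x30000 - 0x20000) >> PAGE_SHIFT = 16
pages with 4 KiB pages.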
     582                 :            : 
     583                 :          0 : static int apply_vma_lock_flags(unsigned long start, size_t len,
     584                 :            :                                 vm_flags_t flags)
     585                 :            : {
     586                 :          0 :         unsigned long nstart, end, tmp;
     587                 :          0 :         struct vm_area_struct * vma, * prev;
     588                 :          0 :         int error;
     589                 :            : 
     590                 :          0 :         VM_BUG_ON(offset_in_page(start));
     591                 :          0 :         VM_BUG_ON(len != PAGE_ALIGN(len));
     592                 :          0 :         end = start + len;
     593         [ #  # ]:          0 :         if (end < start)
     594                 :            :                 return -EINVAL;
     595         [ #  # ]:          0 :         if (end == start)
     596                 :            :                 return 0;
     597                 :          0 :         vma = find_vma(current->mm, start);
     598   [ #  #  #  # ]:          0 :         if (!vma || vma->vm_start > start)
     599                 :            :                 return -ENOMEM;
     600                 :            : 
     601                 :          0 :         prev = vma->vm_prev;
     602         [ #  # ]:          0 :         if (start > vma->vm_start)
     603                 :          0 :                 prev = vma;
     604                 :            : 
     605                 :            :         for (nstart = start ; ; ) {
     606                 :          0 :                 vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
     607                 :            : 
     608                 :          0 :                 newflags |= flags;
     609                 :            : 
     610                 :            :                 /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
     611                 :          0 :                 tmp = vma->vm_end;
     612                 :          0 :                 if (tmp > end)
     613                 :            :                         tmp = end;
     614                 :          0 :                 error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
     615         [ #  # ]:          0 :                 if (error)
     616                 :            :                         break;
     617                 :          0 :                 nstart = tmp;
     618                 :          0 :                 if (nstart < prev->vm_end)
     619                 :            :                         nstart = prev->vm_end;
     620         [ #  # ]:          0 :                 if (nstart >= end)
     621                 :            :                         break;
     622                 :            : 
     623                 :          0 :                 vma = prev->vm_next;
     624   [ #  #  #  # ]:          0 :                 if (!vma || vma->vm_start != nstart) {
     625                 :            :                         error = -ENOMEM;
     626                 :            :                         break;
     627                 :            :                 }
     628                 :            :         }
     629                 :            :         return error;
     630                 :            : }
     631                 :            : 
     632                 :            : /*
      633                 :            :  * Go through the vma areas and sum the size of the mlocked
      634                 :            :  * vma pages, returned as the value.
      635                 :            :  * Note that the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
      636                 :            :  * is also counted.
      637                 :            :  * Return value: count of previously mlocked pages
     638                 :            :  */
     639                 :          0 : static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
     640                 :            :                 unsigned long start, size_t len)
     641                 :            : {
     642                 :          0 :         struct vm_area_struct *vma;
     643                 :          0 :         unsigned long count = 0;
     644                 :            : 
     645         [ #  # ]:          0 :         if (mm == NULL)
     646                 :          0 :                 mm = current->mm;
     647                 :            : 
     648                 :          0 :         vma = find_vma(mm, start);
     649         [ #  # ]:          0 :         if (vma == NULL)
     650                 :          0 :                 vma = mm->mmap;
     651                 :            : 
     652         [ #  # ]:          0 :         for (; vma ; vma = vma->vm_next) {
     653         [ #  # ]:          0 :                 if (start >= vma->vm_end)
     654                 :          0 :                         continue;
     655         [ #  # ]:          0 :                 if (start + len <=  vma->vm_start)
     656                 :            :                         break;
     657         [ #  # ]:          0 :                 if (vma->vm_flags & VM_LOCKED) {
     658         [ #  # ]:          0 :                         if (start > vma->vm_start)
     659                 :          0 :                                 count -= (start - vma->vm_start);
     660         [ #  # ]:          0 :                         if (start + len < vma->vm_end) {
     661                 :          0 :                                 count += start + len - vma->vm_start;
     662                 :          0 :                                 break;
     663                 :            :                         }
     664                 :          0 :                         count += vma->vm_end - vma->vm_start;
     665                 :            :                 }
     666                 :            :         }
     667                 :            : 
     668                 :          0 :         return count >> PAGE_SHIFT;
     669                 :            : }
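
A worked example with 4 KiB pages: one VM_LOCKED vma spans [0x10000, 0x20000) and the
request is start = 0x14000, len = 0x8000. The start > vm_start branch subtracts 0x4000,
then the start + len < vm_end branch adds 0x1c000 - 0x10000 = 0xc000 and breaks,
leaving count = 0x8000, i.e. 8 pages: exactly the overlap between the request and the
locked vma. The transient underflow of the unsigned count is harmless because the later
addition restores it before the shift.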
     670                 :            : 
     671                 :          0 : static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
     672                 :            : {
     673                 :          0 :         unsigned long locked;
     674                 :          0 :         unsigned long lock_limit;
     675                 :          0 :         int error = -ENOMEM;
     676                 :            : 
     677                 :          0 :         start = untagged_addr(start);
     678                 :            : 
     679         [ #  # ]:          0 :         if (!can_do_mlock())
     680                 :            :                 return -EPERM;
     681                 :            : 
     682                 :          0 :         len = PAGE_ALIGN(len + (offset_in_page(start)));
     683                 :          0 :         start &= PAGE_MASK;
     684                 :            : 
     685                 :          0 :         lock_limit = rlimit(RLIMIT_MEMLOCK);
     686                 :          0 :         lock_limit >>= PAGE_SHIFT;
     687                 :          0 :         locked = len >> PAGE_SHIFT;
     688                 :            : 
     689         [ #  # ]:          0 :         if (down_write_killable(&current->mm->mmap_sem))
     690                 :            :                 return -EINTR;
     691                 :            : 
     692         [ #  # ]:          0 :         locked += current->mm->locked_vm;
     693   [ #  #  #  # ]:          0 :         if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
     694                 :            :                 /*
      695                 :            :                  * It is possible that the regions requested intersect with
      696                 :            :                  * previously mlocked areas; that part is already in "mm->locked_vm"
      697                 :            :                  * and should not be counted toward the new mlock total. So check
      698                 :            :                  * and adjust the locked count if necessary.
     699                 :            :                  */
     700                 :          0 :                 locked -= count_mm_mlocked_page_nr(current->mm,
     701                 :            :                                 start, len);
     702                 :            :         }
     703                 :            : 
     704                 :            :         /* check against resource limits */
     705   [ #  #  #  # ]:          0 :         if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
     706                 :          0 :                 error = apply_vma_lock_flags(start, len, flags);
     707                 :            : 
     708                 :          0 :         up_write(&current->mm->mmap_sem);
     709         [ #  # ]:          0 :         if (error)
     710                 :            :                 return error;
     711                 :            : 
     712                 :          0 :         error = __mm_populate(start, len, 0);
     713         [ #  # ]:          0 :         if (error)
     714         [ #  # ]:          0 :                 return __mlock_posix_error_return(error);
     715                 :            :         return 0;
     716                 :            : }
     717                 :            : 
     718                 :          0 : SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
     719                 :            : {
     720                 :          0 :         return do_mlock(start, len, VM_LOCKED);
     721                 :            : }
     722                 :            : 
     723                 :          0 : SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
     724                 :            : {
     725                 :          0 :         vm_flags_t vm_flags = VM_LOCKED;
     726                 :            : 
     727   [ #  #  #  # ]:          0 :         if (flags & ~MLOCK_ONFAULT)
     728                 :            :                 return -EINVAL;
     729                 :            : 
     730   [ #  #  #  # ]:          0 :         if (flags & MLOCK_ONFAULT)
     731                 :          0 :                 vm_flags |= VM_LOCKONFAULT;
     732                 :            : 
     733                 :          0 :         return do_mlock(start, len, vm_flags);
     734                 :            : }
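
For reference, MLOCK_ONFAULT (which becomes VM_LOCKONFAULT above) as seen from
userspace; a sketch assuming glibc 2.27+ for the mlock2() wrapper (older systems must
use syscall(__NR_mlock2, ...) instead):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;

            /* Unlike plain mlock(), nothing is populated here; each
             * page is locked when it is first faulted in. */
            if (mlock2(buf, len, MLOCK_ONFAULT) != 0) {
                    perror("mlock2");
                    return 1;
            }

            buf[0] = 1;     /* faults in and locks only the first page */

            munlock(buf, len);
            munmap(buf, len);
            return 0;
    }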
     735                 :            : 
     736                 :          0 : SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
     737                 :            : {
     738                 :          0 :         int ret;
     739                 :            : 
     740                 :          0 :         start = untagged_addr(start);
     741                 :            : 
     742                 :          0 :         len = PAGE_ALIGN(len + (offset_in_page(start)));
     743                 :          0 :         start &= PAGE_MASK;
     744                 :            : 
     745         [ #  # ]:          0 :         if (down_write_killable(&current->mm->mmap_sem))
     746                 :            :                 return -EINTR;
     747                 :          0 :         ret = apply_vma_lock_flags(start, len, 0);
     748                 :          0 :         up_write(&current->mm->mmap_sem);
     749                 :            : 
     750                 :          0 :         return ret;
     751                 :            : }
     752                 :            : 
     753                 :            : /*
      754                 :            :  * Take the MCL_* flags passed into mlockall() (or 0 if called from munlockall())
     755                 :            :  * and translate into the appropriate modifications to mm->def_flags and/or the
     756                 :            :  * flags for all current VMAs.
     757                 :            :  *
     758                 :            :  * There are a couple of subtleties with this.  If mlockall() is called multiple
      759                 :            :  * times with different flags, the values do not necessarily stack.  If mlockall()
      760                 :            :  * is called once including the MCL_FUTURE flag and then a second time without
     761                 :            :  * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
     762                 :            :  */
     763                 :          0 : static int apply_mlockall_flags(int flags)
     764                 :            : {
     765                 :          0 :         struct vm_area_struct * vma, * prev = NULL;
     766                 :          0 :         vm_flags_t to_add = 0;
     767                 :            : 
     768         [ #  # ]:          0 :         current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
     769         [ #  # ]:          0 :         if (flags & MCL_FUTURE) {
     770         [ #  # ]:          0 :                 current->mm->def_flags |= VM_LOCKED;
     771                 :            : 
     772         [ #  # ]:          0 :                 if (flags & MCL_ONFAULT)
     773                 :          0 :                         current->mm->def_flags |= VM_LOCKONFAULT;
     774                 :            : 
     775         [ #  # ]:          0 :                 if (!(flags & MCL_CURRENT))
     776                 :          0 :                         goto out;
     777                 :            :         }
     778                 :            : 
     779         [ #  # ]:          0 :         if (flags & MCL_CURRENT) {
     780                 :          0 :                 to_add |= VM_LOCKED;
     781         [ #  # ]:          0 :                 if (flags & MCL_ONFAULT)
     782                 :          0 :                         to_add |= VM_LOCKONFAULT;
     783                 :            :         }
     784                 :            : 
     785         [ #  # ]:          0 :         for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
     786                 :          0 :                 vm_flags_t newflags;
     787                 :            : 
     788                 :          0 :                 newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
     789                 :          0 :                 newflags |= to_add;
     790                 :            : 
      791                 :            :                 /* Ignore errors; keep going with the next vma */
     792                 :          0 :                 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
     793                 :          0 :                 cond_resched();
     794                 :            :         }
     795                 :          0 : out:
     796                 :          0 :         return 0;
     797                 :            : }
     798                 :            : 
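The non-stacking behaviour described in the comment above is visible from userspace; a minimal sketch, assuming the caller has CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK:

  #include <stdio.h>
  #include <sys/mman.h>

  int main(void)
  {
          /* Lock current mappings and all future ones. */
          if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                  perror("mlockall");
                  return 1;
          }
          /*
           * Second call without MCL_FUTURE: mm->def_flags loses
           * VM_LOCKED, so mappings created from here on are no
           * longer locked automatically.
           */
          if (mlockall(MCL_CURRENT) != 0) {
                  perror("mlockall");
                  return 1;
          }
          return 0;
  }
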
     799                 :          0 : SYSCALL_DEFINE1(mlockall, int, flags)
     800                 :            : {
     801                 :          0 :         unsigned long lock_limit;
     802                 :          0 :         int ret;
     803                 :            : 
      804   [ #  #  #  #  #  # ]:          0 :         if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
     805                 :            :             flags == MCL_ONFAULT)
     806                 :            :                 return -EINVAL;
     807                 :            : 
     808         [ #  # ]:          0 :         if (!can_do_mlock())
     809                 :            :                 return -EPERM;
     810                 :            : 
     811                 :          0 :         lock_limit = rlimit(RLIMIT_MEMLOCK);
     812                 :          0 :         lock_limit >>= PAGE_SHIFT;
     813                 :            : 
     814         [ #  # ]:          0 :         if (down_write_killable(&current->mm->mmap_sem))
     815                 :            :                 return -EINTR;
     816                 :            : 
     817                 :          0 :         ret = -ENOMEM;
      818   [ #  #  #  #  #  # ]:          0 :         if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
     819                 :          0 :             capable(CAP_IPC_LOCK))
     820                 :          0 :                 ret = apply_mlockall_flags(flags);
     821                 :          0 :         up_write(&current->mm->mmap_sem);
     822   [ #  #  #  # ]:          0 :         if (!ret && (flags & MCL_CURRENT))
     823   [ #  #  #  # ]:          0 :                 mm_populate(0, TASK_SIZE);
     824                 :            : 
     825                 :          0 :         return ret;
     826                 :            : }
     827                 :            : 
     828                 :          0 : SYSCALL_DEFINE0(munlockall)
     829                 :            : {
     830                 :          0 :         int ret;
     831                 :            : 
     832         [ #  # ]:          0 :         if (down_write_killable(&current->mm->mmap_sem))
     833                 :            :                 return -EINTR;
     834                 :          0 :         ret = apply_mlockall_flags(0);
     835                 :          0 :         up_write(&current->mm->mmap_sem);
     836                 :          0 :         return ret;
     837                 :            : }
     838                 :            : 
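Taken together, a latency-sensitive process typically brackets its critical phase with these two syscalls; note from the validation in sys_mlockall() above that MCL_ONFAULT on its own is rejected with -EINVAL and must be combined with MCL_CURRENT or MCL_FUTURE. A sketch:

  #include <stdio.h>
  #include <sys/mman.h>

  int main(void)
  {
          /* Fault in and lock everything mapped now and in the future. */
          if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                  perror("mlockall");     /* EPERM, or ENOMEM over the rlimit */
                  return 1;
          }

          /* ... latency-sensitive work: no page faults on locked memory ... */

          munlockall();   /* apply_mlockall_flags(0): clears VM_LOCKED everywhere */
          return 0;
  }
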
      839                 :            : /*
      840                 :            :  * Objects whose lifetime differs from that of a process (SHM_LOCK and
      841                 :            :  * SHM_HUGETLB shm segments) are accounted against the user_struct instead.
      842                 :            :  */
     843                 :            : static DEFINE_SPINLOCK(shmlock_user_lock);
     844                 :            : 
     845                 :          0 : int user_shm_lock(size_t size, struct user_struct *user)
     846                 :            : {
     847                 :          0 :         unsigned long lock_limit, locked;
     848                 :          0 :         int allowed = 0;
     849                 :            : 
     850                 :          0 :         locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
     851         [ #  # ]:          0 :         lock_limit = rlimit(RLIMIT_MEMLOCK);
     852         [ #  # ]:          0 :         if (lock_limit == RLIM_INFINITY)
     853                 :          0 :                 allowed = 1;
     854                 :          0 :         lock_limit >>= PAGE_SHIFT;
     855                 :          0 :         spin_lock(&shmlock_user_lock);
     856         [ #  # ]:          0 :         if (!allowed &&
     857   [ #  #  #  # ]:          0 :             locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
     858                 :          0 :                 goto out;
     859                 :          0 :         get_uid(user);
     860                 :          0 :         user->locked_shm += locked;
     861                 :          0 :         allowed = 1;
     862                 :          0 : out:
     863                 :          0 :         spin_unlock(&shmlock_user_lock);
     864                 :          0 :         return allowed;
     865                 :            : }
     866                 :            : 
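user_shm_lock() above is the accounting hook behind shmctl(SHM_LOCK), and user_shm_unlock() below undoes the charge; a userspace sketch exercising both:

  #include <stdio.h>
  #include <sys/ipc.h>
  #include <sys/shm.h>

  int main(void)
  {
          /* Create a 64 KiB System V segment and lock it into memory. */
          int id = shmget(IPC_PRIVATE, 64 * 1024, IPC_CREAT | 0600);

          if (id < 0)
                  return 1;
          if (shmctl(id, SHM_LOCK, NULL) != 0)    /* charges user->locked_shm */
                  perror("shmctl(SHM_LOCK)");
          shmctl(id, SHM_UNLOCK, NULL);           /* user_shm_unlock() path */
          shmctl(id, IPC_RMID, NULL);
          return 0;
  }
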
     867                 :          0 : void user_shm_unlock(size_t size, struct user_struct *user)
     868                 :            : {
     869                 :          0 :         spin_lock(&shmlock_user_lock);
     870                 :          0 :         user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
     871                 :          0 :         spin_unlock(&shmlock_user_lock);
     872                 :          0 :         free_uid(user);
     873                 :          0 : }

Generated by: LCOV version 1.14