LCOV - code coverage report
Current view: top level - mm - internal.h (source / functions)
Test: combined.info
Date: 2022-03-28 16:04:14

                  Hit      Total    Coverage
Lines:             21         51      41.2 %
Functions:          0          1       0.0 %
Branches:          38         82      46.3 %

           Branch data     Line data    Source code
       1                 :            : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2                 :            : /* internal.h: mm/ internal definitions
       3                 :            :  *
       4                 :            :  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
       5                 :            :  * Written by David Howells (dhowells@redhat.com)
       6                 :            :  */
       7                 :            : #ifndef __MM_INTERNAL_H
       8                 :            : #define __MM_INTERNAL_H
       9                 :            : 
      10                 :            : #include <linux/fs.h>
      11                 :            : #include <linux/mm.h>
      12                 :            : #include <linux/pagemap.h>
      13                 :            : #include <linux/tracepoint-defs.h>
      14                 :            : 
      15                 :            : /*
      16                 :            :  * The set of flags that only affect watermark checking and reclaim
      17                 :            :  * behaviour. This is used by the MM to obey the caller constraints
      18                 :            :  * about IO, FS and watermark checking while ignoring placement
      19                 :            :  * hints such as HIGHMEM usage.
      20                 :            :  */
      21                 :            : #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
      22                 :            :                         __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
      23                 :            :                         __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
      24                 :            :                         __GFP_ATOMIC)
      25                 :            : 
      26                 :            : /* The GFP flags allowed during early boot */
      27                 :            : #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
      28                 :            : 
      29                 :            : /* Control allocation cpuset and node placement constraints */
      30                 :            : #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
      31                 :            : 
      32                 :            : /* Do not use these with a slab allocator */
      33                 :            : #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
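Editor's note: all four masks above are used the same way - the caller's gfp_mask is ANDed with the constant so only the bits a particular path is allowed to honour survive. A minimal user-space sketch of that filtering, using hypothetical stand-in bits (X_GFP_*) rather than the real __GFP_* values:

    #include <stdio.h>

    /* Hypothetical stand-ins for a few __GFP_* bits, illustration only. */
    #define X_GFP_IO         0x01u  /* may start physical IO */
    #define X_GFP_FS         0x02u  /* may call into the filesystem */
    #define X_GFP_HIGHMEM    0x04u  /* placement hint, not a reclaim constraint */
    #define X_RECLAIM_MASK   (X_GFP_IO | X_GFP_FS)

    int main(void)
    {
            unsigned int caller_mask = X_GFP_IO | X_GFP_HIGHMEM;

            /* The placement hint is dropped; the IO constraint is kept. */
            printf("filtered mask: %#x\n", caller_mask & X_RECLAIM_MASK);
            return 0;
    }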
      34                 :            : 
      35                 :            : void page_writeback_init(void);
      36                 :            : 
      37                 :            : vm_fault_t do_swap_page(struct vm_fault *vmf);
      38                 :            : 
      39                 :            : void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
      40                 :            :                 unsigned long floor, unsigned long ceiling);
      41                 :            : 
      42                 :          0 : static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
      43                 :            : {
       44   [ #  #  #  #  #  # ]:          0 :         return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
      45                 :            : }
      46                 :            : 
      47                 :            : void unmap_page_range(struct mmu_gather *tlb,
      48                 :            :                              struct vm_area_struct *vma,
      49                 :            :                              unsigned long addr, unsigned long end,
      50                 :            :                              struct zap_details *details);
      51                 :            : 
      52                 :            : extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
      53                 :            :                 struct file *filp, pgoff_t offset, unsigned long nr_to_read,
      54                 :            :                 unsigned long lookahead_size);
      55                 :            : 
      56                 :            : /*
      57                 :            :  * Submit IO for the read-ahead request in file_ra_state.
      58                 :            :  */
      59                 :      10218 : static inline unsigned long ra_submit(struct file_ra_state *ra,
      60                 :            :                 struct address_space *mapping, struct file *filp)
      61                 :            : {
      62                 :      10218 :         return __do_page_cache_readahead(mapping, filp,
      63                 :       6812 :                                         ra->start, ra->size, ra->async_size);
      64                 :            : }
      65                 :            : 
      66                 :            : /*
      67                 :            :  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
      68                 :            :  * a count of one.
      69                 :            :  */
      70                 :    1635548 : static inline void set_page_refcounted(struct page *page)
      71                 :            : {
      72                 :    1635548 :         VM_BUG_ON_PAGE(PageTail(page), page);
      73                 :    1635548 :         VM_BUG_ON_PAGE(page_ref_count(page), page);
      74                 :    1635548 :         set_page_count(page, 1);
      75                 :            : }
      76                 :            : 
      77                 :            : extern unsigned long highest_memmap_pfn;
      78                 :            : 
      79                 :            : /*
      80                 :            :  * Maximum number of reclaim retries without progress before the OOM
       81                 :            :  * killer is considered the only way forward.
      82                 :            :  */
      83                 :            : #define MAX_RECLAIM_RETRIES 16
      84                 :            : 
      85                 :            : /*
      86                 :            :  * in mm/vmscan.c:
      87                 :            :  */
      88                 :            : extern int isolate_lru_page(struct page *page);
      89                 :            : extern void putback_lru_page(struct page *page);
      90                 :            : 
      91                 :            : /*
      92                 :            :  * in mm/rmap.c:
      93                 :            :  */
      94                 :            : extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
      95                 :            : 
      96                 :            : /*
      97                 :            :  * in mm/page_alloc.c
      98                 :            :  */
      99                 :            : 
     100                 :            : /*
     101                 :            :  * Structure for holding the mostly immutable allocation parameters passed
     102                 :            :  * between functions involved in allocations, including the alloc_pages*
     103                 :            :  * family of functions.
     104                 :            :  *
     105                 :            :  * nodemask, migratetype and high_zoneidx are initialized only once in
     106                 :            :  * __alloc_pages_nodemask() and then never change.
     107                 :            :  *
     108                 :            :  * zonelist, preferred_zone and classzone_idx are set first in
     109                 :            :  * __alloc_pages_nodemask() for the fast path, and might be later changed
      110                 :            :  * in __alloc_pages_slowpath(). All other functions pass the whole structure
     111                 :            :  * by a const pointer.
     112                 :            :  */
     113                 :            : struct alloc_context {
     114                 :            :         struct zonelist *zonelist;
     115                 :            :         nodemask_t *nodemask;
     116                 :            :         struct zoneref *preferred_zoneref;
     117                 :            :         int migratetype;
     118                 :            :         enum zone_type high_zoneidx;
     119                 :            :         bool spread_dirty_pages;
     120                 :            : };
     121                 :            : 
     122                 :            : #define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)
     123                 :            : 
     124                 :            : /*
     125                 :            :  * Locate the struct page for both the matching buddy in our
      126                 :            :  * pair (buddy1) and the combined order O+1 page they form (page).
     127                 :            :  *
     128                 :            :  * 1) Any buddy B1 will have an order O twin B2 which satisfies
     129                 :            :  * the following equation:
     130                 :            :  *     B2 = B1 ^ (1 << O)
     131                 :            :  * For example, if the starting buddy (buddy2) is #8 its order
     132                 :            :  * 1 buddy is #10:
     133                 :            :  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
     134                 :            :  *
     135                 :            :  * 2) Any buddy B will have an order O+1 parent P which
     136                 :            :  * satisfies the following equation:
     137                 :            :  *     P = B & ~(1 << O)
     138                 :            :  *
     139                 :            :  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
     140                 :            :  */
     141                 :            : static inline unsigned long
     142                 :    1236094 : __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
     143                 :            : {
     144   [ +  +  +  + ]:    1236094 :         return page_pfn ^ (1 << order);
     145                 :            : }
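Editor's note: a stand-alone sketch of the arithmetic described in the comment above, mirroring __find_buddy_pfn() and the parent formula; the helper names here are illustrative, not kernel API:

    #include <stdio.h>

    /* B2 = B1 ^ (1 << O): the buddy of a pfn at a given order. */
    static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
    {
            return pfn ^ (1UL << order);
    }

    /* P = B & ~(1 << O): the combined order O+1 page both buddies form. */
    static unsigned long parent_pfn(unsigned long pfn, unsigned int order)
    {
            return pfn & ~(1UL << order);
    }

    int main(void)
    {
            /* The worked example from the comment: buddy of #8 at order 1 is #10. */
            printf("buddy(8, 1)   = %lu\n", buddy_pfn(8, 1));    /* 10 */
            printf("parent(10, 1) = %lu\n", parent_pfn(10, 1));  /* 8  */
            return 0;
    }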
     146                 :            : 
     147                 :            : extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
     148                 :            :                                 unsigned long end_pfn, struct zone *zone);
     149                 :            : 
     150                 :          0 : static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
     151                 :            :                                 unsigned long end_pfn, struct zone *zone)
     152                 :            : {
      153   [ #  #  #  #  #  #  #  # ]:          0 :         if (zone->contiguous)
     154                 :          0 :                 return pfn_to_page(start_pfn);
     155                 :            : 
     156                 :          0 :         return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
     157                 :            : }
     158                 :            : 
     159                 :            : extern int __isolate_free_page(struct page *page, unsigned int order);
     160                 :            : extern void memblock_free_pages(struct page *page, unsigned long pfn,
     161                 :            :                                         unsigned int order);
     162                 :            : extern void __free_pages_core(struct page *page, unsigned int order);
     163                 :            : extern void prep_compound_page(struct page *page, unsigned int order);
     164                 :            : extern void post_alloc_hook(struct page *page, unsigned int order,
     165                 :            :                                         gfp_t gfp_flags);
     166                 :            : extern int user_min_free_kbytes;
     167                 :            : 
     168                 :            : extern void zone_pcp_update(struct zone *zone);
     169                 :            : extern void zone_pcp_reset(struct zone *zone);
     170                 :            : 
     171                 :            : #if defined CONFIG_COMPACTION || defined CONFIG_CMA
     172                 :            : 
     173                 :            : /*
     174                 :            :  * in mm/compaction.c
     175                 :            :  */
     176                 :            : /*
     177                 :            :  * compact_control is used to track pages being migrated and the free pages
     178                 :            :  * they are being migrated to during memory compaction. The free_pfn starts
     179                 :            :  * at the end of a zone and migrate_pfn begins at the start. Movable pages
     180                 :            :  * are moved to the end of a zone during a compaction run and the run
     181                 :            :  * completes when free_pfn <= migrate_pfn
     182                 :            :  */
     183                 :            : struct compact_control {
     184                 :            :         struct list_head freepages;     /* List of free pages to migrate to */
     185                 :            :         struct list_head migratepages;  /* List of pages being migrated */
     186                 :            :         unsigned int nr_freepages;      /* Number of isolated free pages */
     187                 :            :         unsigned int nr_migratepages;   /* Number of pages to migrate */
     188                 :            :         unsigned long free_pfn;         /* isolate_freepages search base */
     189                 :            :         unsigned long migrate_pfn;      /* isolate_migratepages search base */
     190                 :            :         unsigned long fast_start_pfn;   /* a pfn to start linear scan from */
     191                 :            :         struct zone *zone;
     192                 :            :         unsigned long total_migrate_scanned;
     193                 :            :         unsigned long total_free_scanned;
     194                 :            :         unsigned short fast_search_fail;/* failures to use free list searches */
     195                 :            :         short search_order;             /* order to start a fast search at */
     196                 :            :         const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
     197                 :            :         int order;                      /* order a direct compactor needs */
     198                 :            :         int migratetype;                /* migratetype of direct compactor */
     199                 :            :         const unsigned int alloc_flags; /* alloc flags of a direct compactor */
     200                 :            :         const int classzone_idx;        /* zone index of a direct compactor */
     201                 :            :         enum migrate_mode mode;         /* Async or sync migration mode */
     202                 :            :         bool ignore_skip_hint;          /* Scan blocks even if marked skip */
     203                 :            :         bool no_set_skip_hint;          /* Don't mark blocks for skipping */
     204                 :            :         bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
     205                 :            :         bool direct_compaction;         /* False from kcompactd or /proc/... */
     206                 :            :         bool whole_zone;                /* Whole zone should/has been scanned */
     207                 :            :         bool contended;                 /* Signal lock or sched contention */
     208                 :            :         bool rescan;                    /* Rescanning the same pageblock */
     209                 :            : };
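Editor's note: a toy model of the convergence rule in the comment above compact_control - the migrate scanner walks up from the start of the zone, the free scanner walks down from the end, and the run finishes once free_pfn <= migrate_pfn. The pfn values and step size below are made up for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long migrate_pfn = 0;     /* starts at the zone start */
            unsigned long free_pfn    = 1024;  /* starts at the zone end   */

            while (migrate_pfn < free_pfn) {
                    migrate_pfn += 32;  /* pretend one pageblock was scanned */
                    free_pfn    -= 32;
            }
            printf("scanners met: migrate_pfn=%lu free_pfn=%lu\n",
                   migrate_pfn, free_pfn);
            return 0;
    }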
     210                 :            : 
     211                 :            : /*
     212                 :            :  * Used in direct compaction when a page should be taken from the freelists
     213                 :            :  * immediately when one is created during the free path.
     214                 :            :  */
     215                 :            : struct capture_control {
     216                 :            :         struct compact_control *cc;
     217                 :            :         struct page *page;
     218                 :            : };
     219                 :            : 
     220                 :            : unsigned long
     221                 :            : isolate_freepages_range(struct compact_control *cc,
     222                 :            :                         unsigned long start_pfn, unsigned long end_pfn);
     223                 :            : unsigned long
     224                 :            : isolate_migratepages_range(struct compact_control *cc,
     225                 :            :                            unsigned long low_pfn, unsigned long end_pfn);
     226                 :            : int find_suitable_fallback(struct free_area *area, unsigned int order,
     227                 :            :                         int migratetype, bool only_stealable, bool *can_steal);
     228                 :            : 
     229                 :            : #endif
     230                 :            : 
     231                 :            : /*
     232                 :            :  * This function returns the order of a free page in the buddy system. In
     233                 :            :  * general, page_zone(page)->lock must be held by the caller to prevent the
     234                 :            :  * page from being allocated in parallel and returning garbage as the order.
     235                 :            :  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
     236                 :            :  * page cannot be allocated or merged in parallel. Alternatively, it must
     237                 :            :  * handle invalid values gracefully, and use page_order_unsafe() below.
     238                 :            :  */
     239                 :     354812 : static inline unsigned int page_order(struct page *page)
     240                 :            : {
     241                 :            :         /* PageBuddy() must be checked by the caller */
      242   [ -  -  -  +  +  +  +  + ]:     354812 :         return page_private(page);
     243                 :            : }
     244                 :            : 
     245                 :            : /*
     246                 :            :  * Like page_order(), but for callers who cannot afford to hold the zone lock.
     247                 :            :  * PageBuddy() should be checked first by the caller to minimize race window,
     248                 :            :  * and invalid values must be handled gracefully.
     249                 :            :  *
     250                 :            :  * READ_ONCE is used so that if the caller assigns the result into a local
     251                 :            :  * variable and e.g. tests it for valid range before using, the compiler cannot
     252                 :            :  * decide to remove the variable and inline the page_private(page) multiple
     253                 :            :  * times, potentially observing different values in the tests and the actual
     254                 :            :  * use of the result.
     255                 :            :  */
     256                 :            : #define page_order_unsafe(page)         READ_ONCE(page_private(page))
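Editor's note: a user-space analogue of the READ_ONCE() pattern that page_order_unsafe() relies on - the volatile access forces a single load, so the range check and the later use both see the same snapshot even if the value changes concurrently. READ_ONCE_ULONG and the bound used below are illustrative stand-ins, not kernel definitions:

    #include <stdio.h>

    /* Force exactly one load of x, roughly what READ_ONCE() does here. */
    #define READ_ONCE_ULONG(x) (*(const volatile unsigned long *)&(x))

    static unsigned long page_private_word;  /* stands in for page_private(page) */

    int main(void)
    {
            unsigned long order = READ_ONCE_ULONG(page_private_word);

            if (order < 11)  /* validate the snapshot against a sane bound ... */
                    printf("order %lu\n", order);  /* ... then use the same value */
            return 0;
    }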
     257                 :            : 
     258                 :    1032824 : static inline bool is_cow_mapping(vm_flags_t flags)
     259                 :            : {
      260   [ -  -  -  +  +  +  -  -  +  + ]:    1032824 :         return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
     261                 :            : }
     262                 :            : 
     263                 :            : /*
      264                 :            :  * These three helpers classify VMAs for virtual memory accounting.
     265                 :            :  */
     266                 :            : 
     267                 :            : /*
     268                 :            :  * Executable code area - executable, not writable, not stack
     269                 :            :  */
     270                 :     707369 : static inline bool is_exec_mapping(vm_flags_t flags)
     271                 :            : {
      272   [ +  +  +  +  +  +  -  +  +  + ]:     707369 :         return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
     273                 :            : }
     274                 :            : 
     275                 :            : /*
      276                 :            :  * Stack area - automatically grows in one direction
     277                 :            :  *
     278                 :            :  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
     279                 :            :  * do_mmap() forbids all other combinations.
     280                 :            :  */
     281                 :     626353 : static inline bool is_stack_mapping(vm_flags_t flags)
     282                 :            : {
     283   [ -  +  -  +  :     626353 :         return (flags & VM_STACK) == VM_STACK;
          -  +  +  -  -  
                      + ]
     284                 :            : }
     285                 :            : 
     286                 :            : /*
     287                 :            :  * Data area - private, writable, not stack
     288                 :            :  */
     289                 :    1014410 : static inline bool is_data_mapping(vm_flags_t flags)
     290                 :            : {
      291   [ -  +  +  +  +  +  +  +  -  -  +  + ]:    1014410 :         return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
     292                 :            : }
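Editor's note: a self-contained illustration of the flag tests used by is_cow_mapping(), is_exec_mapping() and is_data_mapping() above. The F_* bits are hypothetical stand-ins for the real VM_* flags:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the VM_* bits, illustration only. */
    #define F_WRITE     0x01u
    #define F_EXEC      0x02u
    #define F_SHARED    0x04u
    #define F_MAYWRITE  0x08u
    #define F_STACK     0x10u

    static bool is_cow(unsigned int f)
    {
            return (f & (F_SHARED | F_MAYWRITE)) == F_MAYWRITE;
    }

    static bool is_exec(unsigned int f)
    {
            return (f & (F_EXEC | F_WRITE | F_STACK)) == F_EXEC;
    }

    static bool is_data(unsigned int f)
    {
            return (f & (F_WRITE | F_SHARED | F_STACK)) == F_WRITE;
    }

    int main(void)
    {
            unsigned int text = F_EXEC | F_MAYWRITE;   /* private r-x mapping */
            unsigned int heap = F_WRITE | F_MAYWRITE;  /* private rw- mapping */

            printf("text: cow=%d exec=%d data=%d\n",
                   is_cow(text), is_exec(text), is_data(text));
            printf("heap: cow=%d exec=%d data=%d\n",
                   is_cow(heap), is_exec(heap), is_data(heap));
            return 0;
    }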
     293                 :            : 
     294                 :            : /* mm/util.c */
     295                 :            : void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
     296                 :            :                 struct vm_area_struct *prev);
     297                 :            : void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
     298                 :            : 
     299                 :            : #ifdef CONFIG_MMU
     300                 :            : extern long populate_vma_page_range(struct vm_area_struct *vma,
     301                 :            :                 unsigned long start, unsigned long end, int *nonblocking);
     302                 :            : extern void munlock_vma_pages_range(struct vm_area_struct *vma,
     303                 :            :                         unsigned long start, unsigned long end);
     304                 :          0 : static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
     305                 :            : {
     306                 :          0 :         munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
     307                 :          0 : }
     308                 :            : 
     309                 :            : /*
     310                 :            :  * must be called with vma's mmap_sem held for read or write, and page locked.
     311                 :            :  */
     312                 :            : extern void mlock_vma_page(struct page *page);
     313                 :            : extern unsigned int munlock_vma_page(struct page *page);
     314                 :            : 
     315                 :            : /*
     316                 :            :  * Clear the page's PageMlocked().  This can be useful in a situation where
     317                 :            :  * we want to unconditionally remove a page from the pagecache -- e.g.,
     318                 :            :  * on truncation or freeing.
     319                 :            :  *
     320                 :            :  * It is legal to call this function for any page, mlocked or not.
     321                 :            :  * If called for a page that is still mapped by mlocked vmas, all we do
     322                 :            :  * is revert to lazy LRU behaviour -- semantics are not broken.
     323                 :            :  */
     324                 :            : extern void clear_page_mlock(struct page *page);
     325                 :            : 
     326                 :            : /*
     327                 :            :  * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
     328                 :            :  * (because that does not go through the full procedure of migration ptes):
     329                 :            :  * to migrate the Mlocked page flag; update statistics.
     330                 :            :  */
     331                 :            : static inline void mlock_migrate_page(struct page *newpage, struct page *page)
     332                 :            : {
     333                 :            :         if (TestClearPageMlocked(page)) {
     334                 :            :                 int nr_pages = hpage_nr_pages(page);
     335                 :            : 
     336                 :            :                 /* Holding pmd lock, no change in irq context: __mod is safe */
     337                 :            :                 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
     338                 :            :                 SetPageMlocked(newpage);
     339                 :            :                 __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
     340                 :            :         }
     341                 :            : }
     342                 :            : 
     343                 :            : extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
     344                 :            : 
     345                 :            : /*
     346                 :            :  * At what user virtual address is page expected in @vma?
     347                 :            :  */
     348                 :            : static inline unsigned long
     349                 :          0 : __vma_address(struct page *page, struct vm_area_struct *vma)
     350                 :            : {
     351                 :          0 :         pgoff_t pgoff = page_to_pgoff(page);
     352         [ #  # ]:          0 :         return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
     353                 :            : }
     354                 :            : 
     355                 :            : static inline unsigned long
     356                 :          0 : vma_address(struct page *page, struct vm_area_struct *vma)
     357                 :            : {
     358                 :          0 :         unsigned long start, end;
     359                 :            : 
     360                 :          0 :         start = __vma_address(page, vma);
     361                 :          0 :         end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
     362                 :            : 
     363                 :            :         /* page should be within @vma mapping range */
     364                 :          0 :         VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
     365                 :            : 
     366                 :          0 :         return max(start, vma->vm_start);
     367                 :            : }
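Editor's note: a stand-alone sketch of the address arithmetic in __vma_address() above - the page's index in the file (pgoff) minus the file index where the mapping starts (vm_pgoff) gives the page's index inside the VMA, shifted up to a byte offset from vm_start. The 4 KiB page size and the sample numbers are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT_EX 12  /* 4 KiB pages, illustrative */

    static unsigned long vma_addr(unsigned long vm_start, unsigned long vm_pgoff,
                                  unsigned long pgoff)
    {
            return vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT_EX);
    }

    int main(void)
    {
            /* Mapping starts at file page 16; file page 20 is therefore four
             * pages (16 KiB) into the mapping. */
            printf("%#lx\n", vma_addr(0x700000000000UL, 16, 20));
            return 0;
    }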
     368                 :            : 
     369                 :            : static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
     370                 :            :                                                     struct file *fpin)
     371                 :            : {
     372                 :            :         int flags = vmf->flags;
     373                 :            : 
     374                 :            :         if (fpin)
     375                 :            :                 return fpin;
     376                 :            : 
     377                 :            :         /*
     378                 :            :          * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
     379                 :            :          * anything, so we only pin the file and drop the mmap_sem if only
     380                 :            :          * FAULT_FLAG_ALLOW_RETRY is set.
     381                 :            :          */
     382                 :            :         if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
     383                 :            :             FAULT_FLAG_ALLOW_RETRY) {
     384                 :            :                 fpin = get_file(vmf->vma->vm_file);
     385                 :            :                 up_read(&vmf->vma->vm_mm->mmap_sem);
     386                 :            :         }
     387                 :            :         return fpin;
     388                 :            : }
     389                 :            : 
     390                 :            : #else /* !CONFIG_MMU */
     391                 :            : static inline void clear_page_mlock(struct page *page) { }
     392                 :            : static inline void mlock_vma_page(struct page *page) { }
     393                 :            : static inline void mlock_migrate_page(struct page *new, struct page *old) { }
     394                 :            : 
     395                 :            : #endif /* !CONFIG_MMU */
     396                 :            : 
     397                 :            : /*
     398                 :            :  * Return the mem_map entry representing the 'offset' subpage within
     399                 :            :  * the maximally aligned gigantic page 'base'.  Handle any discontiguity
     400                 :            :  * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
     401                 :            :  */
     402                 :          0 : static inline struct page *mem_map_offset(struct page *base, int offset)
     403                 :            : {
     404         [ #  # ]:          0 :         if (unlikely(offset >= MAX_ORDER_NR_PAGES))
     405                 :          0 :                 return nth_page(base, offset);
     406                 :          0 :         return base + offset;
     407                 :            : }
     408                 :            : 
     409                 :            : /*
     410                 :            :  * Iterator over all subpages within the maximally aligned gigantic
     411                 :            :  * page 'base'.  Handle any discontiguity in the mem_map.
     412                 :            :  */
     413                 :          0 : static inline struct page *mem_map_next(struct page *iter,
     414                 :            :                                                 struct page *base, int offset)
     415                 :            : {
     416         [ #  # ]:          0 :         if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
     417                 :          0 :                 unsigned long pfn = page_to_pfn(base) + offset;
     418         [ #  # ]:          0 :                 if (!pfn_valid(pfn))
     419                 :            :                         return NULL;
     420                 :          0 :                 return pfn_to_page(pfn);
     421                 :            :         }
     422                 :          0 :         return iter + 1;
     423                 :            : }
     424                 :            : 
     425                 :            : /* Memory initialisation debug and verification */
     426                 :            : enum mminit_level {
     427                 :            :         MMINIT_WARNING,
     428                 :            :         MMINIT_VERIFY,
     429                 :            :         MMINIT_TRACE
     430                 :            : };
     431                 :            : 
     432                 :            : #ifdef CONFIG_DEBUG_MEMORY_INIT
     433                 :            : 
     434                 :            : extern int mminit_loglevel;
     435                 :            : 
     436                 :            : #define mminit_dprintk(level, prefix, fmt, arg...) \
     437                 :            : do { \
     438                 :            :         if (level < mminit_loglevel) { \
     439                 :            :                 if (level <= MMINIT_WARNING) \
     440                 :            :                         pr_warn("mminit::" prefix " " fmt, ##arg);  \
     441                 :            :                 else \
     442                 :            :                         printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
     443                 :            :         } \
     444                 :            : } while (0)
     445                 :            : 
     446                 :            : extern void mminit_verify_pageflags_layout(void);
     447                 :            : extern void mminit_verify_zonelist(void);
     448                 :            : #else
     449                 :            : 
     450                 :            : static inline void mminit_dprintk(enum mminit_level level,
     451                 :            :                                 const char *prefix, const char *fmt, ...)
     452                 :            : {
     453                 :            : }
     454                 :            : 
     455                 :            : static inline void mminit_verify_pageflags_layout(void)
     456                 :            : {
     457                 :            : }
     458                 :            : 
     459                 :            : static inline void mminit_verify_zonelist(void)
     460                 :            : {
     461                 :            : }
     462                 :            : #endif /* CONFIG_DEBUG_MEMORY_INIT */
     463                 :            : 
     464                 :            : /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
     465                 :            : #if defined(CONFIG_SPARSEMEM)
     466                 :            : extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
     467                 :            :                                 unsigned long *end_pfn);
     468                 :            : #else
     469                 :            : static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
     470                 :            :                                 unsigned long *end_pfn)
     471                 :            : {
     472                 :            : }
     473                 :            : #endif /* CONFIG_SPARSEMEM */
     474                 :            : 
     475                 :            : #define NODE_RECLAIM_NOSCAN     -2
     476                 :            : #define NODE_RECLAIM_FULL       -1
     477                 :            : #define NODE_RECLAIM_SOME       0
     478                 :            : #define NODE_RECLAIM_SUCCESS    1
     479                 :            : 
     480                 :            : #ifdef CONFIG_NUMA
     481                 :            : extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
     482                 :            : #else
     483                 :            : static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
     484                 :            :                                 unsigned int order)
     485                 :            : {
     486                 :            :         return NODE_RECLAIM_NOSCAN;
     487                 :            : }
     488                 :            : #endif
     489                 :            : 
     490                 :            : extern int hwpoison_filter(struct page *p);
     491                 :            : 
     492                 :            : extern u32 hwpoison_filter_dev_major;
     493                 :            : extern u32 hwpoison_filter_dev_minor;
     494                 :            : extern u64 hwpoison_filter_flags_mask;
     495                 :            : extern u64 hwpoison_filter_flags_value;
     496                 :            : extern u64 hwpoison_filter_memcg;
     497                 :            : extern u32 hwpoison_filter_enable;
     498                 :            : 
     499                 :            : extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
     500                 :            :         unsigned long, unsigned long,
     501                 :            :         unsigned long, unsigned long);
     502                 :            : 
     503                 :            : extern void set_pageblock_order(void);
     504                 :            : unsigned long reclaim_clean_pages_from_list(struct zone *zone,
     505                 :            :                                             struct list_head *page_list);
     506                 :            : /* The ALLOC_WMARK bits are used as an index to zone->watermark */
     507                 :            : #define ALLOC_WMARK_MIN         WMARK_MIN
     508                 :            : #define ALLOC_WMARK_LOW         WMARK_LOW
     509                 :            : #define ALLOC_WMARK_HIGH        WMARK_HIGH
     510                 :            : #define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
     511                 :            : 
     512                 :            : /* Mask to get the watermark bits */
     513                 :            : #define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
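Editor's note: a small illustration of the ALLOC_WMARK_MASK trick above - the low bits of alloc_flags index zone->watermark, and masking with ALLOC_NO_WATERMARKS - 1 recovers that index even when higher ALLOC_* bits are set. The EX_* defines mirror the values in this header (assuming the usual WMARK_MIN/LOW/HIGH values of 0, 1, 2); the watermark page counts are made up:

    #include <stdio.h>

    #define EX_WMARK_MIN       0
    #define EX_WMARK_LOW       1
    #define EX_WMARK_HIGH      2
    #define EX_NO_WATERMARKS   0x04
    #define EX_WMARK_MASK      (EX_NO_WATERMARKS - 1)
    #define EX_ALLOC_HARDER    0x10

    int main(void)
    {
            unsigned long watermark[3] = { 1024, 1280, 1536 };  /* pages, made up */
            unsigned int alloc_flags = EX_WMARK_LOW | EX_ALLOC_HARDER;

            /* Picks watermark[1], the low watermark, despite the extra bit. */
            printf("check against %lu pages\n",
                   watermark[alloc_flags & EX_WMARK_MASK]);
            return 0;
    }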
     514                 :            : 
     515                 :            : /*
      516                 :            :  * Only MMU archs have async oom victim reclaim - aka the oom_reaper - so we
     517                 :            :  * cannot assume a reduced access to memory reserves is sufficient for
     518                 :            :  * !MMU
     519                 :            :  */
     520                 :            : #ifdef CONFIG_MMU
     521                 :            : #define ALLOC_OOM               0x08
     522                 :            : #else
     523                 :            : #define ALLOC_OOM               ALLOC_NO_WATERMARKS
     524                 :            : #endif
     525                 :            : 
     526                 :            : #define ALLOC_HARDER             0x10 /* try to alloc harder */
     527                 :            : #define ALLOC_HIGH               0x20 /* __GFP_HIGH set */
     528                 :            : #define ALLOC_CPUSET             0x40 /* check for correct cpuset */
     529                 :            : #define ALLOC_CMA                0x80 /* allow allocations from CMA areas */
     530                 :            : #ifdef CONFIG_ZONE_DMA32
     531                 :            : #define ALLOC_NOFRAGMENT        0x100 /* avoid mixing pageblock types */
     532                 :            : #else
     533                 :            : #define ALLOC_NOFRAGMENT          0x0
     534                 :            : #endif
     535                 :            : #define ALLOC_KSWAPD            0x200 /* allow waking of kswapd */
     536                 :            : 
     537                 :            : enum ttu_flags;
     538                 :            : struct tlbflush_unmap_batch;
     539                 :            : 
     540                 :            : 
     541                 :            : /*
     542                 :            :  * only for MM internal work items which do not depend on
     543                 :            :  * any allocations or locks which might depend on allocations
     544                 :            :  */
     545                 :            : extern struct workqueue_struct *mm_percpu_wq;
     546                 :            : 
     547                 :            : #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
     548                 :            : void try_to_unmap_flush(void);
     549                 :            : void try_to_unmap_flush_dirty(void);
     550                 :            : void flush_tlb_batched_pending(struct mm_struct *mm);
     551                 :            : #else
     552                 :            : static inline void try_to_unmap_flush(void)
     553                 :            : {
     554                 :            : }
     555                 :            : static inline void try_to_unmap_flush_dirty(void)
     556                 :            : {
     557                 :            : }
     558                 :            : static inline void flush_tlb_batched_pending(struct mm_struct *mm)
     559                 :            : {
     560                 :            : }
     561                 :            : #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
     562                 :            : 
     563                 :            : extern const struct trace_print_flags pageflag_names[];
     564                 :            : extern const struct trace_print_flags vmaflag_names[];
     565                 :            : extern const struct trace_print_flags gfpflag_names[];
     566                 :            : 
     567                 :        312 : static inline bool is_migrate_highatomic(enum migratetype migratetype)
     568                 :            : {
     569   [ -  -  -  + ]:        312 :         return migratetype == MIGRATE_HIGHATOMIC;
     570                 :            : }
     571                 :            : 
     572                 :          0 : static inline bool is_migrate_highatomic_page(struct page *page)
     573                 :            : {
     574         [ #  # ]:          0 :         return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
     575                 :            : }
     576                 :            : 
     577                 :            : void setup_zone_pageset(struct zone *zone);
     578                 :            : extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
     579                 :            : #endif  /* __MM_INTERNAL_H */

Generated by: LCOV version 1.14