LCOV - code coverage report
Current view: top level - mm - vmscan.c (source / functions)
Test: combined.info
Date: 2022-04-01 13:59:58

                 Hit      Total    Coverage
Lines:           104       1405       7.4 %
Functions:        12         63      19.0 %
Branches:         30        948       3.2 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  *  linux/mm/vmscan.c
       4                 :            :  *
       5                 :            :  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
       6                 :            :  *
       7                 :            :  *  Swap reorganised 29.12.95, Stephen Tweedie.
       8                 :            :  *  kswapd added: 7.1.96  sct
       9                 :            :  *  Removed kswapd_ctl limits, and swap out as many pages as needed
      10                 :            :  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
      11                 :            :  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
      12                 :            :  *  Multiqueue VM started 5.8.00, Rik van Riel.
      13                 :            :  */
      14                 :            : 
      15                 :            : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      16                 :            : 
      17                 :            : #include <linux/mm.h>
      18                 :            : #include <linux/sched/mm.h>
      19                 :            : #include <linux/module.h>
      20                 :            : #include <linux/gfp.h>
      21                 :            : #include <linux/kernel_stat.h>
      22                 :            : #include <linux/swap.h>
      23                 :            : #include <linux/pagemap.h>
      24                 :            : #include <linux/init.h>
      25                 :            : #include <linux/highmem.h>
      26                 :            : #include <linux/vmpressure.h>
      27                 :            : #include <linux/vmstat.h>
      28                 :            : #include <linux/file.h>
      29                 :            : #include <linux/writeback.h>
      30                 :            : #include <linux/blkdev.h>
      31                 :            : #include <linux/buffer_head.h>    /* for try_to_release_page(),
      32                 :            :                                         buffer_heads_over_limit */
      33                 :            : #include <linux/mm_inline.h>
      34                 :            : #include <linux/backing-dev.h>
      35                 :            : #include <linux/rmap.h>
      36                 :            : #include <linux/topology.h>
      37                 :            : #include <linux/cpu.h>
      38                 :            : #include <linux/cpuset.h>
      39                 :            : #include <linux/compaction.h>
      40                 :            : #include <linux/notifier.h>
      41                 :            : #include <linux/rwsem.h>
      42                 :            : #include <linux/delay.h>
      43                 :            : #include <linux/kthread.h>
      44                 :            : #include <linux/freezer.h>
      45                 :            : #include <linux/memcontrol.h>
      46                 :            : #include <linux/delayacct.h>
      47                 :            : #include <linux/sysctl.h>
      48                 :            : #include <linux/oom.h>
      49                 :            : #include <linux/pagevec.h>
      50                 :            : #include <linux/prefetch.h>
      51                 :            : #include <linux/printk.h>
      52                 :            : #include <linux/dax.h>
      53                 :            : #include <linux/psi.h>
      54                 :            : 
      55                 :            : #include <asm/tlbflush.h>
      56                 :            : #include <asm/div64.h>
      57                 :            : 
      58                 :            : #include <linux/swapops.h>
      59                 :            : #include <linux/balloon_compaction.h>
      60                 :            : 
      61                 :            : #include "internal.h"
      62                 :            : 
      63                 :            : #define CREATE_TRACE_POINTS
      64                 :            : #include <trace/events/vmscan.h>
      65                 :            : 
      66                 :            : struct scan_control {
      67                 :            :         /* How many pages shrink_list() should reclaim */
      68                 :            :         unsigned long nr_to_reclaim;
      69                 :            : 
      70                 :            :         /*
      71                 :            :          * Nodemask of nodes allowed by the caller. If NULL, all nodes
      72                 :            :          * are scanned.
      73                 :            :          */
      74                 :            :         nodemask_t      *nodemask;
      75                 :            : 
      76                 :            :         /*
      77                 :            :          * The memory cgroup that hit its limit and as a result is the
      78                 :            :          * primary target of this reclaim invocation.
      79                 :            :          */
      80                 :            :         struct mem_cgroup *target_mem_cgroup;
      81                 :            : 
      82                 :            :         /* Can active pages be deactivated as part of reclaim? */
      83                 :            : #define DEACTIVATE_ANON 1
      84                 :            : #define DEACTIVATE_FILE 2
      85                 :            :         unsigned int may_deactivate:2;
      86                 :            :         unsigned int force_deactivate:1;
      87                 :            :         unsigned int skipped_deactivate:1;
      88                 :            : 
      89                 :            :         /* Writepage batching in laptop mode; RECLAIM_WRITE */
      90                 :            :         unsigned int may_writepage:1;
      91                 :            : 
      92                 :            :         /* Can mapped pages be reclaimed? */
      93                 :            :         unsigned int may_unmap:1;
      94                 :            : 
      95                 :            :         /* Can pages be swapped as part of reclaim? */
      96                 :            :         unsigned int may_swap:1;
      97                 :            : 
      98                 :            :         /*
      99                 :            :          * Cgroups are not reclaimed below their configured memory.low,
     100                 :            :          * unless we threaten to OOM. If any cgroups are skipped due to
     101                 :            :          * memory.low and nothing was reclaimed, go back for memory.low.
     102                 :            :          */
     103                 :            :         unsigned int memcg_low_reclaim:1;
     104                 :            :         unsigned int memcg_low_skipped:1;
     105                 :            : 
     106                 :            :         unsigned int hibernation_mode:1;
     107                 :            : 
     108                 :            :         /* One of the zones is ready for compaction */
     109                 :            :         unsigned int compaction_ready:1;
     110                 :            : 
     111                 :            :         /* There is easily reclaimable cold cache in the current node */
     112                 :            :         unsigned int cache_trim_mode:1;
     113                 :            : 
     114                 :            :         /* The file pages on the current node are dangerously low */
     115                 :            :         unsigned int file_is_tiny:1;
     116                 :            : 
     117                 :            :         /* Allocation order */
     118                 :            :         s8 order;
     119                 :            : 
     120                 :            :         /* Scan (total_size >> priority) pages at once */
     121                 :            :         s8 priority;
     122                 :            : 
     123                 :            :         /* The highest zone to isolate pages for reclaim from */
     124                 :            :         s8 reclaim_idx;
     125                 :            : 
     126                 :            :         /* This context's GFP mask */
     127                 :            :         gfp_t gfp_mask;
     128                 :            : 
     129                 :            :         /* Incremented by the number of inactive pages that were scanned */
     130                 :            :         unsigned long nr_scanned;
     131                 :            : 
     132                 :            :         /* Number of pages freed so far during a call to shrink_zones() */
     133                 :            :         unsigned long nr_reclaimed;
     134                 :            : 
     135                 :            :         struct {
     136                 :            :                 unsigned int dirty;
     137                 :            :                 unsigned int unqueued_dirty;
     138                 :            :                 unsigned int congested;
     139                 :            :                 unsigned int writeback;
     140                 :            :                 unsigned int immediate;
     141                 :            :                 unsigned int file_taken;
     142                 :            :                 unsigned int taken;
     143                 :            :         } nr;
     144                 :            : 
     145                 :            :         /* for recording the reclaimed slab by now */
     146                 :            :         struct reclaim_state reclaim_state;
     147                 :            : };
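
How a reclaim entry point fills this in: direct reclaim builds a scan_control on the stack per invocation. A minimal sketch, modeled on how try_to_free_pages() sets one up later in this file (the field choices shown are the direct-reclaim defaults; exact details vary by kernel version):

        struct scan_control sc = {
                .nr_to_reclaim = SWAP_CLUSTER_MAX,         /* one batch of pages */
                .gfp_mask = current_gfp_context(gfp_mask),
                .reclaim_idx = gfp_zone(gfp_mask),         /* highest eligible zone */
                .order = order,
                .nodemask = nodemask,
                .priority = DEF_PRIORITY,                  /* 12: scan total_size >> 12 */
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = 1,
        };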
     148                 :            : 
     149                 :            : #ifdef ARCH_HAS_PREFETCHW
     150                 :            : #define prefetchw_prev_lru_page(_page, _base, _field)                   \
     151                 :            :         do {                                                            \
     152                 :            :                 if ((_page)->lru.prev != _base) {                    \
     153                 :            :                         struct page *prev;                              \
     154                 :            :                                                                         \
     155                 :            :                         prev = lru_to_page(&(_page->lru));               \
     156                 :            :                         prefetchw(&prev->_field);                        \
     157                 :            :                 }                                                       \
     158                 :            :         } while (0)
     159                 :            : #else
     160                 :            : #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
     161                 :            : #endif
     162                 :            : 
     163                 :            : /*
     164                 :            :  * From 0 .. 100.  Higher means more swappy.
     165                 :            :  */
     166                 :            : int vm_swappiness = 60;
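
vm_swappiness biases the anon/file balance computed by get_scan_count(). Purely as an illustration (this is the classic swappiness weighting, not necessarily the exact formula in this kernel version): with the default of 60, scan pressure splits roughly 60/200 anon to 140/200 file:

        unsigned int anon_prio = vm_swappiness;        /* 60 by default */
        unsigned int file_prio = 200 - vm_swappiness;  /* 140 */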
     167                 :            : /*
     168                 :            :  * The total number of pages which are beyond the high watermark within all
     169                 :            :  * zones.
     170                 :            :  */
     171                 :            : unsigned long vm_total_pages;
     172                 :            : 
     173                 :          0 : static void set_task_reclaim_state(struct task_struct *task,
     174                 :            :                                    struct reclaim_state *rs)
     175                 :            : {
     176                 :            :         /* Check for an overwrite */
     177   [ #  #  #  #  :          0 :         WARN_ON_ONCE(rs && task->reclaim_state);
                   #  # ]
     178                 :            : 
     179                 :            :         /* Check for the nulling of an already-nulled member */
     180   [ #  #  #  #  :          0 :         WARN_ON_ONCE(!rs && !task->reclaim_state);
                   #  # ]
     181                 :            : 
     182                 :          0 :         task->reclaim_state = rs;
     183                 :          0 : }
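
Callers bracket a whole reclaim pass with this helper: install the per-invocation reclaim_state before reclaiming, so freed slab pages are credited to this context, and null it again on the way out. The pattern, as used by try_to_free_pages() in this file:

        set_task_reclaim_state(current, &sc.reclaim_state);
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
        set_task_reclaim_state(current, NULL);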
     184                 :            : 
     185                 :            : static LIST_HEAD(shrinker_list);
     186                 :            : static DECLARE_RWSEM(shrinker_rwsem);
     187                 :            : 
     188                 :            : #ifdef CONFIG_MEMCG
     189                 :            : /*
     190                 :            :  * We allow subsystems to populate their shrinker-related
     191                 :            :  * LRU lists before register_shrinker_prepared() is called
     192                 :            :  * for the shrinker, since we don't want to impose
     193                 :            :  * restrictions on their internal registration order.
     194                 :            :  * In this case shrink_slab_memcg() may find corresponding
     195                 :            :  * bit is set in the shrinkers map.
     196                 :            :  *
     197                 :            :  * This value is used by the function to detect registering
     198                 :            :  * shrinkers and to skip do_shrink_slab() calls for them.
     199                 :            :  */
     200                 :            : #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
     201                 :            : 
     202                 :            : static DEFINE_IDR(shrinker_idr);
     203                 :            : static int shrinker_nr_max;
     204                 :            : 
     205                 :            : static int prealloc_memcg_shrinker(struct shrinker *shrinker)
     206                 :            : {
     207                 :            :         int id, ret = -ENOMEM;
     208                 :            : 
     209                 :            :         down_write(&shrinker_rwsem);
     210                 :            :         /* This may call shrinker, so it must use down_read_trylock() */
     211                 :            :         id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
     212                 :            :         if (id < 0)
     213                 :            :                 goto unlock;
     214                 :            : 
     215                 :            :         if (id >= shrinker_nr_max) {
     216                 :            :                 if (memcg_expand_shrinker_maps(id)) {
     217                 :            :                         idr_remove(&shrinker_idr, id);
     218                 :            :                         goto unlock;
     219                 :            :                 }
     220                 :            : 
     221                 :            :                 shrinker_nr_max = id + 1;
     222                 :            :         }
     223                 :            :         shrinker->id = id;
     224                 :            :         ret = 0;
     225                 :            : unlock:
     226                 :            :         up_write(&shrinker_rwsem);
     227                 :            :         return ret;
     228                 :            : }
     229                 :            : 
     230                 :            : static void unregister_memcg_shrinker(struct shrinker *shrinker)
     231                 :            : {
     232                 :            :         int id = shrinker->id;
     233                 :            : 
     234                 :            :         BUG_ON(id < 0);
     235                 :            : 
     236                 :            :         down_write(&shrinker_rwsem);
     237                 :            :         idr_remove(&shrinker_idr, id);
     238                 :            :         up_write(&shrinker_rwsem);
     239                 :            : }
     240                 :            : 
     241                 :            : static bool cgroup_reclaim(struct scan_control *sc)
     242                 :            : {
     243                 :            :         return sc->target_mem_cgroup;
     244                 :            : }
     245                 :            : 
     246                 :            : /**
     247                 :            :  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
     248                 :            :  * @sc: scan_control in question
     249                 :            :  *
     250                 :            :  * The normal page dirty throttling mechanism in balance_dirty_pages() is
     251                 :            :  * completely broken with the legacy memcg and direct stalling in
     252                 :            :  * shrink_page_list() is used for throttling instead, which lacks all the
     253                 :            :  * niceties such as fairness, adaptive pausing, bandwidth proportional
     254                 :            :  * allocation and configurability.
     255                 :            :  *
     256                 :            :  * This function tests whether the vmscan currently in progress can assume
     257                 :            :  * that the normal dirty throttling mechanism is operational.
     258                 :            :  */
     259                 :            : static bool writeback_throttling_sane(struct scan_control *sc)
     260                 :            : {
     261                 :            :         if (!cgroup_reclaim(sc))
     262                 :            :                 return true;
     263                 :            : #ifdef CONFIG_CGROUP_WRITEBACK
     264                 :            :         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
     265                 :            :                 return true;
     266                 :            : #endif
     267                 :            :         return false;
     268                 :            : }
     269                 :            : #else
     270                 :            : static int prealloc_memcg_shrinker(struct shrinker *shrinker)
     271                 :            : {
     272                 :            :         return 0;
     273                 :            : }
     274                 :            : 
     275                 :            : static void unregister_memcg_shrinker(struct shrinker *shrinker)
     276                 :            : {
     277                 :            : }
     278                 :            : 
     279                 :          0 : static bool cgroup_reclaim(struct scan_control *sc)
     280                 :            : {
     281                 :          0 :         return false;
     282                 :            : }
     283                 :            : 
     284                 :          0 : static bool writeback_throttling_sane(struct scan_control *sc)
     285                 :            : {
     286                 :          0 :         return true;
     287                 :            : }
     288                 :            : #endif
     289                 :            : 
     290                 :            : /*
     291                 :            :  * This misses isolated pages which are not accounted for to save counters.
     292                 :            :  * As the data only determines if reclaim or compaction continues, it is
     293                 :            :  * not expected that isolated pages will be a dominating factor.
     294                 :            :  */
     295                 :          0 : unsigned long zone_reclaimable_pages(struct zone *zone)
     296                 :            : {
     297                 :          0 :         unsigned long nr;
     298                 :            : 
     299                 :          0 :         nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
     300                 :          0 :                 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
     301         [ #  # ]:          0 :         if (get_nr_swap_pages() > 0)
     302                 :          0 :                 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
     303                 :          0 :                         zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
     304                 :            : 
     305                 :          0 :         return nr;
     306                 :            : }
     307                 :            : 
     308                 :            : /**
     309                 :            :  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
     310                 :            :  * @lruvec: lru vector
     311                 :            :  * @lru: lru to use
     312                 :            :  * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
     313                 :            :  */
     314                 :          0 : unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
     315                 :            : {
     316                 :          0 :         unsigned long size = 0;
     317                 :          0 :         int zid;
     318                 :            : 
     319         [ #  # ]:          0 :         for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
     320                 :          0 :                 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
     321                 :            : 
     322         [ #  # ]:          0 :                 if (!managed_zone(zone))
     323                 :          0 :                         continue;
     324                 :            : 
     325                 :          0 :                 if (!mem_cgroup_disabled())
     326                 :            :                         size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
     327                 :            :                 else
     328                 :          0 :                         size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
     329                 :            :         }
     330                 :          0 :         return size;
     331                 :            : }
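
A typical call, per the comment above (MAX_NR_ZONES covers the whole list; a smaller zone_idx restricts the count to the zones a given allocation could use). Sketch:

        unsigned long inactive_file = lruvec_lru_size(lruvec,
                                        LRU_INACTIVE_FILE, MAX_NR_ZONES);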
     332                 :            : 
     333                 :            : /*
     334                 :            :  * Add a shrinker callback to be called from the vm.
     335                 :            :  */
     336                 :       3510 : int prealloc_shrinker(struct shrinker *shrinker)
     337                 :            : {
     338                 :       3510 :         unsigned int size = sizeof(*shrinker->nr_deferred);
     339                 :            : 
     340         [ +  + ]:       3510 :         if (shrinker->flags & SHRINKER_NUMA_AWARE)
     341                 :       2964 :                 size *= nr_node_ids;
     342                 :            : 
     343                 :       3510 :         shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
     344         [ -  + ]:       3510 :         if (!shrinker->nr_deferred)
     345                 :          0 :                 return -ENOMEM;
     346                 :            : 
     347                 :            :         if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
     348                 :            :                 if (prealloc_memcg_shrinker(shrinker))
     349                 :            :                         goto free_deferred;
     350                 :            :         }
     351                 :            : 
     352                 :            :         return 0;
     353                 :            : 
     354                 :            : free_deferred:
     355                 :            :         kfree(shrinker->nr_deferred);
     356                 :            :         shrinker->nr_deferred = NULL;
     357                 :            :         return -ENOMEM;
     358                 :            : }
     359                 :            : 
     360                 :          0 : void free_prealloced_shrinker(struct shrinker *shrinker)
     361                 :            : {
     362         [ #  # ]:          0 :         if (!shrinker->nr_deferred)
     363                 :            :                 return;
     364                 :            : 
     365                 :          0 :         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
     366                 :            :                 unregister_memcg_shrinker(shrinker);
     367                 :            : 
     368                 :          0 :         kfree(shrinker->nr_deferred);
     369                 :          0 :         shrinker->nr_deferred = NULL;
     370                 :            : }
     371                 :            : 
     372                 :       3510 : void register_shrinker_prepared(struct shrinker *shrinker)
     373                 :            : {
     374                 :       3510 :         down_write(&shrinker_rwsem);
     375                 :       3510 :         list_add_tail(&shrinker->list, &shrinker_list);
     376                 :            : #ifdef CONFIG_MEMCG
     377                 :            :         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
     378                 :            :                 idr_replace(&shrinker_idr, shrinker, shrinker->id);
     379                 :            : #endif
     380                 :       3510 :         up_write(&shrinker_rwsem);
     381                 :       3510 : }
     382                 :            : 
     383                 :        546 : int register_shrinker(struct shrinker *shrinker)
     384                 :            : {
     385                 :        546 :         int err = prealloc_shrinker(shrinker);
     386                 :            : 
     387         [ +  - ]:        546 :         if (err)
     388                 :            :                 return err;
     389                 :        546 :         register_shrinker_prepared(shrinker);
     390                 :        546 :         return 0;
     391                 :            : }
     392                 :            : EXPORT_SYMBOL(register_shrinker);
     393                 :            : 
     394                 :            : /*
     395                 :            :  * Remove one
     396                 :            :  */
     397                 :        156 : void unregister_shrinker(struct shrinker *shrinker)
     398                 :            : {
     399         [ +  - ]:        156 :         if (!shrinker->nr_deferred)
     400                 :            :                 return;
     401                 :        156 :         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
     402                 :            :                 unregister_memcg_shrinker(shrinker);
     403                 :        156 :         down_write(&shrinker_rwsem);
     404                 :        156 :         list_del(&shrinker->list);
     405                 :        156 :         up_write(&shrinker_rwsem);
     406                 :        156 :         kfree(shrinker->nr_deferred);
     407                 :        156 :         shrinker->nr_deferred = NULL;
     408                 :            : }
     409                 :            : EXPORT_SYMBOL(unregister_shrinker);
     410                 :            : 
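From a shrinker author's point of view, the functions above are the whole lifecycle. A minimal self-contained sketch of a client cache (every demo_* name is invented for illustration):

        static unsigned long demo_count(struct shrinker *s,
                                        struct shrink_control *sc)
        {
                /* cheap estimate; 0 or SHRINK_EMPTY means nothing to do */
                return demo_nr_cached_objects();        /* hypothetical helper */
        }

        static unsigned long demo_scan(struct shrinker *s,
                                       struct shrink_control *sc)
        {
                /* evict up to nr_to_scan objects, return how many were freed */
                return demo_evict(sc->nr_to_scan);      /* hypothetical helper */
        }

        static struct shrinker demo_shrinker = {
                .count_objects  = demo_count,
                .scan_objects   = demo_scan,
                .seeks          = DEFAULT_SEEKS,
        };

        /* module init: */  err = register_shrinker(&demo_shrinker);
        /* module exit: */  unregister_shrinker(&demo_shrinker);
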
     411                 :            : #define SHRINK_BATCH 128
     412                 :            : 
     413                 :          0 : static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
     414                 :            :                                     struct shrinker *shrinker, int priority)
     415                 :            : {
     416                 :          0 :         unsigned long freed = 0;
     417                 :          0 :         unsigned long long delta;
     418                 :          0 :         long total_scan;
     419                 :          0 :         long freeable;
     420                 :          0 :         long nr;
     421                 :          0 :         long new_nr;
     422                 :          0 :         int nid = shrinkctl->nid;
     423                 :          0 :         long batch_size = shrinker->batch ? shrinker->batch
     424         [ #  # ]:          0 :                                           : SHRINK_BATCH;
     425                 :          0 :         long scanned = 0, next_deferred;
     426                 :            : 
     427         [ #  # ]:          0 :         if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
     428                 :          0 :                 nid = 0;
     429                 :            : 
     430                 :          0 :         freeable = shrinker->count_objects(shrinker, shrinkctl);
     431         [ #  # ]:          0 :         if (freeable == 0 || freeable == SHRINK_EMPTY)
     432                 :            :                 return freeable;
     433                 :            : 
     434                 :            :         /*
     435                 :            :          * copy the current shrinker scan count into a local variable
     436                 :            :          * and zero it so that other concurrent shrinker invocations
     437                 :            :          * don't also do this scanning work.
     438                 :            :          */
     439                 :          0 :         nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
     440                 :            : 
     441                 :          0 :         total_scan = nr;
     442         [ #  # ]:          0 :         if (shrinker->seeks) {
     443                 :          0 :                 delta = freeable >> priority;
     444                 :          0 :                 delta *= 4;
     445                 :          0 :                 do_div(delta, shrinker->seeks);
     446                 :            :         } else {
     447                 :            :                 /*
     448                 :            :                  * These objects don't require any IO to create. Trim
     449                 :            :                  * them aggressively under memory pressure to keep
     450                 :            :                  * them from causing refetches in the IO caches.
     451                 :            :                  */
     452                 :          0 :                 delta = freeable / 2;
     453                 :            :         }
     454                 :            : 
     455                 :          0 :         total_scan += delta;
     456         [ #  # ]:          0 :         if (total_scan < 0) {
     457                 :          0 :                 pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
     458                 :            :                        shrinker->scan_objects, total_scan);
     459                 :          0 :                 total_scan = freeable;
     460                 :          0 :                 next_deferred = nr;
     461                 :            :         } else
     462                 :            :                 next_deferred = total_scan;
     463                 :            : 
     464                 :            :         /*
     465                 :            :          * We need to avoid excessive windup on filesystem shrinkers
     466                 :            :          * due to large numbers of GFP_NOFS allocations causing the
     467                 :            :          * shrinkers to return -1 all the time. This results in a large
     468                 :            :          * nr being built up so when a shrink that can do some work
     469                 :            :          * comes along it empties the entire cache due to nr >>>
     470                 :            :          * freeable. This is bad for sustaining a working set in
     471                 :            :          * memory.
     472                 :            :          *
     473                 :            :          * Hence only allow the shrinker to scan the entire cache when
     474                 :            :          * a large delta change is calculated directly.
     475                 :            :          */
     476         [ #  # ]:          0 :         if (delta < freeable / 4)
     477                 :          0 :                 total_scan = min(total_scan, freeable / 2);
     478                 :            : 
     479                 :            :         /*
     480                 :            :          * Avoid risking looping forever due to too large nr value:
     481                 :            :          * never try to free more than twice the estimate number of
     482                 :            :          * freeable entries.
     483                 :            :          */
     484                 :          0 :         if (total_scan > freeable * 2)
     485                 :            :                 total_scan = freeable * 2;
     486                 :            : 
     487                 :          0 :         trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
     488                 :            :                                    freeable, delta, total_scan, priority);
     489                 :            : 
     490                 :            :         /*
     491                 :            :          * Normally, we should not scan less than batch_size objects in one
     492                 :            :          * pass to avoid too frequent shrinker calls, but if the slab has less
     493                 :            :          * than batch_size objects in total and we are really tight on memory,
     494                 :            :          * we will try to reclaim all available objects, otherwise we can end
     495                 :            :          * up failing allocations although there are plenty of reclaimable
     496                 :            :          * objects spread over several slabs with usage less than the
     497                 :            :          * batch_size.
     498                 :            :          *
     499                 :            :          * We detect the "tight on memory" situations by looking at the total
     500                 :            :          * number of objects we want to scan (total_scan). If it is greater
     501                 :            :          * than the total number of objects on slab (freeable), we must be
     502                 :            :          * scanning at high prio and therefore should try to reclaim as much as
     503                 :            :          * possible.
     504                 :            :          */
     505         [ #  # ]:          0 :         while (total_scan >= batch_size ||
     506                 :            :                total_scan >= freeable) {
     507                 :          0 :                 unsigned long ret;
     508                 :          0 :                 unsigned long nr_to_scan = min(batch_size, total_scan);
     509                 :            : 
     510                 :          0 :                 shrinkctl->nr_to_scan = nr_to_scan;
     511                 :          0 :                 shrinkctl->nr_scanned = nr_to_scan;
     512                 :          0 :                 ret = shrinker->scan_objects(shrinker, shrinkctl);
     513         [ #  # ]:          0 :                 if (ret == SHRINK_STOP)
     514                 :            :                         break;
     515                 :          0 :                 freed += ret;
     516                 :            : 
     517         [ #  # ]:          0 :                 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
     518                 :          0 :                 total_scan -= shrinkctl->nr_scanned;
     519                 :          0 :                 scanned += shrinkctl->nr_scanned;
     520                 :            : 
     521                 :          0 :                 cond_resched();
     522                 :            :         }
     523                 :            : 
     524         [ #  # ]:          0 :         if (next_deferred >= scanned)
     525                 :          0 :                 next_deferred -= scanned;
     526                 :            :         else
     527                 :            :                 next_deferred = 0;
     528                 :            :         /*
     529                 :            :          * move the unused scan count back into the shrinker in a
     530                 :            :          * manner that handles concurrent updates. If we exhausted the
     531                 :            :          * scan, there is no need to do an update.
     532                 :            :          */
     533         [ #  # ]:          0 :         if (next_deferred > 0)
     534                 :          0 :                 new_nr = atomic_long_add_return(next_deferred,
     535                 :          0 :                                                 &shrinker->nr_deferred[nid]);
     536                 :            :         else
     537                 :          0 :                 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
     538                 :            : 
     539                 :          0 :         trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
     540                 :          0 :         return freed;
     541                 :            : }
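
A worked instance of the delta arithmetic above, with illustrative numbers: freeable = 10000 objects, priority = DEF_PRIORITY (12), shrinker->seeks = DEFAULT_SEEKS (2):

        /*
         * delta = (10000 >> 12) * 4 / 2 = 2 * 4 / 2 = 4
         *         -> barely any pressure at the lowest priority.
         * At priority 0 (most desperate):
         * delta = (10000 >> 0) * 4 / 2 = 20000,
         * and total_scan is then capped at freeable * 2 = 20000.
         */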
     542                 :            : 
     543                 :            : #ifdef CONFIG_MEMCG
     544                 :            : static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
     545                 :            :                         struct mem_cgroup *memcg, int priority)
     546                 :            : {
     547                 :            :         struct memcg_shrinker_map *map;
     548                 :            :         unsigned long ret, freed = 0;
     549                 :            :         int i;
     550                 :            : 
     551                 :            :         if (!mem_cgroup_online(memcg))
     552                 :            :                 return 0;
     553                 :            : 
     554                 :            :         if (!down_read_trylock(&shrinker_rwsem))
     555                 :            :                 return 0;
     556                 :            : 
     557                 :            :         map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
     558                 :            :                                         true);
     559                 :            :         if (unlikely(!map))
     560                 :            :                 goto unlock;
     561                 :            : 
     562                 :            :         for_each_set_bit(i, map->map, shrinker_nr_max) {
     563                 :            :                 struct shrink_control sc = {
     564                 :            :                         .gfp_mask = gfp_mask,
     565                 :            :                         .nid = nid,
     566                 :            :                         .memcg = memcg,
     567                 :            :                 };
     568                 :            :                 struct shrinker *shrinker;
     569                 :            : 
     570                 :            :                 shrinker = idr_find(&shrinker_idr, i);
     571                 :            :                 if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
     572                 :            :                         if (!shrinker)
     573                 :            :                                 clear_bit(i, map->map);
     574                 :            :                         continue;
     575                 :            :                 }
     576                 :            : 
     577                 :            :                 /* Call non-slab shrinkers even though kmem is disabled */
     578                 :            :                 if (!memcg_kmem_enabled() &&
     579                 :            :                     !(shrinker->flags & SHRINKER_NONSLAB))
     580                 :            :                         continue;
     581                 :            : 
     582                 :            :                 ret = do_shrink_slab(&sc, shrinker, priority);
     583                 :            :                 if (ret == SHRINK_EMPTY) {
     584                 :            :                         clear_bit(i, map->map);
     585                 :            :                         /*
     586                 :            :                          * After the shrinker reported that it had no objects to
     587                 :            :                          * free, but before we cleared the corresponding bit in
     588                 :            :                          * the memcg shrinker map, a new object might have been
     589                 :            :                          * added. To make sure, we have the bit set in this
     590                 :            :                          * case, we invoke the shrinker one more time and reset
     591                 :            :                          * the bit if it reports that it is not empty anymore.
     592                 :            :                          * The memory barrier here pairs with the barrier in
     593                 :            :                          * memcg_set_shrinker_bit():
     594                 :            :                          *
     595                 :            :                          * list_lru_add()     shrink_slab_memcg()
     596                 :            :                          *   list_add_tail()    clear_bit()
     597                 :            :                          *   <MB>               <MB>
     598                 :            :                          *   set_bit()          do_shrink_slab()
     599                 :            :                          */
     600                 :            :                         smp_mb__after_atomic();
     601                 :            :                         ret = do_shrink_slab(&sc, shrinker, priority);
     602                 :            :                         if (ret == SHRINK_EMPTY)
     603                 :            :                                 ret = 0;
     604                 :            :                         else
     605                 :            :                                 memcg_set_shrinker_bit(memcg, nid, i);
     606                 :            :                 }
     607                 :            :                 freed += ret;
     608                 :            : 
     609                 :            :                 if (rwsem_is_contended(&shrinker_rwsem)) {
     610                 :            :                         freed = freed ? : 1;
     611                 :            :                         break;
     612                 :            :                 }
     613                 :            :         }
     614                 :            : unlock:
     615                 :            :         up_read(&shrinker_rwsem);
     616                 :            :         return freed;
     617                 :            : }
     618                 :            : #else /* CONFIG_MEMCG */
     619                 :            : static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
     620                 :            :                         struct mem_cgroup *memcg, int priority)
     621                 :            : {
     622                 :            :         return 0;
     623                 :            : }
     624                 :            : #endif /* CONFIG_MEMCG */
     625                 :            : 
     626                 :            : /**
     627                 :            :  * shrink_slab - shrink slab caches
     628                 :            :  * @gfp_mask: allocation context
     629                 :            :  * @nid: node whose slab caches to target
     630                 :            :  * @memcg: memory cgroup whose slab caches to target
     631                 :            :  * @priority: the reclaim priority
     632                 :            :  *
     633                 :            :  * Call the shrink functions to age shrinkable caches.
     634                 :            :  *
     635                 :            :  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
     636                 :            :  * unaware shrinkers will receive a node id of 0 instead.
     637                 :            :  *
     638                 :            :  * @memcg specifies the memory cgroup to target. Unaware shrinkers
     639                 :            :  * are called only if it is the root cgroup.
     640                 :            :  *
     641                 :            :  * @priority is sc->priority, we take the number of objects and >> by priority
     642                 :            :  * in order to get the scan target.
     643                 :            :  *
     644                 :            :  * Returns the number of reclaimed slab objects.
     645                 :            :  */
     646                 :          0 : static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
     647                 :            :                                  struct mem_cgroup *memcg,
     648                 :            :                                  int priority)
     649                 :            : {
     650                 :          0 :         unsigned long ret, freed = 0;
     651                 :          0 :         struct shrinker *shrinker;
     652                 :            : 
     653                 :            :         /*
     654                 :            :          * The root memcg might be allocated even though memcg is disabled
     655                 :            :          * via "cgroup_disable=memory" boot parameter.  This could make
     656                 :            :          * mem_cgroup_is_root() return false, then just run memcg slab
     657                 :            :          * shrink, but skip global shrink.  This may result in premature
     658                 :            :          * oom.
     659                 :            :          */
     660                 :          0 :         if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
     661                 :            :                 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
     662                 :            : 
     663         [ #  # ]:          0 :         if (!down_read_trylock(&shrinker_rwsem))
     664                 :          0 :                 goto out;
     665                 :            : 
     666         [ #  # ]:          0 :         list_for_each_entry(shrinker, &shrinker_list, list) {
     667                 :          0 :                 struct shrink_control sc = {
     668                 :            :                         .gfp_mask = gfp_mask,
     669                 :            :                         .nid = nid,
     670                 :            :                         .memcg = memcg,
     671                 :            :                 };
     672                 :            : 
     673                 :          0 :                 ret = do_shrink_slab(&sc, shrinker, priority);
     674         [ #  # ]:          0 :                 if (ret == SHRINK_EMPTY)
     675                 :          0 :                         ret = 0;
     676                 :          0 :                 freed += ret;
     677                 :            :                 /*
      678                 :            :                  * Bail out if someone wants to register a new shrinker to
      679                 :            :                  * prevent the registration from being stalled for long periods
     680                 :            :                  * by parallel ongoing shrinking.
     681                 :            :                  */
     682         [ #  # ]:          0 :                 if (rwsem_is_contended(&shrinker_rwsem)) {
     683                 :          0 :                         freed = freed ? : 1;
     684                 :          0 :                         break;
     685                 :            :                 }
     686                 :            :         }
     687                 :            : 
     688                 :          0 :         up_read(&shrinker_rwsem);
     689                 :          0 : out:
     690                 :          0 :         cond_resched();
     691                 :          0 :         return freed;
     692                 :            : }
     693                 :            : 
     694                 :          0 : void drop_slab_node(int nid)
     695                 :            : {
     696                 :          0 :         unsigned long freed;
     697                 :            : 
     698                 :          0 :         do {
     699                 :          0 :                 struct mem_cgroup *memcg = NULL;
     700                 :            : 
     701                 :          0 :                 freed = 0;
     702                 :          0 :                 memcg = mem_cgroup_iter(NULL, NULL, NULL);
     703                 :          0 :                 do {
     704                 :          0 :                         freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
     705   [ #  #  #  # ]:          0 :                 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
     706   [ #  #  #  # ]:          0 :         } while (freed > 10);
     707                 :          0 : }
     708                 :            : 
     709                 :          0 : void drop_slab(void)
     710                 :            : {
     711                 :          0 :         int nid;
     712                 :            : 
     713         [ #  # ]:          0 :         for_each_online_node(nid)
     714                 :            :                 drop_slab_node(nid);
     715                 :          0 : }
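
drop_slab() is also reachable directly from userspace: the drop_caches sysctl handler in fs/drop_caches.c calls it when bit 1 of the written value is set. Sketch of that caller (abridged):

        /*
         * "echo 2 > /proc/sys/vm/drop_caches" frees slab objects;
         * "echo 3" drops the page cache as well.
         */
        if (sysctl_drop_caches & 2)
                drop_slab();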
     716                 :            : 
     717                 :          0 : static inline int is_page_cache_freeable(struct page *page)
     718                 :            : {
     719                 :            :         /*
     720                 :            :          * A freeable page cache page is referenced only by the caller
     721                 :            :          * that isolated the page, the page cache and optional buffer
     722                 :            :          * heads at page->private.
     723                 :            :          */
     724         [ #  # ]:          0 :         int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
     725                 :            :                 HPAGE_PMD_NR : 1;
     726         [ #  # ]:          0 :         return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
     727                 :            : }
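
A worked instance of the check above: a regular (non-THP, non-swapcache) page isolated for reclaim has page_cache_pins == 1. With buffer heads attached, PG_private holds one extra reference, so page_has_private() == 1 and page_count() == 3; then 3 - 1 == 1 + 1 and the page counts as freeable. Any additional reference (say, a concurrent get_user_pages()) breaks the equality and keeps the page.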
     728                 :            : 
     729                 :          0 : static int may_write_to_inode(struct inode *inode)
     730                 :            : {
     731         [ #  # ]:          0 :         if (current->flags & PF_SWAPWRITE)
     732                 :            :                 return 1;
     733         [ #  # ]:          0 :         if (!inode_write_congested(inode))
     734                 :            :                 return 1;
     735         [ #  # ]:          0 :         if (inode_to_bdi(inode) == current->backing_dev_info)
     736                 :          0 :                 return 1;
     737                 :            :         return 0;
     738                 :            : }
     739                 :            : 
     740                 :            : /*
     741                 :            :  * We detected a synchronous write error writing a page out.  Probably
     742                 :            :  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
     743                 :            :  * fsync(), msync() or close().
     744                 :            :  *
     745                 :            :  * The tricky part is that after writepage we cannot touch the mapping: nothing
     746                 :            :  * prevents it from being freed up.  But we have a ref on the page and once
     747                 :            :  * that page is locked, the mapping is pinned.
     748                 :            :  *
     749                 :            :  * We're allowed to run sleeping lock_page() here because we know the caller has
     750                 :            :  * __GFP_FS.
     751                 :            :  */
     752                 :          0 : static void handle_write_error(struct address_space *mapping,
     753                 :            :                                 struct page *page, int error)
     754                 :            : {
     755                 :          0 :         lock_page(page);
     756         [ #  # ]:          0 :         if (page_mapping(page) == mapping)
     757                 :          0 :                 mapping_set_error(mapping, error);
     758                 :          0 :         unlock_page(page);
     759                 :          0 : }
     760                 :            : 
     761                 :            : /* possible outcome of pageout() */
     762                 :            : typedef enum {
     763                 :            :         /* failed to write page out, page is locked */
     764                 :            :         PAGE_KEEP,
     765                 :            :         /* move page to the active list, page is locked */
     766                 :            :         PAGE_ACTIVATE,
     767                 :            :         /* page has been sent to the disk successfully, page is unlocked */
     768                 :            :         PAGE_SUCCESS,
     769                 :            :         /* page is clean and locked */
     770                 :            :         PAGE_CLEAN,
     771                 :            : } pageout_t;
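
shrink_page_list() dispatches on this enum; a condensed sketch of that consumer (the real switch carries extra writeback bookkeeping, and the labels here stand in for the real ones):

        switch (pageout(page, mapping)) {
        case PAGE_KEEP:
                goto keep_locked;       /* retry on a later scan */
        case PAGE_ACTIVATE:
                goto activate_locked;   /* e.g. no ->writepage */
        case PAGE_SUCCESS:
                goto io_submitted;      /* hypothetical label */
        case PAGE_CLEAN:
                break;                  /* clean: try to free it below */
        }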
     772                 :            : 
     773                 :            : /*
     774                 :            :  * pageout is called by shrink_page_list() for each dirty page.
     775                 :            :  * Calls ->writepage().
     776                 :            :  */
     777                 :          0 : static pageout_t pageout(struct page *page, struct address_space *mapping)
     778                 :            : {
     779                 :            :         /*
     780                 :            :          * If the page is dirty, only perform writeback if that write
      781                 :            :          * will be non-blocking, to prevent this allocation from being
     782                 :            :          * stalled by pagecache activity.  But note that there may be
     783                 :            :          * stalls if we need to run get_block().  We could test
     784                 :            :          * PagePrivate for that.
     785                 :            :          *
     786                 :            :          * If this process is currently in __generic_file_write_iter() against
     787                 :            :          * this page's queue, we can perform writeback even if that
     788                 :            :          * will block.
     789                 :            :          *
     790                 :            :          * If the page is swapcache, write it back even if that would
     791                 :            :          * block, for some throttling. This happens by accident, because
     792                 :            :          * swap_backing_dev_info is bust: it doesn't reflect the
     793                 :            :          * congestion state of the swapdevs.  Easy to fix, if needed.
     794                 :            :          */
     795         [ #  # ]:          0 :         if (!is_page_cache_freeable(page))
     796                 :            :                 return PAGE_KEEP;
     797         [ #  # ]:          0 :         if (!mapping) {
     798                 :            :                 /*
     799                 :            :                  * Some data journaling orphaned pages can have
     800                 :            :                  * page->mapping == NULL while being dirty with clean buffers.
     801                 :            :                  */
     802         [ #  # ]:          0 :                 if (page_has_private(page)) {
     803         [ #  # ]:          0 :                         if (try_to_free_buffers(page)) {
     804         [ #  # ]:          0 :                                 ClearPageDirty(page);
     805                 :          0 :                                 pr_info("%s: orphaned page\n", __func__);
     806                 :          0 :                                 return PAGE_CLEAN;
     807                 :            :                         }
     808                 :            :                 }
     809                 :          0 :                 return PAGE_KEEP;
     810                 :            :         }
     811         [ #  # ]:          0 :         if (mapping->a_ops->writepage == NULL)
     812                 :            :                 return PAGE_ACTIVATE;
     813         [ #  # ]:          0 :         if (!may_write_to_inode(mapping->host))
     814                 :            :                 return PAGE_KEEP;
     815                 :            : 
     816         [ #  # ]:          0 :         if (clear_page_dirty_for_io(page)) {
     817                 :          0 :                 int res;
     818                 :          0 :                 struct writeback_control wbc = {
     819                 :            :                         .sync_mode = WB_SYNC_NONE,
     820                 :            :                         .nr_to_write = SWAP_CLUSTER_MAX,
     821                 :            :                         .range_start = 0,
     822                 :            :                         .range_end = LLONG_MAX,
     823                 :            :                         .for_reclaim = 1,
     824                 :            :                 };
     825                 :            : 
     826         [ #  # ]:          0 :                 SetPageReclaim(page);
     827                 :          0 :                 res = mapping->a_ops->writepage(page, &wbc);
     828         [ #  # ]:          0 :                 if (res < 0)
     829                 :          0 :                         handle_write_error(mapping, page, res);
     830         [ #  # ]:          0 :                 if (res == AOP_WRITEPAGE_ACTIVATE) {
     831         [ #  # ]:          0 :                         ClearPageReclaim(page);
     832                 :          0 :                         return PAGE_ACTIVATE;
     833                 :            :                 }
     834                 :            : 
     835   [ #  #  #  # ]:          0 :                 if (!PageWriteback(page)) {
     836                 :            :                         /* synchronous write or broken a_ops? */
     837         [ #  # ]:          0 :                         ClearPageReclaim(page);
     838                 :            :                 }
     839                 :          0 :                 trace_mm_vmscan_writepage(page);
     840                 :          0 :                 inc_node_page_state(page, NR_VMSCAN_WRITE);
     841                 :          0 :                 return PAGE_SUCCESS;
     842                 :            :         }
     843                 :            : 
     844                 :            :         return PAGE_CLEAN;
     845                 :            : }
     846                 :            : 
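As a reading aid, a hedged sketch of how a caller is expected to react to
each pageout_t value; the authoritative handling is the switch statement in
shrink_page_list() below. example_try_pageout() is hypothetical.

static bool example_try_pageout(struct page *page,
				struct address_space *mapping)
{
	switch (pageout(page, mapping)) {
	case PAGE_KEEP:		/* write failed; page is still locked */
	case PAGE_ACTIVATE:	/* not reclaimable now; reactivate it */
		return false;
	case PAGE_SUCCESS:	/* IO submitted; pageout() unlocked the page */
	case PAGE_CLEAN:	/* nothing to write; page is clean and locked */
		return true;
	}
	return false;
}
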
     847                 :            : /*
     848                 :            :  * Same as remove_mapping, but if the page is removed from the mapping, it
     849                 :            :  * gets returned with a refcount of 0.
     850                 :            :  */
     851                 :          0 : static int __remove_mapping(struct address_space *mapping, struct page *page,
     852                 :            :                             bool reclaimed, struct mem_cgroup *target_memcg)
     853                 :            : {
     854                 :          0 :         unsigned long flags;
     855                 :          0 :         int refcount;
     856                 :            : 
     857   [ #  #  #  # ]:          0 :         BUG_ON(!PageLocked(page));
     858         [ #  # ]:          0 :         BUG_ON(mapping != page_mapping(page));
     859                 :            : 
     860                 :          0 :         xa_lock_irqsave(&mapping->i_pages, flags);
     861                 :            :         /*
      862                 :            :          * The non-racy check for a busy page.
     863                 :            :          *
     864                 :            :          * Must be careful with the order of the tests. When someone has
     865                 :            :          * a ref to the page, it may be possible that they dirty it then
     866                 :            :          * drop the reference. So if PageDirty is tested before page_count
     867                 :            :          * here, then the following race may occur:
     868                 :            :          *
     869                 :            :          * get_user_pages(&page);
     870                 :            :          * [user mapping goes away]
     871                 :            :          * write_to(page);
     872                 :            :          *                              !PageDirty(page)    [good]
     873                 :            :          * SetPageDirty(page);
     874                 :            :          * put_page(page);
     875                 :            :          *                              !page_count(page)   [good, discard it]
     876                 :            :          *
     877                 :            :          * [oops, our write_to data is lost]
     878                 :            :          *
     879                 :            :          * Reversing the order of the tests ensures such a situation cannot
     880                 :            :          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
     881                 :            :          * load is not satisfied before that of page->_refcount.
     882                 :            :          *
     883                 :            :          * Note that if SetPageDirty is always performed via set_page_dirty,
     884                 :            :          * and thus under the i_pages lock, then this ordering is not required.
     885                 :            :          */
     886                 :          0 :         refcount = 1 + compound_nr(page);
     887         [ #  # ]:          0 :         if (!page_ref_freeze(page, refcount))
     888                 :          0 :                 goto cannot_free;
     889                 :            :         /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
     890   [ #  #  #  # ]:          0 :         if (unlikely(PageDirty(page))) {
     891                 :          0 :                 page_ref_unfreeze(page, refcount);
     892                 :          0 :                 goto cannot_free;
     893                 :            :         }
     894                 :            : 
     895         [ #  # ]:          0 :         if (PageSwapCache(page)) {
     896                 :          0 :                 swp_entry_t swap = { .val = page_private(page) };
     897                 :          0 :                 mem_cgroup_swapout(page, swap);
     898                 :          0 :                 __delete_from_swap_cache(page, swap);
     899                 :          0 :                 xa_unlock_irqrestore(&mapping->i_pages, flags);
     900                 :          0 :                 put_swap_page(page, swap);
     901                 :            :         } else {
     902                 :          0 :                 void (*freepage)(struct page *);
     903                 :          0 :                 void *shadow = NULL;
     904                 :            : 
     905                 :          0 :                 freepage = mapping->a_ops->freepage;
     906                 :            :                 /*
     907                 :            :                  * Remember a shadow entry for reclaimed file cache in
     908                 :            :                  * order to detect refaults, thus thrashing, later on.
     909                 :            :                  *
     910                 :            :                  * But don't store shadows in an address space that is
      911                 :            :                  * already exiting.  This is not just an optimization,
     912                 :            :                  * inode reclaim needs to empty out the radix tree or
     913                 :            :                  * the nodes are lost.  Don't plant shadows behind its
     914                 :            :                  * back.
     915                 :            :                  *
     916                 :            :                  * We also don't store shadows for DAX mappings because the
     917                 :            :                  * only page cache pages found in these are zero pages
     918                 :            :                  * covering holes, and because we don't want to mix DAX
     919                 :            :                  * exceptional entries and shadow exceptional entries in the
     920                 :            :                  * same address_space.
     921                 :            :                  */
     922   [ #  #  #  #  :          0 :                 if (reclaimed && page_is_file_cache(page) &&
                   #  # ]
     923                 :            :                     !mapping_exiting(mapping) && !dax_mapping(mapping))
     924                 :          0 :                         shadow = workingset_eviction(page, target_memcg);
     925                 :          0 :                 __delete_from_page_cache(page, shadow);
     926                 :          0 :                 xa_unlock_irqrestore(&mapping->i_pages, flags);
     927                 :            : 
     928         [ #  # ]:          0 :                 if (freepage != NULL)
     929                 :          0 :                         freepage(page);
     930                 :            :         }
     931                 :            : 
     932                 :            :         return 1;
     933                 :            : 
     934                 :          0 : cannot_free:
     935                 :          0 :         xa_unlock_irqrestore(&mapping->i_pages, flags);
     936                 :          0 :         return 0;
     937                 :            : }
     938                 :            : 
     939                 :            : /*
     940                 :            :  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
     941                 :            :  * someone else has a ref on the page, abort and return 0.  If it was
     942                 :            :  * successfully detached, return 1.  Assumes the caller has a single ref on
     943                 :            :  * this page.
     944                 :            :  */
     945                 :          0 : int remove_mapping(struct address_space *mapping, struct page *page)
     946                 :            : {
     947         [ #  # ]:          0 :         if (__remove_mapping(mapping, page, false, NULL)) {
     948                 :            :                 /*
     949                 :            :                  * Unfreezing the refcount with 1 rather than 2 effectively
     950                 :            :                  * drops the pagecache ref for us without requiring another
     951                 :            :                  * atomic operation.
     952                 :            :                  */
     953                 :          0 :                 page_ref_unfreeze(page, 1);
     954                 :          0 :                 return 1;
     955                 :            :         }
     956                 :            :         return 0;
     957                 :            : }
     958                 :            : 
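A simplified caller, loosely modelled on the invalidate path in
mm/truncate.c: only a locked, clean, unmapped page may be detached this way.
The helper name is illustrative.

static int example_invalidate_page(struct address_space *mapping,
				   struct page *page)
{
	/* The caller must hold the page lock; see the BUG_ON above. */
	if (PageDirty(page) || PageWriteback(page))
		return 0;	/* discarding would lose data */
	if (page_mapped(page))
		return 0;	/* still mapped into some process */
	return remove_mapping(mapping, page);
}
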
     959                 :            : /**
     960                 :            :  * putback_lru_page - put previously isolated page onto appropriate LRU list
     961                 :            :  * @page: page to be put back to appropriate lru list
     962                 :            :  *
     963                 :            :  * Add previously isolated @page to appropriate LRU list.
     964                 :            :  * Page may still be unevictable for other reasons.
     965                 :            :  *
     966                 :            :  * lru_lock must not be held, interrupts must be enabled.
     967                 :            :  */
     968                 :          0 : void putback_lru_page(struct page *page)
     969                 :            : {
     970                 :          0 :         lru_cache_add(page);
     971                 :          0 :         put_page(page);         /* drop ref from isolate */
     972                 :          0 : }
     973                 :            : 
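The usual pairing, shown as a hypothetical helper: isolate_lru_page() takes
a reference when it removes a page from its LRU, and putback_lru_page()
re-adds the page and drops that reference.

static void example_isolate_then_putback(struct page *page)
{
	if (isolate_lru_page(page))	/* non-zero: page was not on an LRU */
		return;
	/* ... operate on the isolated page, e.g. migrate it ... */
	putback_lru_page(page);		/* back onto an LRU; isolate ref dropped */
}
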
     974                 :            : enum page_references {
     975                 :            :         PAGEREF_RECLAIM,
     976                 :            :         PAGEREF_RECLAIM_CLEAN,
     977                 :            :         PAGEREF_KEEP,
     978                 :            :         PAGEREF_ACTIVATE,
     979                 :            : };
     980                 :            : 
     981                 :            : static enum page_references page_check_references(struct page *page,
     982                 :            :                                                   struct scan_control *sc)
     983                 :            : {
     984                 :            :         int referenced_ptes, referenced_page;
     985                 :            :         unsigned long vm_flags;
     986                 :            : 
     987                 :            :         referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
     988                 :            :                                           &vm_flags);
     989                 :            :         referenced_page = TestClearPageReferenced(page);
     990                 :            : 
     991                 :            :         /*
     992                 :            :          * Mlock lost the isolation race with us.  Let try_to_unmap()
     993                 :            :          * move the page to the unevictable list.
     994                 :            :          */
     995                 :            :         if (vm_flags & VM_LOCKED)
     996                 :            :                 return PAGEREF_RECLAIM;
     997                 :            : 
     998                 :            :         if (referenced_ptes) {
     999                 :            :                 if (PageSwapBacked(page))
    1000                 :            :                         return PAGEREF_ACTIVATE;
    1001                 :            :                 /*
    1002                 :            :                  * All mapped pages start out with page table
    1003                 :            :                  * references from the instantiating fault, so we need
    1004                 :            :                  * to look twice if a mapped file page is used more
    1005                 :            :                  * than once.
    1006                 :            :                  *
    1007                 :            :                  * Mark it and spare it for another trip around the
    1008                 :            :                  * inactive list.  Another page table reference will
    1009                 :            :                  * lead to its activation.
    1010                 :            :                  *
    1011                 :            :                  * Note: the mark is set for activated pages as well
    1012                 :            :                  * so that recently deactivated but used pages are
    1013                 :            :                  * quickly recovered.
    1014                 :            :                  */
    1015                 :            :                 SetPageReferenced(page);
    1016                 :            : 
    1017                 :            :                 if (referenced_page || referenced_ptes > 1)
    1018                 :            :                         return PAGEREF_ACTIVATE;
    1019                 :            : 
    1020                 :            :                 /*
    1021                 :            :                  * Activate file-backed executable pages after first usage.
    1022                 :            :                  */
    1023                 :            :                 if (vm_flags & VM_EXEC)
    1024                 :            :                         return PAGEREF_ACTIVATE;
    1025                 :            : 
    1026                 :            :                 return PAGEREF_KEEP;
    1027                 :            :         }
    1028                 :            : 
    1029                 :            :         /* Reclaim if clean, defer dirty pages to writeback */
    1030                 :            :         if (referenced_page && !PageSwapBacked(page))
    1031                 :            :                 return PAGEREF_RECLAIM_CLEAN;
    1032                 :            : 
    1033                 :            :         return PAGEREF_RECLAIM;
    1034                 :            : }
    1035                 :            : 
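A condensed restatement of the decision logic above, in the order the tests
are applied (editorial summary, not from the source):

/*
 *   VM_LOCKED vma referenced it    -> PAGEREF_RECLAIM (try_to_unmap() will
 *                                     move it to the unevictable list)
 *   PTE-referenced, swap-backed    -> PAGEREF_ACTIVATE
 *   PTE-referenced, PG_referenced
 *     set or more than one PTE     -> PAGEREF_ACTIVATE
 *   PTE-referenced, VM_EXEC file   -> PAGEREF_ACTIVATE
 *   PTE-referenced, first use      -> PAGEREF_KEEP (marked for next pass)
 *   unreferenced, clean file page  -> PAGEREF_RECLAIM_CLEAN
 *   anything else                  -> PAGEREF_RECLAIM
 */
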
    1036                 :            : /* Check if a page is dirty or under writeback */
    1037                 :          0 : static void page_check_dirty_writeback(struct page *page,
    1038                 :            :                                        bool *dirty, bool *writeback)
    1039                 :            : {
    1040                 :          0 :         struct address_space *mapping;
    1041                 :            : 
    1042                 :            :         /*
    1043                 :            :          * Anonymous pages are not handled by flushers and must be written
     1044                 :            :          * from reclaim context. Do not stall reclaim based on them.
    1045                 :            :          */
    1046   [ #  #  #  # ]:          0 :         if (!page_is_file_cache(page) ||
    1047         [ #  # ]:          0 :             (PageAnon(page) && !PageSwapBacked(page))) {
    1048                 :          0 :                 *dirty = false;
    1049                 :          0 :                 *writeback = false;
    1050                 :          0 :                 return;
    1051                 :            :         }
    1052                 :            : 
    1053                 :            :         /* By default assume that the page flags are accurate */
    1054         [ #  # ]:          0 :         *dirty = PageDirty(page);
    1055         [ #  # ]:          0 :         *writeback = PageWriteback(page);
    1056                 :            : 
    1057                 :            :         /* Verify dirty/writeback state if the filesystem supports it */
    1058         [ #  # ]:          0 :         if (!page_has_private(page))
    1059                 :            :                 return;
    1060                 :            : 
    1061                 :          0 :         mapping = page_mapping(page);
    1062   [ #  #  #  # ]:          0 :         if (mapping && mapping->a_ops->is_dirty_writeback)
    1063                 :          0 :                 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
    1064                 :            : }
    1065                 :            : 
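A sketch of an a_ops->is_dirty_writeback implementation, loosely following
buffer_check_dirty_writeback() in fs/buffer.c: filesystems using buffer
heads report per-buffer state rather than trusting the page flags alone.

static void example_is_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;

	*dirty = false;
	*writeback = PageWriteback(page);

	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	do {
		if (buffer_locked(bh))		/* buffer under IO */
			*writeback = true;
		if (buffer_dirty(bh))
			*dirty = true;
		bh = bh->b_this_page;
	} while (bh != head);
}
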
    1066                 :            : /*
    1067                 :            :  * shrink_page_list() returns the number of reclaimed pages
    1068                 :            :  */
    1069                 :          0 : static unsigned long shrink_page_list(struct list_head *page_list,
    1070                 :            :                                       struct pglist_data *pgdat,
    1071                 :            :                                       struct scan_control *sc,
    1072                 :            :                                       enum ttu_flags ttu_flags,
    1073                 :            :                                       struct reclaim_stat *stat,
    1074                 :            :                                       bool ignore_references)
    1075                 :            : {
    1076                 :          0 :         LIST_HEAD(ret_pages);
    1077                 :          0 :         LIST_HEAD(free_pages);
    1078                 :          0 :         unsigned nr_reclaimed = 0;
    1079                 :          0 :         unsigned pgactivate = 0;
    1080                 :            : 
    1081                 :          0 :         memset(stat, 0, sizeof(*stat));
    1082                 :          0 :         cond_resched();
    1083                 :            : 
    1084         [ #  # ]:          0 :         while (!list_empty(page_list)) {
    1085                 :          0 :                 struct address_space *mapping;
    1086                 :          0 :                 struct page *page;
    1087                 :          0 :                 int may_enter_fs;
    1088                 :          0 :                 enum page_references references = PAGEREF_RECLAIM;
    1089                 :          0 :                 bool dirty, writeback;
    1090                 :          0 :                 unsigned int nr_pages;
    1091                 :            : 
    1092                 :          0 :                 cond_resched();
    1093                 :            : 
    1094                 :          0 :                 page = lru_to_page(page_list);
    1095         [ #  # ]:          0 :                 list_del(&page->lru);
    1096                 :            : 
    1097   [ #  #  #  # ]:          0 :                 if (!trylock_page(page))
    1098                 :          0 :                         goto keep;
    1099                 :            : 
    1100                 :          0 :                 VM_BUG_ON_PAGE(PageActive(page), page);
    1101                 :            : 
    1102                 :          0 :                 nr_pages = compound_nr(page);
    1103                 :            : 
    1104                 :            :                 /* Account the number of base pages even though THP */
    1105                 :          0 :                 sc->nr_scanned += nr_pages;
    1106                 :            : 
    1107         [ #  # ]:          0 :                 if (unlikely(!page_evictable(page)))
    1108                 :          0 :                         goto activate_locked;
    1109                 :            : 
    1110   [ #  #  #  # ]:          0 :                 if (!sc->may_unmap && page_mapped(page))
    1111                 :          0 :                         goto keep_locked;
    1112                 :            : 
    1113         [ #  # ]:          0 :                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
    1114         [ #  # ]:          0 :                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
    1115                 :            : 
    1116                 :            :                 /*
    1117                 :            :                  * The number of dirty pages determines if a node is marked
    1118                 :            :                  * reclaim_congested which affects wait_iff_congested. kswapd
    1119                 :            :                  * will stall and start writing pages if the tail of the LRU
    1120                 :            :                  * is all dirty unqueued pages.
    1121                 :            :                  */
    1122                 :          0 :                 page_check_dirty_writeback(page, &dirty, &writeback);
    1123   [ #  #  #  # ]:          0 :                 if (dirty || writeback)
    1124                 :          0 :                         stat->nr_dirty++;
    1125                 :            : 
    1126   [ #  #  #  # ]:          0 :                 if (dirty && !writeback)
    1127                 :          0 :                         stat->nr_unqueued_dirty++;
    1128                 :            : 
    1129                 :            :                 /*
    1130                 :            :                  * Treat this page as congested if the underlying BDI is or if
    1131                 :            :                  * pages are cycling through the LRU so quickly that the
    1132                 :            :                  * pages marked for immediate reclaim are making it to the
    1133                 :            :                  * end of the LRU a second time.
    1134                 :            :                  */
    1135                 :          0 :                 mapping = page_mapping(page);
    1136   [ #  #  #  #  :          0 :                 if (((dirty || writeback) && mapping &&
             #  #  #  # ]
    1137                 :          0 :                      inode_write_congested(mapping->host)) ||
    1138   [ #  #  #  # ]:          0 :                     (writeback && PageReclaim(page)))
    1139                 :          0 :                         stat->nr_congested++;
    1140                 :            : 
    1141                 :            :                 /*
    1142                 :            :                  * If a page at the tail of the LRU is under writeback, there
    1143                 :            :                  * are three cases to consider.
    1144                 :            :                  *
    1145                 :            :                  * 1) If reclaim is encountering an excessive number of pages
    1146                 :            :                  *    under writeback and this page is both under writeback and
    1147                 :            :                  *    PageReclaim then it indicates that pages are being queued
    1148                 :            :                  *    for IO but are being recycled through the LRU before the
    1149                 :            :                  *    IO can complete. Waiting on the page itself risks an
    1150                 :            :                  *    indefinite stall if it is impossible to writeback the
    1151                 :            :                  *    page due to IO error or disconnected storage so instead
    1152                 :            :                  *    note that the LRU is being scanned too quickly and the
    1153                 :            :                  *    caller can stall after page list has been processed.
    1154                 :            :                  *
    1155                 :            :                  * 2) Global or new memcg reclaim encounters a page that is
    1156                 :            :                  *    not marked for immediate reclaim, or the caller does not
    1157                 :            :                  *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
    1158                 :            :                  *    not to fs). In this case mark the page for immediate
    1159                 :            :                  *    reclaim and continue scanning.
    1160                 :            :                  *
    1161                 :            :                  *    Require may_enter_fs because we would wait on fs, which
    1162                 :            :                  *    may not have submitted IO yet. And the loop driver might
    1163                 :            :                  *    enter reclaim, and deadlock if it waits on a page for
    1164                 :            :                  *    which it is needed to do the write (loop masks off
    1165                 :            :                  *    __GFP_IO|__GFP_FS for this reason); but more thought
    1166                 :            :                  *    would probably show more reasons.
    1167                 :            :                  *
    1168                 :            :                  * 3) Legacy memcg encounters a page that is already marked
    1169                 :            :                  *    PageReclaim. memcg does not have any dirty pages
    1170                 :            :                  *    throttling so we could easily OOM just because too many
    1171                 :            :                  *    pages are in writeback and there is nothing else to
    1172                 :            :                  *    reclaim. Wait for the writeback to complete.
    1173                 :            :                  *
    1174                 :            :                  * In cases 1) and 2) we activate the pages to get them out of
    1175                 :            :                  * the way while we continue scanning for clean pages on the
    1176                 :            :                  * inactive list and refilling from the active list. The
    1177                 :            :                  * observation here is that waiting for disk writes is more
    1178                 :            :                  * expensive than potentially causing reloads down the line.
    1179                 :            :                  * Since they're marked for immediate reclaim, they won't put
    1180                 :            :                  * memory pressure on the cache working set any longer than it
    1181                 :            :                  * takes to write them to disk.
    1182                 :            :                  */
    1183   [ #  #  #  # ]:          0 :                 if (PageWriteback(page)) {
    1184                 :            :                         /* Case 1 above */
    1185   [ #  #  #  # ]:          0 :                         if (current_is_kswapd() &&
    1186         [ #  # ]:          0 :                             PageReclaim(page) &&
    1187                 :          0 :                             test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
    1188                 :          0 :                                 stat->nr_immediate++;
    1189                 :          0 :                                 goto activate_locked;
    1190                 :            : 
    1191                 :            :                         /* Case 2 above */
    1192                 :          0 :                         } else if (writeback_throttling_sane(sc) ||
    1193                 :            :                             !PageReclaim(page) || !may_enter_fs) {
    1194                 :            :                                 /*
    1195                 :            :                                  * This is slightly racy - end_page_writeback()
    1196                 :            :                                  * might have just cleared PageReclaim, then
     1197                 :            :                                  * setting PageReclaim here may end up interpreted
    1198                 :            :                                  * as PageReadahead - but that does not matter
    1199                 :            :                                  * enough to care.  What we do want is for this
    1200                 :            :                                  * page to have PageReclaim set next time memcg
    1201                 :            :                                  * reclaim reaches the tests above, so it will
    1202                 :            :                                  * then wait_on_page_writeback() to avoid OOM;
    1203                 :            :                                  * and it's also appropriate in global reclaim.
    1204                 :            :                                  */
    1205         [ #  # ]:          0 :                                 SetPageReclaim(page);
    1206                 :          0 :                                 stat->nr_writeback++;
    1207                 :          0 :                                 goto activate_locked;
    1208                 :            : 
    1209                 :            :                         /* Case 3 above */
    1210                 :            :                         } else {
    1211                 :            :                                 unlock_page(page);
    1212                 :            :                                 wait_on_page_writeback(page);
    1213                 :            :                                 /* then go back and try same page again */
    1214                 :            :                                 list_add_tail(&page->lru, page_list);
    1215                 :          0 :                                 continue;
    1216                 :            :                         }
    1217                 :            :                 }
    1218                 :            : 
    1219         [ #  # ]:          0 :                 if (!ignore_references)
    1220                 :          0 :                         references = page_check_references(page, sc);
    1221                 :            : 
    1222      [ #  #  # ]:          0 :                 switch (references) {
    1223                 :          0 :                 case PAGEREF_ACTIVATE:
    1224                 :          0 :                         goto activate_locked;
    1225                 :          0 :                 case PAGEREF_KEEP:
    1226                 :          0 :                         stat->nr_ref_keep += nr_pages;
    1227                 :          0 :                         goto keep_locked;
    1228                 :            :                 case PAGEREF_RECLAIM:
    1229                 :            :                 case PAGEREF_RECLAIM_CLEAN:
    1230                 :          0 :                         ; /* try to reclaim the page below */
    1231                 :            :                 }
    1232                 :            : 
    1233                 :            :                 /*
    1234                 :            :                  * Anonymous process memory has backing store?
    1235                 :            :                  * Try to allocate it some swap space here.
    1236                 :            :                  * Lazyfree page could be freed directly
    1237                 :            :                  */
    1238   [ #  #  #  #  :          0 :                 if (PageAnon(page) && PageSwapBacked(page)) {
                   #  # ]
    1239         [ #  # ]:          0 :                         if (!PageSwapCache(page)) {
    1240         [ #  # ]:          0 :                                 if (!(sc->gfp_mask & __GFP_IO))
    1241                 :          0 :                                         goto keep_locked;
    1242                 :          0 :                                 if (PageTransHuge(page)) {
    1243                 :            :                                         /* cannot split THP, skip it */
    1244                 :            :                                         if (!can_split_huge_page(page, NULL))
    1245                 :            :                                                 goto activate_locked;
    1246                 :            :                                         /*
    1247                 :            :                                          * Split pages without a PMD map right
    1248                 :            :                                          * away. Chances are some or all of the
    1249                 :            :                                          * tail pages can be freed without IO.
    1250                 :            :                                          */
    1251                 :            :                                         if (!compound_mapcount(page) &&
    1252                 :            :                                             split_huge_page_to_list(page,
    1253                 :            :                                                                     page_list))
    1254                 :            :                                                 goto activate_locked;
    1255                 :            :                                 }
    1256         [ #  # ]:          0 :                                 if (!add_to_swap(page)) {
    1257         [ #  # ]:          0 :                                         if (!PageTransHuge(page))
    1258                 :          0 :                                                 goto activate_locked_split;
    1259                 :            :                                         /* Fallback to swap normal pages */
    1260                 :            :                                         if (split_huge_page_to_list(page,
    1261                 :            :                                                                     page_list))
    1262                 :            :                                                 goto activate_locked;
    1263                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1264                 :            :                                         count_vm_event(THP_SWPOUT_FALLBACK);
    1265                 :            : #endif
    1266                 :            :                                         if (!add_to_swap(page))
    1267                 :            :                                                 goto activate_locked_split;
    1268                 :            :                                 }
    1269                 :            : 
    1270                 :          0 :                                 may_enter_fs = 1;
    1271                 :            : 
    1272                 :            :                                 /* Adding to swap updated mapping */
    1273                 :          0 :                                 mapping = page_mapping(page);
    1274                 :            :                         }
    1275                 :            :                 } else if (unlikely(PageTransHuge(page))) {
    1276                 :            :                         /* Split file THP */
    1277                 :            :                         if (split_huge_page_to_list(page, page_list))
    1278                 :            :                                 goto keep_locked;
    1279                 :            :                 }
    1280                 :            : 
    1281                 :            :                 /*
     1282                 :            :                  * The THP may have been split above; subtract the tail pages
     1283                 :            :                  * and update nr_pages to avoid accounting tail pages twice.
     1284                 :            :                  *
     1285                 :            :                  * Tail pages that were successfully added to the swap cache
     1286                 :            :                  * reach here.
    1287                 :            :                  */
    1288         [ #  # ]:          0 :                 if ((nr_pages > 1) && !PageTransHuge(page)) {
    1289                 :          0 :                         sc->nr_scanned -= (nr_pages - 1);
    1290                 :          0 :                         nr_pages = 1;
    1291                 :            :                 }
    1292                 :            : 
    1293                 :            :                 /*
    1294                 :            :                  * The page is mapped into the page tables of one or more
    1295                 :            :                  * processes. Try to unmap it here.
    1296                 :            :                  */
    1297         [ #  # ]:          0 :                 if (page_mapped(page)) {
    1298                 :          0 :                         enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
    1299                 :            : 
    1300                 :          0 :                         if (unlikely(PageTransHuge(page)))
    1301                 :            :                                 flags |= TTU_SPLIT_HUGE_PMD;
    1302         [ #  # ]:          0 :                         if (!try_to_unmap(page, flags)) {
    1303                 :          0 :                                 stat->nr_unmap_fail += nr_pages;
    1304                 :          0 :                                 goto activate_locked;
    1305                 :            :                         }
    1306                 :            :                 }
    1307                 :            : 
    1308   [ #  #  #  # ]:          0 :                 if (PageDirty(page)) {
    1309                 :            :                         /*
    1310                 :            :                          * Only kswapd can writeback filesystem pages
    1311                 :            :                          * to avoid risk of stack overflow. But avoid
    1312                 :            :                          * injecting inefficient single-page IO into
    1313                 :            :                          * flusher writeback as much as possible: only
    1314                 :            :                          * write pages when we've encountered many
    1315                 :            :                          * dirty pages, and when we've already scanned
    1316                 :            :                          * the rest of the LRU for clean pages and see
    1317                 :            :                          * the same dirty pages again (PageReclaim).
    1318                 :            :                          */
    1319   [ #  #  #  # ]:          0 :                         if (page_is_file_cache(page) &&
    1320   [ #  #  #  # ]:          0 :                             (!current_is_kswapd() || !PageReclaim(page) ||
    1321                 :          0 :                              !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
    1322                 :            :                                 /*
    1323                 :            :                                  * Immediately reclaim when written back.
     1324                 :            :                          * Similar in principle to deactivate_page()
    1325                 :            :                                  * except we already have the page isolated
    1326                 :            :                                  * and know it's dirty
    1327                 :            :                                  */
    1328                 :          0 :                                 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
    1329         [ #  # ]:          0 :                                 SetPageReclaim(page);
    1330                 :            : 
    1331                 :          0 :                                 goto activate_locked;
    1332                 :            :                         }
    1333                 :            : 
    1334         [ #  # ]:          0 :                         if (references == PAGEREF_RECLAIM_CLEAN)
    1335                 :          0 :                                 goto keep_locked;
    1336         [ #  # ]:          0 :                         if (!may_enter_fs)
    1337                 :          0 :                                 goto keep_locked;
    1338         [ #  # ]:          0 :                         if (!sc->may_writepage)
    1339                 :          0 :                                 goto keep_locked;
    1340                 :            : 
    1341                 :            :                         /*
    1342                 :            :                          * Page is dirty. Flush the TLB if a writable entry
    1343                 :            :                          * potentially exists to avoid CPU writes after IO
    1344                 :            :                          * starts and then write it out here.
    1345                 :            :                          */
    1346                 :          0 :                         try_to_unmap_flush_dirty();
    1347   [ #  #  #  # ]:          0 :                         switch (pageout(page, mapping)) {
    1348                 :          0 :                         case PAGE_KEEP:
    1349                 :          0 :                                 goto keep_locked;
    1350                 :          0 :                         case PAGE_ACTIVATE:
    1351                 :          0 :                                 goto activate_locked;
    1352                 :            :                         case PAGE_SUCCESS:
    1353   [ #  #  #  # ]:          0 :                                 if (PageWriteback(page))
    1354                 :          0 :                                         goto keep;
    1355   [ #  #  #  # ]:          0 :                                 if (PageDirty(page))
    1356                 :          0 :                                         goto keep;
    1357                 :            : 
    1358                 :            :                                 /*
    1359                 :            :                                  * A synchronous write - probably a ramdisk.  Go
    1360                 :            :                                  * ahead and try to reclaim the page.
    1361                 :            :                                  */
    1362   [ #  #  #  # ]:          0 :                                 if (!trylock_page(page))
    1363                 :          0 :                                         goto keep;
    1364   [ #  #  #  #  :          0 :                                 if (PageDirty(page) || PageWriteback(page))
                   #  # ]
    1365                 :          0 :                                         goto keep_locked;
    1366                 :          0 :                                 mapping = page_mapping(page);
    1367                 :          0 :                         case PAGE_CLEAN:
    1368                 :          0 :                                 ; /* try to free the page below */
    1369                 :            :                         }
    1370                 :            :                 }
    1371                 :            : 
    1372                 :            :                 /*
    1373                 :            :                  * If the page has buffers, try to free the buffer mappings
    1374                 :            :                  * associated with this page. If we succeed we try to free
    1375                 :            :                  * the page as well.
    1376                 :            :                  *
    1377                 :            :                  * We do this even if the page is PageDirty().
    1378                 :            :                  * try_to_release_page() does not perform I/O, but it is
    1379                 :            :                  * possible for a page to have PageDirty set, but it is actually
    1380                 :            :                  * clean (all its buffers are clean).  This happens if the
    1381                 :            :                  * buffers were written out directly, with submit_bh(). ext3
    1382                 :            :                  * will do this, as well as the blockdev mapping.
    1383                 :            :                  * try_to_release_page() will discover that cleanness and will
    1384                 :            :                  * drop the buffers and mark the page clean - it can be freed.
    1385                 :            :                  *
    1386                 :            :                  * Rarely, pages can have buffers and no ->mapping.  These are
    1387                 :            :                  * the pages which were not successfully invalidated in
    1388                 :            :                  * truncate_complete_page().  We try to drop those buffers here
    1389                 :            :                  * and if that worked, and the page is no longer mapped into
    1390                 :            :                  * process address space (page_count == 1) it can be freed.
    1391                 :            :                  * Otherwise, leave the page on the LRU so it is swappable.
    1392                 :            :                  */
    1393         [ #  # ]:          0 :                 if (page_has_private(page)) {
    1394         [ #  # ]:          0 :                         if (!try_to_release_page(page, sc->gfp_mask))
    1395                 :          0 :                                 goto activate_locked;
    1396   [ #  #  #  # ]:          0 :                         if (!mapping && page_count(page) == 1) {
    1397                 :          0 :                                 unlock_page(page);
    1398         [ #  # ]:          0 :                                 if (put_page_testzero(page))
    1399                 :          0 :                                         goto free_it;
    1400                 :            :                                 else {
    1401                 :            :                                         /*
    1402                 :            :                                          * rare race with speculative reference.
    1403                 :            :                                          * the speculative reference will free
    1404                 :            :                                          * this page shortly, so we may
    1405                 :            :                                          * increment nr_reclaimed here (and
    1406                 :            :                                          * leave it off the LRU).
    1407                 :            :                                          */
    1408                 :          0 :                                         nr_reclaimed++;
    1409                 :          0 :                                         continue;
    1410                 :            :                                 }
    1411                 :            :                         }
    1412                 :            :                 }
    1413                 :            : 
    1414   [ #  #  #  #  :          0 :                 if (PageAnon(page) && !PageSwapBacked(page)) {
                   #  # ]
    1415                 :            :                         /* follow __remove_mapping for reference */
    1416         [ #  # ]:          0 :                         if (!page_ref_freeze(page, 1))
    1417                 :          0 :                                 goto keep_locked;
    1418   [ #  #  #  # ]:          0 :                         if (PageDirty(page)) {
    1419                 :          0 :                                 page_ref_unfreeze(page, 1);
    1420                 :          0 :                                 goto keep_locked;
    1421                 :            :                         }
    1422                 :            : 
    1423                 :          0 :                         count_vm_event(PGLAZYFREED);
    1424                 :            :                         count_memcg_page_event(page, PGLAZYFREED);
    1425   [ #  #  #  # ]:          0 :                 } else if (!mapping || !__remove_mapping(mapping, page, true,
    1426                 :            :                                                          sc->target_mem_cgroup))
    1427                 :          0 :                         goto keep_locked;
    1428                 :            : 
    1429                 :          0 :                 unlock_page(page);
    1430                 :          0 : free_it:
    1431                 :            :                 /*
     1432                 :            :                  * A THP may be swapped out as a whole, so account
     1433                 :            :                  * all of its base pages.
    1434                 :            :                  */
    1435                 :          0 :                 nr_reclaimed += nr_pages;
    1436                 :            : 
    1437                 :            :                 /*
     1438                 :            :                  * Is there a need to periodically free the free_pages list?
     1439                 :            :                  * It would appear not, as the counts should stay low.
    1440                 :            :                  */
    1441                 :          0 :                 if (unlikely(PageTransHuge(page)))
    1442                 :            :                         (*get_compound_page_dtor(page))(page);
    1443                 :            :                 else
    1444                 :          0 :                         list_add(&page->lru, &free_pages);
    1445                 :          0 :                 continue;
    1446                 :            : 
    1447                 :            : activate_locked_split:
    1448                 :            :                 /*
     1449                 :            :                  * Tail pages that failed to be added to the swap cache
     1450                 :            :                  * reach here.  Fix up nr_scanned and nr_pages.
    1451                 :            :                  */
    1452         [ #  # ]:          0 :                 if (nr_pages > 1) {
    1453                 :          0 :                         sc->nr_scanned -= (nr_pages - 1);
    1454                 :          0 :                         nr_pages = 1;
    1455                 :            :                 }
    1456                 :          0 : activate_locked:
    1457                 :            :                 /* Not a candidate for swapping, so reclaim swap space. */
    1458   [ #  #  #  #  :          0 :                 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
                   #  # ]
    1459                 :            :                                                 PageMlocked(page)))
    1460                 :          0 :                         try_to_free_swap(page);
    1461                 :          0 :                 VM_BUG_ON_PAGE(PageActive(page), page);
    1462   [ #  #  #  # ]:          0 :                 if (!PageMlocked(page)) {
    1463                 :          0 :                         int type = page_is_file_cache(page);
    1464         [ #  # ]:          0 :                         SetPageActive(page);
    1465                 :          0 :                         stat->nr_activate[type] += nr_pages;
    1466                 :          0 :                         count_memcg_page_event(page, PGACTIVATE);
    1467                 :            :                 }
    1468                 :          0 : keep_locked:
    1469                 :          0 :                 unlock_page(page);
    1470                 :          0 : keep:
    1471                 :          0 :                 list_add(&page->lru, &ret_pages);
    1472                 :          0 :                 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
    1473                 :            :         }
    1474                 :            : 
    1475                 :          0 :         pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
    1476                 :            : 
    1477                 :          0 :         mem_cgroup_uncharge_list(&free_pages);
    1478                 :          0 :         try_to_unmap_flush();
    1479                 :          0 :         free_unref_page_list(&free_pages);
    1480                 :            : 
    1481         [ #  # ]:          0 :         list_splice(&ret_pages, page_list);
    1482         [ #  # ]:          0 :         count_vm_events(PGACTIVATE, pgactivate);
    1483                 :            : 
    1484                 :          0 :         return nr_reclaimed;
    1485                 :            : }
    1486                 :            : 
    1487                 :          0 : unsigned long reclaim_clean_pages_from_list(struct zone *zone,
    1488                 :            :                                             struct list_head *page_list)
    1489                 :            : {
    1490                 :          0 :         struct scan_control sc = {
    1491                 :            :                 .gfp_mask = GFP_KERNEL,
    1492                 :            :                 .priority = DEF_PRIORITY,
    1493                 :            :                 .may_unmap = 1,
    1494                 :            :         };
    1495                 :          0 :         struct reclaim_stat dummy_stat;
    1496                 :          0 :         unsigned long ret;
    1497                 :          0 :         struct page *page, *next;
    1498                 :          0 :         LIST_HEAD(clean_pages);
    1499                 :            : 
    1500         [ #  # ]:          0 :         list_for_each_entry_safe(page, next, page_list, lru) {
    1501   [ #  #  #  #  :          0 :                 if (page_is_file_cache(page) && !PageDirty(page) &&
                   #  # ]
    1502         [ #  # ]:          0 :                     !__PageMovable(page) && !PageUnevictable(page)) {
    1503         [ #  # ]:          0 :                         ClearPageActive(page);
    1504                 :          0 :                         list_move(&page->lru, &clean_pages);
    1505                 :            :                 }
    1506                 :            :         }
    1507                 :            : 
    1508                 :          0 :         ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
    1509                 :            :                         TTU_IGNORE_ACCESS, &dummy_stat, true);
    1510         [ #  # ]:          0 :         list_splice(&clean_pages, page_list);
    1511                 :          0 :         mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
    1512                 :          0 :         return ret;
    1513                 :            : }
    1514                 :            : 
    1515                 :            : /*
    1516                 :            :  * Attempt to remove the specified page from its LRU.  Only take this page
    1517                 :            :  * if it is of the appropriate PageActive status.  Pages which are being
    1518                 :            :  * freed elsewhere are also ignored.
    1519                 :            :  *
    1520                 :            :  * page:        page to consider
    1521                 :            :  * mode:        one of the LRU isolation modes defined above
    1522                 :            :  *
    1523                 :            :  * returns 0 on success, -ve errno on failure.
    1524                 :            :  */
    1525                 :          0 : int __isolate_lru_page(struct page *page, isolate_mode_t mode)
    1526                 :            : {
    1527                 :          0 :         int ret = -EINVAL;
    1528                 :            : 
    1529                 :            :         /* Only take pages on the LRU. */
    1530   [ #  #  #  # ]:          0 :         if (!PageLRU(page))
    1531                 :            :                 return ret;
    1532                 :            : 
    1533                 :            :         /* Compaction should not handle unevictable pages but CMA can do so */
    1534   [ #  #  #  #  :          0 :         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
                   #  # ]
    1535                 :            :                 return ret;
    1536                 :            : 
    1537                 :          0 :         ret = -EBUSY;
    1538                 :            : 
    1539                 :            :         /*
    1540                 :            :          * To minimise LRU disruption, the caller can indicate that it only
    1541                 :            :          * wants to isolate pages it will be able to operate on without
    1542                 :            :          * blocking - clean pages for the most part.
    1543                 :            :          *
     1544                 :            :          * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
     1545                 :            :          * that are possible to migrate without blocking.
    1546                 :            :          */
    1547         [ #  # ]:          0 :         if (mode & ISOLATE_ASYNC_MIGRATE) {
    1548                 :            :                 /* All the caller can do on PageWriteback is block */
    1549   [ #  #  #  # ]:          0 :                 if (PageWriteback(page))
    1550                 :            :                         return ret;
    1551                 :            : 
    1552   [ #  #  #  # ]:          0 :                 if (PageDirty(page)) {
    1553                 :          0 :                         struct address_space *mapping;
    1554                 :          0 :                         bool migrate_dirty;
    1555                 :            : 
    1556                 :            :                         /*
    1557                 :            :                          * Only pages without mappings or that have a
    1558                 :            :                          * ->migratepage callback are possible to migrate
    1559                 :            :                          * without blocking. However, we can be racing with
    1560                 :            :                          * truncation so it's necessary to lock the page
    1561                 :            :                          * to stabilise the mapping as truncation holds
    1562                 :            :                          * the page lock until after the page is removed
    1563                 :            :                          * from the page cache.
    1564                 :            :                          */
    1565   [ #  #  #  # ]:          0 :                         if (!trylock_page(page))
    1566                 :            :                                 return ret;
    1567                 :            : 
    1568                 :          0 :                         mapping = page_mapping(page);
    1569   [ #  #  #  # ]:          0 :                         migrate_dirty = !mapping || mapping->a_ops->migratepage;
    1570                 :          0 :                         unlock_page(page);
    1571         [ #  # ]:          0 :                         if (!migrate_dirty)
    1572                 :            :                                 return ret;
    1573                 :            :                 }
    1574                 :            :         }
    1575                 :            : 
    1576   [ #  #  #  # ]:          0 :         if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
    1577                 :            :                 return ret;
    1578                 :            : 
    1579         [ #  # ]:          0 :         if (likely(get_page_unless_zero(page))) {
    1580                 :            :                 /*
    1581                 :            :                  * Be careful not to clear PageLRU until after we're
    1582                 :            :                  * sure the page is not being freed elsewhere -- the
    1583                 :            :                  * page release code relies on it.
    1584                 :            :                  */
    1585         [ #  # ]:          0 :                 ClearPageLRU(page);
    1586                 :          0 :                 ret = 0;
    1587                 :            :         }
    1588                 :            : 
    1589                 :            :         return ret;
    1590                 :            : }
    1591                 :            : 
    1592                 :            : 
    1593                 :            : /*
    1594                 :            :  * Update LRU sizes after isolating pages. The LRU size updates must
     1595                 :            :  * be complete before mem_cgroup_update_lru_size due to a sanity check.
    1596                 :            :  */
    1597                 :          0 : static __always_inline void update_lru_sizes(struct lruvec *lruvec,
    1598                 :            :                         enum lru_list lru, unsigned long *nr_zone_taken)
    1599                 :            : {
    1600                 :          0 :         int zid;
    1601                 :            : 
    1602         [ #  # ]:          0 :         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    1603         [ #  # ]:          0 :                 if (!nr_zone_taken[zid])
    1604                 :          0 :                         continue;
    1605                 :            : 
    1606                 :          0 :                 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
    1607                 :            : #ifdef CONFIG_MEMCG
    1608                 :            :                 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
    1609                 :            : #endif
    1610                 :            :         }
    1611                 :            : 
    1612                 :            : }
    1613                 :            : 
    1614                 :            : /**
    1615                 :            :  * pgdat->lru_lock is heavily contended.  Some of the functions that
    1616                 :            :  * shrink the lists perform better by taking out a batch of pages
    1617                 :            :  * and working on them outside the LRU lock.
    1618                 :            :  *
    1619                 :            :  * For pagecache intensive workloads, this function is the hottest
    1620                 :            :  * spot in the kernel (apart from copy_*_user functions).
    1621                 :            :  *
    1622                 :            :  * Appropriate locks must be held before calling this function.
    1623                 :            :  *
    1624                 :            :  * @nr_to_scan: The number of eligible pages to look through on the list.
    1625                 :            :  * @lruvec:     The LRU vector to pull pages from.
    1626                 :            :  * @dst:        The temp list to put pages on to.
    1627                 :            :  * @nr_scanned: The number of pages that were scanned.
    1628                 :            :  * @sc:         The scan_control struct for this reclaim session
    1629                 :            :  * @mode:       One of the LRU isolation modes
    1630                 :            :  * @lru:        LRU list id for isolating
    1631                 :            :  *
    1632                 :            :  * returns how many pages were moved onto *@dst.
    1633                 :            :  */
    1634                 :          0 : static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
    1635                 :            :                 struct lruvec *lruvec, struct list_head *dst,
    1636                 :            :                 unsigned long *nr_scanned, struct scan_control *sc,
    1637                 :            :                 enum lru_list lru)
    1638                 :            : {
    1639                 :          0 :         struct list_head *src = &lruvec->lists[lru];
    1640                 :          0 :         unsigned long nr_taken = 0;
    1641                 :          0 :         unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
    1642                 :          0 :         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
    1643                 :          0 :         unsigned long skipped = 0;
    1644                 :          0 :         unsigned long scan, total_scan, nr_pages;
    1645                 :          0 :         LIST_HEAD(pages_skipped);
    1646         [ #  # ]:          0 :         isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
    1647                 :            : 
    1648                 :          0 :         total_scan = 0;
    1649                 :          0 :         scan = 0;
    1650         [ #  # ]:          0 :         while (scan < nr_to_scan && !list_empty(src)) {
    1651                 :          0 :                 struct page *page;
    1652                 :            : 
    1653                 :          0 :                 page = lru_to_page(src);
    1654         [ #  # ]:          0 :                 prefetchw_prev_lru_page(page, src, flags);
    1655                 :            : 
    1656                 :          0 :                 VM_BUG_ON_PAGE(!PageLRU(page), page);
    1657                 :            : 
    1658                 :          0 :                 nr_pages = compound_nr(page);
    1659                 :          0 :                 total_scan += nr_pages;
    1660                 :            : 
    1661         [ #  # ]:          0 :                 if (page_zonenum(page) > sc->reclaim_idx) {
    1662                 :          0 :                         list_move(&page->lru, &pages_skipped);
    1663                 :          0 :                         nr_skipped[page_zonenum(page)] += nr_pages;
    1664                 :          0 :                         continue;
    1665                 :            :                 }
    1666                 :            : 
    1667                 :            :                 /*
    1668                 :            :                  * Do not count skipped pages because that makes the function
    1669                 :            :                  * return with no isolated pages if the LRU mostly contains
    1670                 :            :                  * ineligible pages.  This causes the VM to not reclaim any
    1671                 :            :                  * pages, triggering a premature OOM.
    1672                 :            :                  *
    1673                 :            :                  * Account all tail pages of THP.  This would not cause
    1674                 :            :                  * premature OOM since __isolate_lru_page() returns -EBUSY
    1675                 :            :                  * only when the page is being freed somewhere else.
    1676                 :            :                  */
    1677                 :          0 :                 scan += nr_pages;
    1678      [ #  #  # ]:          0 :                 switch (__isolate_lru_page(page, mode)) {
    1679                 :          0 :                 case 0:
    1680                 :          0 :                         nr_taken += nr_pages;
    1681                 :          0 :                         nr_zone_taken[page_zonenum(page)] += nr_pages;
    1682                 :          0 :                         list_move(&page->lru, dst);
    1683                 :            :                         break;
    1684                 :            : 
    1685                 :          0 :                 case -EBUSY:
    1686                 :            :                         /* else it is being freed elsewhere */
    1687                 :          0 :                         list_move(&page->lru, src);
    1688                 :          0 :                         continue;
    1689                 :            : 
    1690                 :          0 :                 default:
    1691         [ #  # ]:          0 :                         BUG();
    1692                 :            :                 }
    1693                 :            :         }
    1694                 :            : 
    1695                 :            :         /*
    1696                 :            :          * Splice any skipped pages to the start of the LRU list. Note that
    1697                 :            :          * this disrupts the LRU order when reclaiming for lower zones but
    1698                 :            :          * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
    1699                 :            :          * scanning would soon rescan the same pages to skip and put the
    1700                 :            :          * system at risk of premature OOM.
    1701                 :            :          */
    1702         [ #  # ]:          0 :         if (!list_empty(&pages_skipped)) {
    1703                 :          0 :                 int zid;
    1704                 :            : 
    1705         [ #  # ]:          0 :                 list_splice(&pages_skipped, src);
    1706         [ #  # ]:          0 :                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    1707         [ #  # ]:          0 :                         if (!nr_skipped[zid])
    1708                 :          0 :                                 continue;
    1709                 :            : 
    1710         [ #  # ]:          0 :                         __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
    1711                 :          0 :                         skipped += nr_skipped[zid];
    1712                 :            :                 }
    1713                 :            :         }
    1714                 :          0 :         *nr_scanned = total_scan;
    1715                 :          0 :         trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
    1716                 :            :                                     total_scan, skipped, nr_taken, mode, lru);
    1717                 :          0 :         update_lru_sizes(lruvec, lru, nr_zone_taken);
    1718                 :          0 :         return nr_taken;
    1719                 :            : }
    1720                 :            : 
    1721                 :            : /**
    1722                 :            :  * isolate_lru_page - tries to isolate a page from its LRU list
    1723                 :            :  * @page: page to isolate from its LRU list
    1724                 :            :  *
    1725                 :            :  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
    1726                 :            :  * vmstat statistic corresponding to whatever LRU list the page was on.
    1727                 :            :  *
    1728                 :            :  * Returns 0 if the page was removed from an LRU list.
    1729                 :            :  * Returns -EBUSY if the page was not on an LRU list.
    1730                 :            :  *
     1731                 :            :  * On success the page will have PageLRU() cleared.  If it was found on
    1732                 :            :  * the active list, it will have PageActive set.  If it was found on
    1733                 :            :  * the unevictable list, it will have the PageUnevictable bit set. That flag
    1734                 :            :  * may need to be cleared by the caller before letting the page go.
    1735                 :            :  *
    1736                 :            :  * The vmstat statistic corresponding to the list on which the page was
    1737                 :            :  * found will be decremented.
    1738                 :            :  *
    1739                 :            :  * Restrictions:
    1740                 :            :  *
    1741                 :            :  * (1) Must be called with an elevated refcount on the page. This is a
     1742                 :            :  *     fundamental difference from isolate_lru_pages (which is called
    1743                 :            :  *     without a stable reference).
    1744                 :            :  * (2) the lru_lock must not be held.
    1745                 :            :  * (3) interrupts must be enabled.
    1746                 :            :  */
    1747                 :          0 : int isolate_lru_page(struct page *page)
    1748                 :            : {
    1749                 :          0 :         int ret = -EBUSY;
    1750                 :            : 
    1751                 :          0 :         VM_BUG_ON_PAGE(!page_count(page), page);
    1752   [ #  #  #  # ]:          0 :         WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
    1753                 :            : 
    1754   [ #  #  #  # ]:          0 :         if (PageLRU(page)) {
    1755                 :          0 :                 pg_data_t *pgdat = page_pgdat(page);
    1756                 :          0 :                 struct lruvec *lruvec;
    1757                 :            : 
    1758                 :          0 :                 spin_lock_irq(&pgdat->lru_lock);
    1759         [ #  # ]:          0 :                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
    1760   [ #  #  #  # ]:          0 :                 if (PageLRU(page)) {
    1761         [ #  # ]:          0 :                         int lru = page_lru(page);
    1762         [ #  # ]:          0 :                         get_page(page);
    1763         [ #  # ]:          0 :                         ClearPageLRU(page);
    1764                 :          0 :                         del_page_from_lru_list(page, lruvec, lru);
    1765                 :          0 :                         ret = 0;
    1766                 :            :                 }
    1767                 :          0 :                 spin_unlock_irq(&pgdat->lru_lock);
    1768                 :            :         }
    1769                 :          0 :         return ret;
    1770                 :            : }
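/*
 * Editor's sketch, not part of vmscan.c: how a hypothetical caller would
 * honour the restrictions documented above. `private_list` is an assumed
 * caller-side list head; the calls themselves are existing kernel APIs.
 */
#if 0	/* illustrative only */
	LIST_HEAD(private_list);

	get_page(page);				/* restriction (1): stable reference */
	if (!isolate_lru_page(page)) {		/* lru_lock not held, irqs enabled */
		/*
		 * The page is off the LRU now; PageActive or PageUnevictable
		 * may still be set and may need clearing before release.
		 */
		list_add_tail(&page->lru, &private_list);
	}
	put_page(page);				/* isolation holds its own reference */
#endif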
    1771                 :            : 
    1772                 :            : /*
    1773                 :            :  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
     1774                 :            :  * then get rescheduled. When there is a massive number of tasks doing page
     1775                 :            :  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
     1776                 :            :  * the LRU list will shrink and be scanned faster than necessary, leading to
    1777                 :            :  * unnecessary swapping, thrashing and OOM.
    1778                 :            :  */
    1779                 :            : static int too_many_isolated(struct pglist_data *pgdat, int file,
    1780                 :            :                 struct scan_control *sc)
    1781                 :            : {
    1782                 :            :         unsigned long inactive, isolated;
    1783                 :            : 
    1784                 :            :         if (current_is_kswapd())
    1785                 :            :                 return 0;
    1786                 :            : 
    1787                 :            :         if (!writeback_throttling_sane(sc))
    1788                 :            :                 return 0;
    1789                 :            : 
    1790                 :            :         if (file) {
    1791                 :            :                 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
    1792                 :            :                 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
    1793                 :            :         } else {
    1794                 :            :                 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
    1795                 :            :                 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
    1796                 :            :         }
    1797                 :            : 
    1798                 :            :         /*
    1799                 :            :          * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
    1800                 :            :          * won't get blocked by normal direct-reclaimers, forming a circular
    1801                 :            :          * deadlock.
    1802                 :            :          */
    1803                 :            :         if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
    1804                 :            :                 inactive >>= 3;
    1805                 :            : 
    1806                 :            :         return isolated > inactive;
    1807                 :            : }
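/*
 * Editor's sketch (a standalone userspace model, not kernel code) of the
 * throttling rule above: callers that may enter both I/O and filesystem
 * reclaim are compared against one eighth of the inactive list, while
 * GFP_NOIO/GFP_NOFS callers are compared against the whole list so they
 * cannot deadlock behind normal direct reclaimers.  The MODEL_* flags
 * stand in for __GFP_IO/__GFP_FS.
 */
#include <stdbool.h>

#define MODEL_GFP_IO	0x1
#define MODEL_GFP_FS	0x2

static bool model_too_many_isolated(unsigned long inactive,
				    unsigned long isolated,
				    unsigned int gfp_mask)
{
	if ((gfp_mask & (MODEL_GFP_IO | MODEL_GFP_FS)) ==
	    (MODEL_GFP_IO | MODEL_GFP_FS))
		inactive >>= 3;	/* throttle at 1/8th of the inactive list */

	return isolated > inactive;
}

/*
 * With inactive = 4096, a full-reclaim caller stalls once more than 512
 * pages are isolated; a GFP_NOFS caller only once more than 4096 are.
 */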
    1808                 :            : 
    1809                 :            : /*
    1810                 :            :  * This moves pages from @list to corresponding LRU list.
    1811                 :            :  *
    1812                 :            :  * We move them the other way if the page is referenced by one or more
    1813                 :            :  * processes, from rmap.
    1814                 :            :  *
    1815                 :            :  * If the pages are mostly unmapped, the processing is fast and it is
     1816                 :            :  * appropriate to hold pgdat->lru_lock across the whole operation.  But if
    1817                 :            :  * the pages are mapped, the processing is slow (page_referenced()) so we
     1818                 :            :  * should drop pgdat->lru_lock around each page.  It's impossible to balance
    1819                 :            :  * this, so instead we remove the pages from the LRU while processing them.
    1820                 :            :  * It is safe to rely on PG_active against the non-LRU pages in here because
    1821                 :            :  * nobody will play with that bit on a non-LRU page.
    1822                 :            :  *
    1823                 :            :  * The downside is that we have to touch page->_refcount against each page.
    1824                 :            :  * But we had to alter page->flags anyway.
    1825                 :            :  *
    1826                 :            :  * Returns the number of pages moved to the given lruvec.
    1827                 :            :  */
    1828                 :            : 
    1829                 :          0 : static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
    1830                 :            :                                                      struct list_head *list)
    1831                 :            : {
    1832                 :          0 :         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    1833                 :          0 :         int nr_pages, nr_moved = 0;
    1834                 :          0 :         LIST_HEAD(pages_to_free);
    1835                 :          0 :         struct page *page;
    1836                 :          0 :         enum lru_list lru;
    1837                 :            : 
    1838         [ #  # ]:          0 :         while (!list_empty(list)) {
    1839                 :          0 :                 page = lru_to_page(list);
    1840                 :          0 :                 VM_BUG_ON_PAGE(PageLRU(page), page);
    1841         [ #  # ]:          0 :                 if (unlikely(!page_evictable(page))) {
    1842                 :          0 :                         list_del(&page->lru);
    1843                 :          0 :                         spin_unlock_irq(&pgdat->lru_lock);
    1844                 :          0 :                         putback_lru_page(page);
    1845                 :          0 :                         spin_lock_irq(&pgdat->lru_lock);
    1846                 :          0 :                         continue;
    1847                 :            :                 }
    1848         [ #  # ]:          0 :                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
    1849                 :            : 
    1850         [ #  # ]:          0 :                 SetPageLRU(page);
    1851         [ #  # ]:          0 :                 lru = page_lru(page);
    1852                 :            : 
    1853                 :          0 :                 nr_pages = hpage_nr_pages(page);
    1854                 :          0 :                 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
    1855                 :          0 :                 list_move(&page->lru, &lruvec->lists[lru]);
    1856                 :            : 
    1857         [ #  # ]:          0 :                 if (put_page_testzero(page)) {
    1858         [ #  # ]:          0 :                         __ClearPageLRU(page);
    1859         [ #  # ]:          0 :                         __ClearPageActive(page);
    1860                 :          0 :                         del_page_from_lru_list(page, lruvec, lru);
    1861                 :            : 
    1862         [ #  # ]:          0 :                         if (unlikely(PageCompound(page))) {
    1863                 :          0 :                                 spin_unlock_irq(&pgdat->lru_lock);
    1864                 :          0 :                                 (*get_compound_page_dtor(page))(page);
    1865                 :          0 :                                 spin_lock_irq(&pgdat->lru_lock);
    1866                 :            :                         } else
    1867                 :          0 :                                 list_add(&page->lru, &pages_to_free);
    1868                 :            :                 } else {
    1869                 :          0 :                         nr_moved += nr_pages;
    1870                 :            :                 }
    1871                 :            :         }
    1872                 :            : 
    1873                 :            :         /*
    1874                 :            :          * To save our caller's stack, now use input list for pages to free.
    1875                 :            :          */
    1876         [ #  # ]:          0 :         list_splice(&pages_to_free, list);
    1877                 :            : 
    1878                 :          0 :         return nr_moved;
    1879                 :            : }
    1880                 :            : 
    1881                 :            : /*
    1882                 :            :  * If a kernel thread (such as nfsd for loop-back mounts) services
     1883                 :            :  * a backing device by writing to the page cache, it sets PF_LESS_THROTTLE.
    1884                 :            :  * In that case we should only throttle if the backing device it is
    1885                 :            :  * writing to is congested.  In other cases it is safe to throttle.
    1886                 :            :  */
    1887                 :          0 : static int current_may_throttle(void)
    1888                 :            : {
    1889         [ #  # ]:          0 :         return !(current->flags & PF_LESS_THROTTLE) ||
    1890   [ #  #  #  #  :          0 :                 current->backing_dev_info == NULL ||
                   #  # ]
    1891         [ #  # ]:          0 :                 bdi_write_congested(current->backing_dev_info);
    1892                 :            : }
    1893                 :            : 
    1894                 :            : /*
    1895                 :            :  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
    1896                 :            :  * of reclaimed pages
    1897                 :            :  */
    1898                 :            : static noinline_for_stack unsigned long
    1899                 :          0 : shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
    1900                 :            :                      struct scan_control *sc, enum lru_list lru)
    1901                 :            : {
    1902                 :          0 :         LIST_HEAD(page_list);
    1903                 :          0 :         unsigned long nr_scanned;
    1904                 :          0 :         unsigned long nr_reclaimed = 0;
    1905                 :          0 :         unsigned long nr_taken;
    1906                 :          0 :         struct reclaim_stat stat;
    1907                 :          0 :         int file = is_file_lru(lru);
    1908                 :          0 :         enum vm_event_item item;
    1909                 :          0 :         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    1910                 :          0 :         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    1911                 :          0 :         bool stalled = false;
    1912                 :            : 
    1913         [ #  # ]:          0 :         while (unlikely(too_many_isolated(pgdat, file, sc))) {
    1914         [ #  # ]:          0 :                 if (stalled)
    1915                 :            :                         return 0;
    1916                 :            : 
    1917                 :            :                 /* wait a bit for the reclaimer. */
    1918                 :          0 :                 msleep(100);
    1919                 :          0 :                 stalled = true;
    1920                 :            : 
    1921                 :            :                 /* We are about to die and free our memory. Return now. */
    1922         [ #  # ]:          0 :                 if (fatal_signal_pending(current))
    1923                 :            :                         return SWAP_CLUSTER_MAX;
    1924                 :            :         }
    1925                 :            : 
    1926                 :          0 :         lru_add_drain();
    1927                 :            : 
    1928                 :          0 :         spin_lock_irq(&pgdat->lru_lock);
    1929                 :            : 
    1930                 :          0 :         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
    1931                 :            :                                      &nr_scanned, sc, lru);
    1932                 :            : 
    1933                 :          0 :         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
    1934                 :          0 :         reclaim_stat->recent_scanned[file] += nr_taken;
    1935                 :            : 
    1936         [ #  # ]:          0 :         item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
    1937                 :          0 :         if (!cgroup_reclaim(sc))
    1938         [ #  # ]:          0 :                 __count_vm_events(item, nr_scanned);
    1939                 :          0 :         __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
    1940                 :          0 :         spin_unlock_irq(&pgdat->lru_lock);
    1941                 :            : 
    1942         [ #  # ]:          0 :         if (nr_taken == 0)
    1943                 :            :                 return 0;
    1944                 :            : 
    1945                 :          0 :         nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
    1946                 :            :                                 &stat, false);
    1947                 :            : 
    1948                 :          0 :         spin_lock_irq(&pgdat->lru_lock);
    1949                 :            : 
    1950         [ #  # ]:          0 :         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
    1951                 :          0 :         if (!cgroup_reclaim(sc))
    1952         [ #  # ]:          0 :                 __count_vm_events(item, nr_reclaimed);
    1953                 :          0 :         __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
    1954                 :          0 :         reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
    1955                 :          0 :         reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
    1956                 :            : 
    1957                 :          0 :         move_pages_to_lru(lruvec, &page_list);
    1958                 :            : 
    1959                 :          0 :         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
    1960                 :            : 
    1961                 :          0 :         spin_unlock_irq(&pgdat->lru_lock);
    1962                 :            : 
    1963                 :          0 :         mem_cgroup_uncharge_list(&page_list);
    1964                 :          0 :         free_unref_page_list(&page_list);
    1965                 :            : 
    1966                 :            :         /*
    1967                 :            :          * If dirty pages are scanned that are not queued for IO, it
    1968                 :            :          * implies that flushers are not doing their job. This can
    1969                 :            :          * happen when memory pressure pushes dirty pages to the end of
    1970                 :            :          * the LRU before the dirty limits are breached and the dirty
    1971                 :            :          * data has expired. It can also happen when the proportion of
    1972                 :            :          * dirty pages grows not through writes but through memory
    1973                 :            :          * pressure reclaiming all the clean cache. And in some cases,
    1974                 :            :          * the flushers simply cannot keep up with the allocation
    1975                 :            :          * rate. Nudge the flusher threads in case they are asleep.
    1976                 :            :          */
    1977         [ #  # ]:          0 :         if (stat.nr_unqueued_dirty == nr_taken)
    1978                 :          0 :                 wakeup_flusher_threads(WB_REASON_VMSCAN);
    1979                 :            : 
    1980                 :          0 :         sc->nr.dirty += stat.nr_dirty;
    1981                 :          0 :         sc->nr.congested += stat.nr_congested;
    1982                 :          0 :         sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
    1983                 :          0 :         sc->nr.writeback += stat.nr_writeback;
    1984                 :          0 :         sc->nr.immediate += stat.nr_immediate;
    1985                 :          0 :         sc->nr.taken += nr_taken;
    1986         [ #  # ]:          0 :         if (file)
    1987                 :          0 :                 sc->nr.file_taken += nr_taken;
    1988                 :            : 
    1989                 :          0 :         trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
    1990                 :          0 :                         nr_scanned, nr_reclaimed, &stat, sc->priority, file);
    1991                 :          0 :         return nr_reclaimed;
    1992                 :            : }
    1993                 :            : 
    1994                 :          0 : static void shrink_active_list(unsigned long nr_to_scan,
    1995                 :            :                                struct lruvec *lruvec,
    1996                 :            :                                struct scan_control *sc,
    1997                 :            :                                enum lru_list lru)
    1998                 :            : {
    1999                 :          0 :         unsigned long nr_taken;
    2000                 :          0 :         unsigned long nr_scanned;
    2001                 :          0 :         unsigned long vm_flags;
    2002                 :          0 :         LIST_HEAD(l_hold);      /* The pages which were snipped off */
    2003                 :          0 :         LIST_HEAD(l_active);
    2004                 :          0 :         LIST_HEAD(l_inactive);
    2005                 :          0 :         struct page *page;
    2006                 :          0 :         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    2007                 :          0 :         unsigned nr_deactivate, nr_activate;
    2008                 :          0 :         unsigned nr_rotated = 0;
    2009                 :          0 :         int file = is_file_lru(lru);
    2010                 :          0 :         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    2011                 :            : 
    2012                 :          0 :         lru_add_drain();
    2013                 :            : 
    2014                 :          0 :         spin_lock_irq(&pgdat->lru_lock);
    2015                 :            : 
    2016                 :          0 :         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
    2017                 :            :                                      &nr_scanned, sc, lru);
    2018                 :            : 
    2019                 :          0 :         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
    2020                 :          0 :         reclaim_stat->recent_scanned[file] += nr_taken;
    2021                 :            : 
    2022         [ #  # ]:          0 :         __count_vm_events(PGREFILL, nr_scanned);
    2023                 :          0 :         __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
    2024                 :            : 
    2025                 :          0 :         spin_unlock_irq(&pgdat->lru_lock);
    2026                 :            : 
    2027         [ #  # ]:          0 :         while (!list_empty(&l_hold)) {
    2028                 :          0 :                 cond_resched();
    2029                 :          0 :                 page = lru_to_page(&l_hold);
    2030                 :          0 :                 list_del(&page->lru);
    2031                 :            : 
    2032         [ #  # ]:          0 :                 if (unlikely(!page_evictable(page))) {
    2033                 :          0 :                         putback_lru_page(page);
    2034                 :          0 :                         continue;
    2035                 :            :                 }
    2036                 :            : 
    2037         [ #  # ]:          0 :                 if (unlikely(buffer_heads_over_limit)) {
    2038   [ #  #  #  # ]:          0 :                         if (page_has_private(page) && trylock_page(page)) {
    2039         [ #  # ]:          0 :                                 if (page_has_private(page))
    2040                 :          0 :                                         try_to_release_page(page, 0);
    2041                 :          0 :                                 unlock_page(page);
    2042                 :            :                         }
    2043                 :            :                 }
    2044                 :            : 
    2045         [ #  # ]:          0 :                 if (page_referenced(page, 0, sc->target_mem_cgroup,
    2046                 :            :                                     &vm_flags)) {
    2047                 :          0 :                         nr_rotated += hpage_nr_pages(page);
    2048                 :            :                         /*
    2049                 :            :                          * Identify referenced, file-backed active pages and
     2050                 :            :                          * give them one more trip around the active list, so
     2051                 :            :                          * that executable code gets a better chance to stay in
    2052                 :            :                          * memory under moderate memory pressure.  Anon pages
    2053                 :            :                          * are not likely to be evicted by use-once streaming
    2054                 :            :                          * IO, plus JVM can create lots of anon VM_EXEC pages,
    2055                 :            :                          * so we ignore them here.
    2056                 :            :                          */
    2057   [ #  #  #  # ]:          0 :                         if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
    2058                 :          0 :                                 list_add(&page->lru, &l_active);
    2059                 :          0 :                                 continue;
    2060                 :            :                         }
    2061                 :            :                 }
    2062                 :            : 
    2063         [ #  # ]:          0 :                 ClearPageActive(page);  /* we are de-activating */
    2064         [ #  # ]:          0 :                 SetPageWorkingset(page);
    2065                 :          0 :                 list_add(&page->lru, &l_inactive);
    2066                 :            :         }
    2067                 :            : 
    2068                 :            :         /*
    2069                 :            :          * Move pages back to the lru list.
    2070                 :            :          */
    2071                 :          0 :         spin_lock_irq(&pgdat->lru_lock);
    2072                 :            :         /*
    2073                 :            :          * Count referenced pages from currently used mappings as rotated,
    2074                 :            :          * even though only some of them are actually re-activated.  This
    2075                 :            :          * helps balance scan pressure between file and anonymous pages in
    2076                 :            :          * get_scan_count.
    2077                 :            :          */
    2078                 :          0 :         reclaim_stat->recent_rotated[file] += nr_rotated;
    2079                 :            : 
    2080                 :          0 :         nr_activate = move_pages_to_lru(lruvec, &l_active);
    2081                 :          0 :         nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
    2082                 :            :         /* Keep all free pages in l_active list */
    2083         [ #  # ]:          0 :         list_splice(&l_inactive, &l_active);
    2084                 :            : 
    2085         [ #  # ]:          0 :         __count_vm_events(PGDEACTIVATE, nr_deactivate);
    2086                 :          0 :         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
    2087                 :            : 
    2088                 :          0 :         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
    2089                 :          0 :         spin_unlock_irq(&pgdat->lru_lock);
    2090                 :            : 
    2091                 :          0 :         mem_cgroup_uncharge_list(&l_active);
    2092                 :          0 :         free_unref_page_list(&l_active);
    2093                 :          0 :         trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
    2094                 :          0 :                         nr_deactivate, nr_rotated, sc->priority, file);
    2095                 :          0 : }
    2096                 :            : 
    2097                 :          0 : unsigned long reclaim_pages(struct list_head *page_list)
    2098                 :            : {
    2099                 :          0 :         int nid = -1;
    2100                 :          0 :         unsigned long nr_reclaimed = 0;
    2101                 :          0 :         LIST_HEAD(node_page_list);
    2102                 :          0 :         struct reclaim_stat dummy_stat;
    2103                 :          0 :         struct page *page;
    2104                 :          0 :         struct scan_control sc = {
    2105                 :            :                 .gfp_mask = GFP_KERNEL,
    2106                 :            :                 .priority = DEF_PRIORITY,
    2107                 :            :                 .may_writepage = 1,
    2108                 :            :                 .may_unmap = 1,
    2109                 :            :                 .may_swap = 1,
    2110                 :            :         };
    2111                 :            : 
    2112         [ #  # ]:          0 :         while (!list_empty(page_list)) {
    2113                 :          0 :                 page = lru_to_page(page_list);
    2114         [ #  # ]:          0 :                 if (nid == -1) {
    2115                 :          0 :                         nid = page_to_nid(page);
    2116                 :          0 :                         INIT_LIST_HEAD(&node_page_list);
    2117                 :            :                 }
    2118                 :            : 
    2119         [ #  # ]:          0 :                 if (nid == page_to_nid(page)) {
    2120         [ #  # ]:          0 :                         ClearPageActive(page);
    2121                 :          0 :                         list_move(&page->lru, &node_page_list);
    2122                 :          0 :                         continue;
    2123                 :            :                 }
    2124                 :            : 
    2125                 :          0 :                 nr_reclaimed += shrink_page_list(&node_page_list,
    2126                 :            :                                                 NODE_DATA(nid),
    2127                 :            :                                                 &sc, 0,
    2128                 :            :                                                 &dummy_stat, false);
    2129         [ #  # ]:          0 :                 while (!list_empty(&node_page_list)) {
    2130                 :          0 :                         page = lru_to_page(&node_page_list);
    2131                 :          0 :                         list_del(&page->lru);
    2132                 :          0 :                         putback_lru_page(page);
    2133                 :            :                 }
    2134                 :            : 
    2135                 :            :                 nid = -1;
    2136                 :            :         }
    2137                 :            : 
    2138         [ #  # ]:          0 :         if (!list_empty(&node_page_list)) {
    2139                 :          0 :                 nr_reclaimed += shrink_page_list(&node_page_list,
    2140                 :            :                                                 NODE_DATA(nid),
    2141                 :            :                                                 &sc, 0,
    2142                 :            :                                                 &dummy_stat, false);
    2143         [ #  # ]:          0 :                 while (!list_empty(&node_page_list)) {
    2144                 :          0 :                         page = lru_to_page(&node_page_list);
    2145                 :          0 :                         list_del(&page->lru);
    2146                 :          0 :                         putback_lru_page(page);
    2147                 :            :                 }
    2148                 :            :         }
    2149                 :            : 
    2150                 :          0 :         return nr_reclaimed;
    2151                 :            : }
    2152                 :            : 
    2153                 :          0 : static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
    2154                 :            :                                  struct lruvec *lruvec, struct scan_control *sc)
    2155                 :            : {
    2156         [ #  # ]:          0 :         if (is_active_lru(lru)) {
    2157         [ #  # ]:          0 :                 if (sc->may_deactivate & (1 << is_file_lru(lru)))
    2158                 :          0 :                         shrink_active_list(nr_to_scan, lruvec, sc, lru);
    2159                 :            :                 else
    2160                 :          0 :                         sc->skipped_deactivate = 1;
    2161                 :          0 :                 return 0;
    2162                 :            :         }
    2163                 :            : 
    2164                 :          0 :         return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
    2165                 :            : }
    2166                 :            : 
    2167                 :            : /*
    2168                 :            :  * The inactive anon list should be small enough that the VM never has
    2169                 :            :  * to do too much work.
    2170                 :            :  *
    2171                 :            :  * The inactive file list should be small enough to leave most memory
    2172                 :            :  * to the established workingset on the scan-resistant active list,
    2173                 :            :  * but large enough to avoid thrashing the aggregate readahead window.
    2174                 :            :  *
    2175                 :            :  * Both inactive lists should also be large enough that each inactive
    2176                 :            :  * page has a chance to be referenced again before it is reclaimed.
    2177                 :            :  *
    2178                 :            :  * If that fails and refaulting is observed, the inactive list grows.
    2179                 :            :  *
    2180                 :            :  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
    2181                 :            :  * on this LRU, maintained by the pageout code. An inactive_ratio
    2182                 :            :  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
    2183                 :            :  *
    2184                 :            :  * total     target    max
    2185                 :            :  * memory    ratio     inactive
    2186                 :            :  * -------------------------------------
    2187                 :            :  *   10MB       1         5MB
    2188                 :            :  *  100MB       1        50MB
    2189                 :            :  *    1GB       3       250MB
    2190                 :            :  *   10GB      10       0.9GB
    2191                 :            :  *  100GB      31         3GB
    2192                 :            :  *    1TB     101        10GB
    2193                 :            :  *   10TB     320        32GB
    2194                 :            :  */
    2195                 :          0 : static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
    2196                 :            : {
    2197                 :          0 :         enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
    2198                 :          0 :         unsigned long inactive, active;
    2199                 :          0 :         unsigned long inactive_ratio;
    2200                 :          0 :         unsigned long gb;
    2201                 :            : 
    2202                 :          0 :         inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
    2203                 :          0 :         active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
    2204                 :            : 
    2205                 :          0 :         gb = (inactive + active) >> (30 - PAGE_SHIFT);
    2206         [ #  # ]:          0 :         if (gb)
    2207                 :          0 :                 inactive_ratio = int_sqrt(10 * gb);
    2208                 :            :         else
    2209                 :            :                 inactive_ratio = 1;
    2210                 :            : 
    2211                 :          0 :         return inactive * inactive_ratio < active;
    2212                 :            : }
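The ratio computation above can be checked in isolation. A minimal
standalone sketch (not part of vmscan.c), assuming 4KiB pages
(PAGE_SHIFT == 12), a 64-bit host, and a naive stand-in for the
kernel's int_sqrt(), reproduces the table from the comment:

#include <stdio.h>

/* naive integer square root, standing in for the kernel's int_sqrt() */
static unsigned long int_sqrt_approx(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

int main(void)
{
        /* total LRU sizes from the table: 10MB .. 10TB */
        unsigned long totals_mb[] = {
                10, 100, 1024, 10240, 102400, 1048576, 10485760
        };
        int i;

        for (i = 0; i < 7; i++) {
                unsigned long pages = totals_mb[i] << (20 - 12); /* MB -> pages */
                unsigned long gb = pages >> (30 - 12);           /* pages -> GB */
                unsigned long ratio = gb ? int_sqrt_approx(10 * gb) : 1;

                /* the inactive list may grow to total / (ratio + 1) */
                printf("%10luMB  ratio %4lu  max inactive %8luMB\n",
                       totals_mb[i], ratio, totals_mb[i] / (ratio + 1));
        }
        return 0;
}

For 100GB this prints ratio 31 and roughly 3GB of inactive pages,
matching the table row above.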
    2213                 :            : 
    2214                 :            : enum scan_balance {
    2215                 :            :         SCAN_EQUAL,
    2216                 :            :         SCAN_FRACT,
    2217                 :            :         SCAN_ANON,
    2218                 :            :         SCAN_FILE,
    2219                 :            : };
    2220                 :            : 
    2221                 :            : /*
    2222                 :            :  * Determine how aggressively the anon and file LRU lists should be
    2223                 :            :  * scanned.  The relative value of each set of LRU lists is determined
    2224                 :            :  * by looking at the fraction of the scanned pages that we rotated back
    2225                 :            :  * onto the active list instead of evicting.
    2226                 :            :  *
    2227                 :            :  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
    2228                 :            :  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
    2229                 :            :  */
    2230                 :          0 : static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
    2231                 :            :                            unsigned long *nr)
    2232                 :            : {
    2233         [ #  # ]:          0 :         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
    2234         [ #  # ]:          0 :         int swappiness = mem_cgroup_swappiness(memcg);
    2235                 :          0 :         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    2236                 :          0 :         u64 fraction[2];
    2237                 :          0 :         u64 denominator = 0;    /* gcc */
    2238         [ #  # ]:          0 :         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    2239                 :          0 :         unsigned long anon_prio, file_prio;
    2240                 :          0 :         enum scan_balance scan_balance;
    2241                 :          0 :         unsigned long anon, file;
    2242                 :          0 :         unsigned long ap, fp;
    2243                 :          0 :         enum lru_list lru;
    2244                 :            : 
    2245                 :            :         /* If we have no swap space, do not bother scanning anon pages. */
    2246   [ #  #  #  # ]:          0 :         if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
    2247                 :          0 :                 scan_balance = SCAN_FILE;
    2248                 :          0 :                 goto out;
    2249                 :            :         }
    2250                 :            : 
    2251                 :            :         /*
    2252                 :            :          * Global reclaim will swap to prevent OOM even with no
    2253                 :            :          * swappiness, but memcg users want to use this knob to
    2254                 :            :          * disable swapping for individual groups completely when
    2255                 :            :          * using the memory controller's swap limit feature would be
    2256                 :            :          * too expensive.
    2257                 :            :          */
    2258                 :          0 :         if (cgroup_reclaim(sc) && !swappiness) {
    2259                 :            :                 scan_balance = SCAN_FILE;
    2260                 :            :                 goto out;
    2261                 :            :         }
    2262                 :            : 
    2263                 :            :         /*
    2264                 :            :          * Do not apply any pressure balancing cleverness when the
    2265                 :            :          * system is close to OOM; scan both anon and file equally
    2266                 :            :          * (unless the swappiness setting disagrees with swapping).
    2267                 :            :          */
    2268   [ #  #  #  # ]:          0 :         if (!sc->priority && swappiness) {
    2269                 :          0 :                 scan_balance = SCAN_EQUAL;
    2270                 :          0 :                 goto out;
    2271                 :            :         }
    2272                 :            : 
    2273                 :            :         /*
    2274                 :            :          * If the system is almost out of file pages, force-scan anon.
    2275                 :            :          */
    2276         [ #  # ]:          0 :         if (sc->file_is_tiny) {
    2277                 :          0 :                 scan_balance = SCAN_ANON;
    2278                 :          0 :                 goto out;
    2279                 :            :         }
    2280                 :            : 
    2281                 :            :         /*
    2282                 :            :          * If there is enough inactive page cache, we do not reclaim
    2283                 :            :          * anything from the anonymous working set right now.
    2284                 :            :          */
    2285         [ #  # ]:          0 :         if (sc->cache_trim_mode) {
    2286                 :          0 :                 scan_balance = SCAN_FILE;
    2287                 :          0 :                 goto out;
    2288                 :            :         }
    2289                 :            : 
    2290                 :          0 :         scan_balance = SCAN_FRACT;
    2291                 :            : 
    2292                 :            :         /*
    2293                 :            :          * With swappiness at 100, anonymous and file have the same priority.
    2294                 :            :          * This scanning priority is essentially the inverse of IO cost.
    2295                 :            :          */
    2296                 :          0 :         anon_prio = swappiness;
    2297                 :          0 :         file_prio = 200 - anon_prio;
    2298                 :            : 
    2299                 :            :         /*
    2300                 :            :          * OK, so we have swap space and a fair amount of page cache
    2301                 :            :          * pages.  We use the recently rotated / recently scanned
    2302                 :            :          * ratios to determine how valuable each cache is.
    2303                 :            :          *
    2304                 :            :          * Because workloads change over time (and to avoid overflow)
    2305                 :            :          * we keep these statistics as a floating average, which ends
    2306                 :            :          * up weighing recent references more than old ones.
    2307                 :            :          *
    2308                 :            :          * anon in [0], file in [1]
    2309                 :            :          */
    2310                 :            : 
    2311                 :          0 :         anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
    2312                 :          0 :                 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
    2313                 :          0 :         file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
    2314                 :          0 :                 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
    2315                 :            : 
    2316                 :          0 :         spin_lock_irq(&pgdat->lru_lock);
    2317         [ #  # ]:          0 :         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
    2318                 :          0 :                 reclaim_stat->recent_scanned[0] /= 2;
    2319                 :          0 :                 reclaim_stat->recent_rotated[0] /= 2;
    2320                 :            :         }
    2321                 :            : 
    2322         [ #  # ]:          0 :         if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
    2323                 :          0 :                 reclaim_stat->recent_scanned[1] /= 2;
    2324                 :          0 :                 reclaim_stat->recent_rotated[1] /= 2;
    2325                 :            :         }
    2326                 :            : 
    2327                 :            :         /*
    2328                 :            :          * The amount of pressure on anon vs file pages is inversely
    2329                 :            :          * proportional to the fraction of recently scanned pages on
    2330                 :            :          * each list that were recently referenced and in active use.
    2331                 :            :          */
    2332                 :          0 :         ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
    2333                 :          0 :         ap /= reclaim_stat->recent_rotated[0] + 1;
    2334                 :            : 
    2335                 :          0 :         fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
    2336                 :          0 :         fp /= reclaim_stat->recent_rotated[1] + 1;
    2337                 :          0 :         spin_unlock_irq(&pgdat->lru_lock);
    2338                 :            : 
    2339                 :          0 :         fraction[0] = ap;
    2340                 :          0 :         fraction[1] = fp;
    2341                 :          0 :         denominator = ap + fp + 1;
    2342                 :          0 : out:
    2343         [ #  # ]:          0 :         for_each_evictable_lru(lru) {
    2344                 :          0 :                 int file = is_file_lru(lru);
    2345                 :          0 :                 unsigned long lruvec_size;
    2346                 :          0 :                 unsigned long scan;
    2347                 :          0 :                 unsigned long protection;
    2348                 :            : 
    2349                 :          0 :                 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
    2350      [ #  #  # ]:          0 :                 protection = mem_cgroup_protection(memcg,
    2351                 :            :                                                    sc->memcg_low_reclaim);
    2352                 :            : 
    2353                 :          0 :                 if (protection) {
    2354                 :            :                         /*
    2355                 :            :                          * Scale a cgroup's reclaim pressure by proportioning
    2356                 :            :                          * its current usage to its memory.low or memory.min
    2357                 :            :                          * setting.
    2358                 :            :                          *
    2359                 :            :                          * This is important, as otherwise scanning aggression
    2360                 :            :                          * becomes extremely binary -- from nothing as we
    2361                 :            :                          * approach the memory protection threshold, to totally
    2362                 :            :                          * nominal as we exceed it.  This results in requiring
    2363                 :            :                          * setting extremely liberal protection thresholds. It
    2364                 :            :                          * also means we simply get no protection at all if we
    2365                 :            :                          * set it too low, which is not ideal.
    2366                 :            :                          *
    2367                 :            :                          * If there is any protection in place, we reduce scan
    2368                 :            :                          * pressure by how much of the total memory used is
    2369                 :            :                          * within protection thresholds.
    2370                 :            :                          *
    2371                 :            :                          * There is one special case: in the first reclaim pass,
    2372                 :            :                          * we skip over all groups that are within their low
    2373                 :            :                          * protection. If that fails to reclaim enough pages to
    2374                 :            :                          * satisfy the reclaim goal, we come back and override
    2375                 :            :                          * the best-effort low protection. However, we still
    2376                 :            :                          * ideally want to honor how well-behaved groups are in
    2377                 :            :                          * that case instead of simply punishing them all
    2378                 :            :                          * equally. As such, we reclaim them based on how much
    2379                 :            :                          * memory they are using, reducing the scan pressure
    2380                 :            :                          * again by how much of the total memory used is under
    2381                 :            :                          * hard protection.
    2382                 :            :                          */
    2383                 :            :                         unsigned long cgroup_size = mem_cgroup_size(memcg);
    2384                 :            : 
    2385                 :            :                         /* Avoid TOCTOU with earlier protection check */
    2386                 :            :                         cgroup_size = max(cgroup_size, protection);
    2387                 :            : 
    2388                 :            :                         scan = lruvec_size - lruvec_size * protection /
    2389                 :            :                                 cgroup_size;
    2390                 :            : 
    2391                 :            :                         /*
    2392                 :            :                          * Minimally target SWAP_CLUSTER_MAX pages to keep
    2393                 :            :                          * reclaim moving forwards, avoiding decrementing
    2394                 :            :                          * sc->priority further than desirable.
    2395                 :            :                          */
    2396                 :            :                         scan = max(scan, SWAP_CLUSTER_MAX);
    2397                 :            :                 } else {
    2398                 :          0 :                         scan = lruvec_size;
    2399                 :            :                 }
    2400                 :            : 
    2401                 :          0 :                 scan >>= sc->priority;
    2402                 :            : 
    2403                 :            :                 /*
    2404                 :            :                  * If the cgroup's already been deleted, make sure to
    2405                 :            :                  * scrape out the remaining cache.
    2406                 :            :                  */
    2407                 :          0 :                 if (!scan && !mem_cgroup_online(memcg))
    2408                 :            :                         scan = min(lruvec_size, SWAP_CLUSTER_MAX);
    2409                 :            : 
    2410      [ #  #  # ]:          0 :                 switch (scan_balance) {
    2411                 :            :                 case SCAN_EQUAL:
    2412                 :            :                         /* Scan lists relative to size */
    2413                 :            :                         break;
    2414                 :            :                 case SCAN_FRACT:
    2415                 :            :                         /*
    2416                 :            :                          * Scan types proportional to swappiness and
    2417                 :            :                          * their relative recent reclaim efficiency.
    2418                 :            :                          * Make sure we don't miss the last page on
    2419                 :            :                          * the offlined memory cgroups because of a
    2420                 :            :                          * round-off error.
    2421                 :            :                          */
    2422                 :          0 :                         scan = mem_cgroup_online(memcg) ?
    2423                 :          0 :                                div64_u64(scan * fraction[file], denominator) :
    2424                 :            :                                DIV64_U64_ROUND_UP(scan * fraction[file],
    2425                 :            :                                                   denominator);
    2426                 :          0 :                         break;
    2427                 :          0 :                 case SCAN_FILE:
    2428                 :            :                 case SCAN_ANON:
    2429                 :            :                         /* Scan one type exclusively */
    2430         [ #  # ]:          0 :                         if ((scan_balance == SCAN_FILE) != file) {
    2431                 :          0 :                                 lruvec_size = 0;
    2432                 :          0 :                                 scan = 0;
    2433                 :            :                         }
    2434                 :            :                         break;
    2435                 :            :                 default:
    2436                 :            :                         /* Look ma, no brain */
    2437                 :          0 :                         BUG();
    2438                 :            :                 }
    2439                 :            : 
    2440                 :          0 :                 nr[lru] = scan;
    2441                 :            :         }
    2442                 :          0 : }
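Concretely, the SCAN_FRACT split computed above is plain integer
arithmetic on the rotation statistics. A minimal standalone sketch with
made-up reclaim_stat numbers (every value below is hypothetical, chosen
only to show the mechanics):

#include <stdio.h>

int main(void)
{
        /* hypothetical inputs, not data from this report */
        unsigned long long swappiness = 60;
        unsigned long long anon_prio = swappiness;       /*  60 */
        unsigned long long file_prio = 200 - anon_prio;  /* 140 */

        /* recent_scanned / recent_rotated: anon in [0], file in [1] */
        unsigned long long scanned[2] = { 1000, 4000 };
        unsigned long long rotated[2] = {  900,  400 };

        /* pressure is inversely proportional to the rotation rate */
        unsigned long long ap = anon_prio * (scanned[0] + 1) / (rotated[0] + 1);
        unsigned long long fp = file_prio * (scanned[1] + 1) / (rotated[1] + 1);
        unsigned long long denominator = ap + fp + 1;

        /* SCAN_FRACT: split one LRU's scan target by those fractions */
        unsigned long long lruvec_size = 1ULL << 20; /* pages */
        unsigned int priority = 12;                  /* DEF_PRIORITY */
        unsigned long long scan = lruvec_size >> priority;

        printf("anon share: %llu of %llu pages\n", scan * ap / denominator, scan);
        printf("file share: %llu of %llu pages\n", scan * fp / denominator, scan);
        return 0;
}

With these inputs the file LRU, which rotated far fewer of its scanned
pages, absorbs most of the pressure (244 of 256 pages). The memory.low
scaling earlier in the function works the same way: with a usage of 4G
of which 2G is protected, lruvec_size - lruvec_size * protection /
cgroup_size halves the scan target before the priority shift.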
    2443                 :            : 
    2444                 :          0 : static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
    2445                 :            : {
    2446                 :          0 :         unsigned long nr[NR_LRU_LISTS];
    2447                 :          0 :         unsigned long targets[NR_LRU_LISTS];
    2448                 :          0 :         unsigned long nr_to_scan;
    2449                 :          0 :         enum lru_list lru;
    2450                 :          0 :         unsigned long nr_reclaimed = 0;
    2451                 :          0 :         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
    2452                 :          0 :         struct blk_plug plug;
    2453                 :          0 :         bool scan_adjusted;
    2454                 :            : 
    2455                 :          0 :         get_scan_count(lruvec, sc, nr);
    2456                 :            : 
    2457                 :            :         /* Record the original scan target for proportional adjustments later */
    2458                 :          0 :         memcpy(targets, nr, sizeof(nr));
    2459                 :            : 
    2460                 :            :         /*
    2461                 :            :          * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
    2462                 :            :          * event that can occur when there is little memory pressure e.g.
    2463                 :            :          * event that can occur when there is little memory pressure, e.g.
    2464                 :            :          * multiple streaming readers/writers. Hence, we do not abort scanning
    2465                 :            :          * when the requested number of pages has been reclaimed while scanning
    2466                 :            :          * at DEF_PRIORITY: the fact that we are direct reclaiming implies
    2467                 :            :          * that kswapd is not keeping up and it is best to
    2468                 :            :          * abort proportional reclaim if either the file or anon lru has already
    2469                 :            :          * dropped to zero at the first pass.
    2470                 :            :          */
    2471         [ #  # ]:          0 :         scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
    2472         [ #  # ]:          0 :                          sc->priority == DEF_PRIORITY);
    2473                 :            : 
    2474                 :          0 :         blk_start_plug(&plug);
    2475   [ #  #  #  # ]:          0 :         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
    2476         [ #  # ]:          0 :                                         nr[LRU_INACTIVE_FILE]) {
    2477                 :            :                 unsigned long nr_anon, nr_file, percentage;
    2478                 :            :                 unsigned long nr_scanned;
    2479                 :            : 
    2480         [ #  # ]:          0 :                 for_each_evictable_lru(lru) {
    2481         [ #  # ]:          0 :                         if (nr[lru]) {
    2482                 :          0 :                                 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
    2483                 :          0 :                                 nr[lru] -= nr_to_scan;
    2484                 :            : 
    2485                 :          0 :                                 nr_reclaimed += shrink_list(lru, nr_to_scan,
    2486                 :            :                                                             lruvec, sc);
    2487                 :            :                         }
    2488                 :            :                 }
    2489                 :            : 
    2490                 :          0 :                 cond_resched();
    2491                 :            : 
    2492         [ #  # ]:          0 :                 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
    2493                 :          0 :                         continue;
    2494                 :            : 
    2495                 :            :                 /*
    2496                 :            :                  * For kswapd and memcg, reclaim at least the number of pages
    2497                 :            :                  * requested. Ensure that the anon and file LRUs are scanned
    2498                 :            :                  * in proportion to what was requested by get_scan_count(). We
    2499                 :            :                  * stop reclaiming one LRU and reduce the amount of scanning
    2500                 :            :                  * proportionally to the original scan target.
    2501                 :            :                  */
    2502                 :          0 :                 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
    2503                 :          0 :                 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
    2504                 :            : 
    2505                 :            :                 /*
    2506                 :            :                  * It's just vindictive to attack the larger once the smaller
    2507                 :            :                  * has gone to zero.  And given the way we stop scanning the
    2508                 :            :                  * smaller below, this makes sure that we only make one nudge
    2509                 :            :                  * towards proportionality once we've got nr_to_reclaim.
    2510                 :            :                  */
    2511         [ #  # ]:          0 :                 if (!nr_file || !nr_anon)
    2512                 :            :                         break;
    2513                 :            : 
    2514         [ #  # ]:          0 :                 if (nr_file > nr_anon) {
    2515                 :          0 :                         unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
    2516                 :          0 :                                                 targets[LRU_ACTIVE_ANON] + 1;
    2517                 :          0 :                         lru = LRU_BASE;
    2518                 :          0 :                         percentage = nr_anon * 100 / scan_target;
    2519                 :            :                 } else {
    2520                 :          0 :                         unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
    2521                 :          0 :                                                 targets[LRU_ACTIVE_FILE] + 1;
    2522                 :          0 :                         lru = LRU_FILE;
    2523                 :          0 :                         percentage = nr_file * 100 / scan_target;
    2524                 :            :                 }
    2525                 :            : 
    2526                 :            :                 /* Stop scanning the smaller of the LRU */
    2527                 :          0 :                 nr[lru] = 0;
    2528                 :          0 :                 nr[lru + LRU_ACTIVE] = 0;
    2529                 :            : 
    2530                 :            :                 /*
    2531                 :            :                  * Recalculate the other LRU scan count based on its original
    2532                 :            :                  * scan target and the percentage scanning already complete
    2533                 :            :                  */
    2534         [ #  # ]:          0 :                 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
    2535                 :          0 :                 nr_scanned = targets[lru] - nr[lru];
    2536                 :          0 :                 nr[lru] = targets[lru] * (100 - percentage) / 100;
    2537                 :          0 :                 nr[lru] -= min(nr[lru], nr_scanned);
    2538                 :            : 
    2539                 :          0 :                 lru += LRU_ACTIVE;
    2540                 :          0 :                 nr_scanned = targets[lru] - nr[lru];
    2541                 :          0 :                 nr[lru] = targets[lru] * (100 - percentage) / 100;
    2542                 :          0 :                 nr[lru] -= min(nr[lru], nr_scanned);
    2543                 :            : 
    2544                 :          0 :                 scan_adjusted = true;
    2545                 :            :         }
    2546                 :          0 :         blk_finish_plug(&plug);
    2547                 :          0 :         sc->nr_reclaimed += nr_reclaimed;
    2548                 :            : 
    2549                 :            :         /*
    2550                 :            :          * Even if we did not try to evict anon pages at all, we want to
    2551                 :            :          * rebalance the anon lru active/inactive ratio.
    2552                 :            :          */
    2553   [ #  #  #  # ]:          0 :         if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
    2554                 :          0 :                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
    2555                 :            :                                    sc, LRU_ACTIVE_ANON);
    2556                 :          0 : }
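The proportional stop logic above is easiest to see with numbers. A
minimal standalone sketch using hypothetical targets and remainders
(illustrative values only):

#include <stdio.h>

enum { INACTIVE_ANON, ACTIVE_ANON, INACTIVE_FILE, ACTIVE_FILE, NR_LISTS };

int main(void)
{
        /* hypothetical original targets and what is still left to scan */
        unsigned long targets[NR_LISTS] = { 100, 100, 1000, 1000 };
        unsigned long nr[NR_LISTS]      = {  60,  80,  900,  950 };
        unsigned long nr_anon = nr[INACTIVE_ANON] + nr[ACTIVE_ANON]; /* 140 */
        unsigned long percentage;
        int lru;

        /* anon is the smaller side here: stop scanning it entirely... */
        percentage = nr_anon * 100 / (targets[INACTIVE_ANON] +
                                      targets[ACTIVE_ANON] + 1);
        nr[INACTIVE_ANON] = nr[ACTIVE_ANON] = 0;

        /* ...and trim the file lists back to the same relative progress */
        for (lru = INACTIVE_FILE; lru <= ACTIVE_FILE; lru++) {
                unsigned long nr_scanned = targets[lru] - nr[lru];

                nr[lru] = targets[lru] * (100 - percentage) / 100;
                nr[lru] -= (nr[lru] < nr_scanned) ? nr[lru] : nr_scanned;
                printf("list %d: %lu pages left to scan\n", lru, nr[lru]);
        }
        return 0;
}

With about 69% of the anon target still outstanding, the file lists are
cut back so that no more than about 31% of their original targets is
scanned in total, keeping the anon/file balance close to what
get_scan_count() asked for.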
    2557                 :            : 
    2558                 :            : /* Use reclaim/compaction for costly allocs or under memory pressure */
    2559                 :          0 : static bool in_reclaim_compaction(struct scan_control *sc)
    2560                 :            : {
    2561         [ #  # ]:          0 :         if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
    2562                 :          0 :                         (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
    2563         [ #  # ]:          0 :                          sc->priority < DEF_PRIORITY - 2))
    2564                 :          0 :                 return true;
    2565                 :            : 
    2566                 :            :         return false;
    2567                 :            : }
    2568                 :            : 
    2569                 :            : /*
    2570                 :            :  * Reclaim/compaction is used for high-order allocation requests. It reclaims
    2571                 :            :  * order-0 pages before compacting the zone. should_continue_reclaim() returns
    2572                 :            :  * true if more pages should be reclaimed such that when the page allocator
    2573                 :            :  * calls try_to_compact_zone() that it will have enough free pages to succeed.
    2574                 :            :  * calls try_to_compact_zone() it will have enough free pages to succeed.
    2575                 :            :  */
    2576                 :          0 : static inline bool should_continue_reclaim(struct pglist_data *pgdat,
    2577                 :            :                                         unsigned long nr_reclaimed,
    2578                 :            :                                         struct scan_control *sc)
    2579                 :            : {
    2580                 :          0 :         unsigned long pages_for_compaction;
    2581                 :          0 :         unsigned long inactive_lru_pages;
    2582                 :          0 :         int z;
    2583                 :            : 
    2584                 :            :         /* If not in reclaim/compaction mode, stop */
    2585         [ #  # ]:          0 :         if (!in_reclaim_compaction(sc))
    2586                 :            :                 return false;
    2587                 :            : 
    2588                 :            :         /*
    2589                 :            :          * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
    2590                 :            :          * number of pages that were scanned. This will return to the caller
    2591                 :            :          * with the risk that reclaim/compaction and the resulting allocation
    2592                 :            :          * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
    2593                 :            :          * allocations through requiring that the full LRU list has been scanned
    2594                 :            :          * first, by assuming that zero delta of sc->nr_scanned means full LRU
    2595                 :            :          * scan, but that approximation was wrong, and there were corner cases
    2596                 :            :          * where a non-zero number of pages was always scanned.
    2597                 :            :          */
    2598         [ #  # ]:          0 :         if (!nr_reclaimed)
    2599                 :            :                 return false;
    2600                 :            : 
    2601                 :            :         /* If compaction would go ahead or the allocation would succeed, stop */
    2602         [ #  # ]:          0 :         for (z = 0; z <= sc->reclaim_idx; z++) {
    2603                 :          0 :                 struct zone *zone = &pgdat->node_zones[z];
    2604         [ #  # ]:          0 :                 if (!managed_zone(zone))
    2605                 :          0 :                         continue;
    2606                 :            : 
    2607         [ #  # ]:          0 :                 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
    2608                 :            :                 case COMPACT_SUCCESS:
    2609                 :            :                 case COMPACT_CONTINUE:
    2610                 :            :                         return false;
    2611                 :          0 :                 default:
    2612                 :            :                         /* check next zone */
    2613                 :          0 :                         ;
    2614                 :            :                 }
    2615                 :            :         }
    2616                 :            : 
    2617                 :            :         /*
    2618                 :            :          * If we have not reclaimed enough pages for compaction and the
    2619                 :            :          * inactive lists are large enough, continue reclaiming
    2620                 :            :          */
    2621                 :          0 :         pages_for_compaction = compact_gap(sc->order);
    2622                 :          0 :         inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
    2623         [ #  # ]:          0 :         if (get_nr_swap_pages() > 0)
    2624                 :          0 :                 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
    2625                 :            : 
    2626                 :          0 :         return inactive_lru_pages > pages_for_compaction;
    2627                 :            : }
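/*
 * Illustrative numbers for the check above, assuming 4KiB pages and
 * that compact_gap(order) expands to 2UL << order as in this kernel's
 * include/linux/compaction.h: for an order-9 request (one 2MB THP),
 * pages_for_compaction = 2 << 9 = 1024 pages, so reclaim keeps going
 * while the node still holds more than 4MB on its inactive lists
 * (inactive anon is only counted when swap pages are available).
 */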
    2628                 :            : 
    2629                 :          0 : static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
    2630                 :            : {
    2631                 :          0 :         struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
    2632                 :          0 :         struct mem_cgroup *memcg;
    2633                 :            : 
    2634                 :          0 :         memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
    2635                 :          0 :         do {
    2636                 :          0 :                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
    2637                 :          0 :                 unsigned long reclaimed;
    2638                 :          0 :                 unsigned long scanned;
    2639                 :            : 
    2640                 :          0 :                 switch (mem_cgroup_protected(target_memcg, memcg)) {
    2641                 :            :                 case MEMCG_PROT_MIN:
    2642                 :            :                         /*
    2643                 :            :                          * Hard protection.
    2644                 :            :                          * If there is no reclaimable memory, OOM.
    2645                 :            :                          */
    2646                 :            :                         continue;
    2647                 :            :                 case MEMCG_PROT_LOW:
    2648                 :            :                         /*
    2649                 :            :                          * Soft protection.
    2650                 :            :                          * Respect the protection only as long as
    2651                 :            :                          * there is an unprotected supply
    2652                 :            :                          * of reclaimable memory from other cgroups.
    2653                 :            :                          */
    2654                 :            :                         if (!sc->memcg_low_reclaim) {
    2655                 :            :                                 sc->memcg_low_skipped = 1;
    2656                 :            :                                 continue;
    2657                 :            :                         }
    2658                 :            :                         memcg_memory_event(memcg, MEMCG_LOW);
    2659                 :            :                         break;
    2660                 :            :                 case MEMCG_PROT_NONE:
    2661                 :            :                         /*
    2662                 :            :                          * All protection thresholds breached. We may
    2663                 :            :                          * still choose to vary the scan pressure
    2664                 :            :                          * applied based on by how much the cgroup in
    2665                 :            :                          * question has exceeded its protection
    2666                 :            :                          * thresholds (see get_scan_count).
    2667                 :            :                          */
    2668                 :            :                         break;
    2669                 :            :                 }
    2670                 :            : 
    2671                 :          0 :                 reclaimed = sc->nr_reclaimed;
    2672                 :          0 :                 scanned = sc->nr_scanned;
    2673                 :            : 
    2674                 :          0 :                 shrink_lruvec(lruvec, sc);
    2675                 :            : 
    2676                 :          0 :                 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
    2677                 :          0 :                             sc->priority);
    2678                 :            : 
    2679                 :            :                 /* Record the group's reclaim efficiency */
    2680                 :          0 :                 vmpressure(sc->gfp_mask, memcg, false,
    2681                 :            :                            sc->nr_scanned - scanned,
    2682                 :            :                            sc->nr_reclaimed - reclaimed);
    2683                 :            : 
    2684                 :          0 :         } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
    2685                 :          0 : }
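/*
 * Note the iterator convention the do/while above relies on:
 * mem_cgroup_iter() advances a pre-order walk of the hierarchy below
 * target_memcg (a NULL target means the whole hierarchy) and returns
 * NULL when the walk completes, so each cgroup in the reclaim domain
 * is visited once per pass.
 */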
    2686                 :            : 
    2687                 :          0 : static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
    2688                 :            : {
    2689                 :          0 :         struct reclaim_state *reclaim_state = current->reclaim_state;
    2690                 :          0 :         unsigned long nr_reclaimed, nr_scanned;
    2691                 :          0 :         struct lruvec *target_lruvec;
    2692                 :          0 :         bool reclaimable = false;
    2693                 :          0 :         unsigned long file;
    2694                 :            : 
    2695                 :          0 :         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
    2696                 :            : 
    2697                 :          0 : again:
    2698                 :          0 :         memset(&sc->nr, 0, sizeof(sc->nr));
    2699                 :            : 
    2700                 :          0 :         nr_reclaimed = sc->nr_reclaimed;
    2701                 :          0 :         nr_scanned = sc->nr_scanned;
    2702                 :            : 
    2703                 :            :         /*
    2704                 :            :          * Target desirable inactive:active list ratios for the anon
    2705                 :            :          * and file LRU lists.
    2706                 :            :          */
    2707         [ #  # ]:          0 :         if (!sc->force_deactivate) {
    2708                 :          0 :                 unsigned long refaults;
    2709                 :            : 
    2710         [ #  # ]:          0 :                 if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
    2711                 :          0 :                         sc->may_deactivate |= DEACTIVATE_ANON;
    2712                 :            :                 else
    2713                 :          0 :                         sc->may_deactivate &= ~DEACTIVATE_ANON;
    2714                 :            : 
    2715                 :            :                 /*
    2716                 :            :                  * When refaults are being observed, it means a new
    2717                 :            :                  * workingset is being established. Deactivate to get
    2718                 :            :                  * rid of any stale active pages quickly.
    2719                 :            :                  */
    2720                 :          0 :                 refaults = lruvec_page_state(target_lruvec,
    2721                 :            :                                              WORKINGSET_ACTIVATE);
    2722   [ #  #  #  # ]:          0 :                 if (refaults != target_lruvec->refaults ||
    2723                 :          0 :                     inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
    2724                 :          0 :                         sc->may_deactivate |= DEACTIVATE_FILE;
    2725                 :            :                 else
    2726                 :          0 :                         sc->may_deactivate &= ~DEACTIVATE_FILE;
    2727                 :            :         } else
    2728                 :          0 :                 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
    2729                 :            : 
    2730                 :            :         /*
    2731                 :            :          * If we have plenty of inactive file pages that aren't
    2732                 :            :          * thrashing, try to reclaim those first before touching
    2733                 :            :          * anonymous pages.
    2734                 :            :          */
    2735                 :          0 :         file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
    2736   [ #  #  #  # ]:          0 :         if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
    2737                 :          0 :                 sc->cache_trim_mode = 1;
    2738                 :            :         else
    2739                 :          0 :                 sc->cache_trim_mode = 0;
    2740                 :            : 
    2741                 :            :         /*
    2742                 :            :          * Prevent the reclaimer from falling into the cache trap: as
    2743                 :            :          * cache pages start out inactive, every cache fault will tip
    2744                 :            :          * the scan balance towards the file LRU.  And as the file LRU
    2745                 :            :          * shrinks, so does the window for rotation from references.
    2746                 :            :          * This means we have a runaway feedback loop where a tiny
    2747                 :            :          * thrashing file LRU becomes infinitely more attractive than
    2748                 :            :          * anon pages.  Try to detect this based on file LRU size.
    2749                 :            :          */
    2750                 :          0 :         if (!cgroup_reclaim(sc)) {
    2751                 :          0 :                 unsigned long total_high_wmark = 0;
    2752                 :          0 :                 unsigned long free, anon;
    2753                 :          0 :                 int z;
    2754                 :            : 
    2755                 :          0 :                 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
    2756                 :          0 :                 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
    2757                 :          0 :                            node_page_state(pgdat, NR_INACTIVE_FILE);
    2758                 :            : 
    2759         [ #  # ]:          0 :                 for (z = 0; z < MAX_NR_ZONES; z++) {
    2760                 :          0 :                         struct zone *zone = &pgdat->node_zones[z];
    2761         [ #  # ]:          0 :                         if (!managed_zone(zone))
    2762                 :          0 :                                 continue;
    2763                 :            : 
    2764                 :          0 :                         total_high_wmark += high_wmark_pages(zone);
    2765                 :            :                 }
    2766                 :            : 
    2767                 :            :                 /*
    2768                 :            :                  * Consider anon: if that's low too, this isn't a
    2769                 :            :                  * runaway file reclaim problem, but rather just
    2770                 :            :                  * extreme pressure. Reclaim as per usual then.
    2771                 :            :                  */
    2772                 :          0 :                 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
    2773                 :            : 
    2774                 :          0 :                 sc->file_is_tiny =
    2775                 :          0 :                         file + free <= total_high_wmark &&
    2776   [ #  #  #  # ]:          0 :                         !(sc->may_deactivate & DEACTIVATE_ANON) &&
    2777         [ #  # ]:          0 :                         anon >> sc->priority;
    2778                 :            :         }
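/*
 * Both of the checks above are simple thresholds. With 4KiB pages and
 * DEF_PRIORITY == 12 (illustrative values): "file >> sc->priority" is
 * non-zero once the inactive file list holds at least 4096 pages
 * (16MB), so cache_trim_mode engages only when there is a reasonable
 * amount of non-thrashing cache to trim. Conversely, file_is_tiny
 * requires file plus free pages to fit under the node's summed high
 * watermarks while "anon >> sc->priority" still finds at least 4096
 * inactive anon pages worth scanning.
 */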
    2779                 :            : 
    2780                 :          0 :         shrink_node_memcgs(pgdat, sc);
    2781                 :            : 
    2782         [ #  # ]:          0 :         if (reclaim_state) {
    2783                 :          0 :                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
    2784                 :          0 :                 reclaim_state->reclaimed_slab = 0;
    2785                 :            :         }
    2786                 :            : 
    2787                 :            :         /* Record the subtree's reclaim efficiency */
    2788                 :          0 :         vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
    2789                 :            :                    sc->nr_scanned - nr_scanned,
    2790         [ #  # ]:          0 :                    sc->nr_reclaimed - nr_reclaimed);
    2791                 :            : 
    2792         [ #  # ]:          0 :         if (sc->nr_reclaimed - nr_reclaimed)
    2793                 :          0 :                 reclaimable = true;
    2794                 :            : 
    2795         [ #  # ]:          0 :         if (current_is_kswapd()) {
    2796                 :            :                 /*
    2797                 :            :                  * If reclaim is isolating dirty pages under writeback,
    2798                 :            :                  * it implies that the long-lived page allocation rate
    2799                 :            :                  * is exceeding the page laundering rate. Either the
    2800                 :            :                  * global limits are not being effective at throttling
    2801                 :            :                  * processes due to the page distribution throughout
    2802                 :            :                  * zones or there is heavy usage of a slow backing
    2803                 :            :                  * device. The only option is to throttle from reclaim
    2804                 :            :                  * context which is not ideal as there is no guarantee
    2805                 :            :                  * the dirtying process is throttled in the same way
    2806                 :            :                  * balance_dirty_pages() manages.
    2807                 :            :                  *
    2808                 :            :                  * Once a node is flagged PGDAT_WRITEBACK, kswapd will
    2809                 :            :                  * count the number of pages under writeback flagged for
    2810                 :            :                  * immediate reclaim and stall if any are encountered
    2811                 :            :                  * in the nr_immediate check below.
    2812                 :            :                  */
    2813   [ #  #  #  # ]:          0 :                 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
    2814                 :          0 :                         set_bit(PGDAT_WRITEBACK, &pgdat->flags);
    2815                 :            : 
    2816                 :            :                 /* Allow kswapd to start writing pages during reclaim. */
    2817         [ #  # ]:          0 :                 if (sc->nr.unqueued_dirty == sc->nr.file_taken)
    2818                 :          0 :                         set_bit(PGDAT_DIRTY, &pgdat->flags);
    2819                 :            : 
    2820                 :            :                 /*
    2821                 :            :                  * If kswapd scans pages marked for immediate
    2822                 :            :                  * reclaim and under writeback (nr_immediate), it
    2823                 :            :                  * implies that pages are cycling through the LRU
    2824                 :            :                  * faster than they are written so also forcibly stall.
    2825                 :            :                  */
    2826         [ #  # ]:          0 :                 if (sc->nr.immediate)
    2827                 :          0 :                         congestion_wait(BLK_RW_ASYNC, HZ/10);
    2828                 :            :         }
    2829                 :            : 
    2830                 :            :         /*
    2831                 :            :          * Tag a node/memcg as congested if all the dirty pages
    2832                 :            :          * scanned were backed by a congested BDI and
    2833                 :            :          * wait_iff_congested will stall.
    2834                 :            :          *
    2835                 :            :          * Legacy memcg will stall in page writeback so avoid forcibly
    2836                 :            :          * stalling in wait_iff_congested().
    2837                 :            :          */
    2838         [ #  # ]:          0 :         if ((current_is_kswapd() ||
    2839                 :          0 :              (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
    2840   [ #  #  #  # ]:          0 :             sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
    2841                 :          0 :                 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
    2842                 :            : 
    2843                 :            :         /*
    2844                 :            :          * Stall direct reclaim for IO completions if the underlying BDIs
    2845                 :            :          * and the node are congested. Allow kswapd to continue until it
    2846                 :            :          * starts encountering unqueued dirty pages or cycling through
    2847                 :            :          * the LRU too quickly.
    2848                 :            :          */
    2849   [ #  #  #  # ]:          0 :         if (!current_is_kswapd() && current_may_throttle() &&
    2850   [ #  #  #  # ]:          0 :             !sc->hibernation_mode &&
    2851                 :          0 :             test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
    2852                 :          0 :                 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
    2853                 :            : 
    2854         [ #  # ]:          0 :         if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
    2855                 :            :                                     sc))
    2856                 :          0 :                 goto again;
    2857                 :            : 
    2858                 :            :         /*
    2859                 :            :          * Kswapd gives up on balancing particular nodes after too
    2860                 :            :          * many failures to reclaim anything from them and goes to
    2861                 :            :          * sleep. On reclaim progress, reset the failure counter. A
    2862                 :            :          * successful direct reclaim run will revive a dormant kswapd.
    2863                 :            :          */
    2864         [ #  # ]:          0 :         if (reclaimable)
    2865                 :          0 :                 pgdat->kswapd_failures = 0;
    2866                 :          0 : }
    2867                 :            : 
    2868                 :            : /*
    2869                 :            :  * Returns true if compaction should go ahead for a costly-order request, or
    2870                 :            :  * the allocation would already succeed without compaction. Return false if we
    2871                 :            :  * should reclaim first.
    2872                 :            :  */
    2873                 :            : static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
    2874                 :            : {
    2875                 :            :         unsigned long watermark;
    2876                 :            :         enum compact_result suitable;
    2877                 :            : 
    2878                 :            :         suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
    2879                 :            :         if (suitable == COMPACT_SUCCESS)
    2880                 :            :                 /* Allocation should succeed already. Don't reclaim. */
    2881                 :            :                 return true;
    2882                 :            :         if (suitable == COMPACT_SKIPPED)
    2883                 :            :                 /* Compaction cannot yet proceed. Do reclaim. */
    2884                 :            :                 return false;
    2885                 :            : 
    2886                 :            :         /*
    2887                 :            :          * Compaction is already possible, but it takes time to run and there
    2888                 :            :          * are potentially other callers using the pages just freed. So proceed
    2889                 :            :          * with reclaim to make a buffer of free pages available to give
    2890                 :            :          * compaction a reasonable chance of completing and allocating the page.
    2891                 :            :          * Note that we won't actually reclaim the whole buffer in one attempt
    2892                 :            :          * as the target watermark in should_continue_reclaim() is lower. But if
    2893                 :            :          * we are already above the high+gap watermark, don't reclaim at all.
    2894                 :            :          */
    2895                 :            :         watermark = high_wmark_pages(zone) + compact_gap(sc->order);
    2896                 :            : 
    2897                 :            :         return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
    2898                 :            : }
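
/*
 * Illustration (not part of vmscan.c): a minimal userspace model of the
 * buffer computed by compaction_ready() above, assuming compact_gap(order)
 * expands to 2UL << order (its definition in mm/internal.h of this tree)
 * and using a raw free-page count in place of zone_watermark_ok_safe().
 */
#include <stdbool.h>

static unsigned long model_compact_gap(unsigned int order)
{
        return 2UL << order;    /* room for twice the allocation size */
}

static bool model_compaction_ready(unsigned long free_pages,
                                   unsigned long high_wmark,
                                   unsigned int order)
{
        /*
         * Keep reclaiming until free pages clear the high watermark plus
         * the gap; above that point, compaction alone should suffice.
         */
        return free_pages >= high_wmark + model_compact_gap(order);
}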
    2899                 :            : 
    2900                 :            : /*
    2901                 :            :  * This is the direct reclaim path, for page-allocating processes.  We only
    2902                 :            :  * try to reclaim pages from zones which will satisfy the caller's allocation
    2903                 :            :  * request.
    2904                 :            :  *
    2905                 :            :  * If a zone is deemed to be full of pinned pages then just give it a light
     2906                 :            :  * scan and then give up on it.
    2907                 :            :  */
    2908                 :          0 : static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
    2909                 :            : {
    2910                 :          0 :         struct zoneref *z;
    2911                 :          0 :         struct zone *zone;
    2912                 :          0 :         unsigned long nr_soft_reclaimed;
    2913                 :          0 :         unsigned long nr_soft_scanned;
    2914                 :          0 :         gfp_t orig_mask;
    2915                 :          0 :         pg_data_t *last_pgdat = NULL;
    2916                 :            : 
    2917                 :            :         /*
    2918                 :            :          * If the number of buffer_heads in the machine exceeds the maximum
    2919                 :            :          * allowed level, force direct reclaim to scan the highmem zone as
    2920                 :            :          * highmem pages could be pinning lowmem pages storing buffer_heads
    2921                 :            :          */
    2922                 :          0 :         orig_mask = sc->gfp_mask;
    2923         [ #  # ]:          0 :         if (buffer_heads_over_limit) {
    2924                 :          0 :                 sc->gfp_mask |= __GFP_HIGHMEM;
    2925                 :          0 :                 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
    2926                 :            :         }
    2927                 :            : 
    2928   [ #  #  #  #  :          0 :         for_each_zone_zonelist_nodemask(zone, z, zonelist,
                   #  # ]
    2929                 :            :                                         sc->reclaim_idx, sc->nodemask) {
    2930                 :            :                 /*
     2931                 :            :                  * Take care that memory controller reclaim has only a small
     2932                 :            :                  * influence on the global LRU.
    2933                 :            :                  */
    2934                 :          0 :                 if (!cgroup_reclaim(sc)) {
    2935         [ #  # ]:          0 :                         if (!cpuset_zone_allowed(zone,
    2936                 :            :                                                  GFP_KERNEL | __GFP_HARDWALL))
    2937                 :          0 :                                 continue;
    2938                 :            : 
    2939                 :            :                         /*
    2940                 :            :                          * If we already have plenty of memory free for
    2941                 :            :                          * compaction in this zone, don't free any more.
    2942                 :            :                          * Even though compaction is invoked for any
    2943                 :            :                          * non-zero order, only frequent costly order
    2944                 :            :                          * reclamation is disruptive enough to become a
    2945                 :            :                          * noticeable problem, like transparent huge
    2946                 :            :                          * page allocations.
    2947                 :            :                          */
    2948                 :          0 :                         if (IS_ENABLED(CONFIG_COMPACTION) &&
    2949   [ #  #  #  # ]:          0 :                             sc->order > PAGE_ALLOC_COSTLY_ORDER &&
    2950                 :          0 :                             compaction_ready(zone, sc)) {
    2951                 :          0 :                                 sc->compaction_ready = true;
    2952                 :          0 :                                 continue;
    2953                 :            :                         }
    2954                 :            : 
    2955                 :            :                         /*
    2956                 :            :                          * Shrink each node in the zonelist once. If the
    2957                 :            :                          * zonelist is ordered by zone (not the default) then a
    2958                 :            :                          * node may be shrunk multiple times but in that case
     2959                 :            :                          * the user prefers that lower zones be preserved.
    2960                 :            :                          */
    2961         [ #  # ]:          0 :                         if (zone->zone_pgdat == last_pgdat)
    2962                 :          0 :                                 continue;
    2963                 :            : 
    2964                 :            :                         /*
    2965                 :            :                          * This steals pages from memory cgroups over softlimit
    2966                 :            :                          * and returns the number of reclaimed pages and
    2967                 :            :                          * scanned pages. This works for global memory pressure
    2968                 :            :                          * and balancing, not for a memcg's limit.
    2969                 :            :                          */
    2970                 :          0 :                         nr_soft_scanned = 0;
    2971                 :          0 :                         nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
    2972                 :            :                                                 sc->order, sc->gfp_mask,
    2973                 :            :                                                 &nr_soft_scanned);
    2974                 :          0 :                         sc->nr_reclaimed += nr_soft_reclaimed;
    2975                 :          0 :                         sc->nr_scanned += nr_soft_scanned;
     2976                 :            :                         /* need some check to avoid further shrink_node() calls */
    2977                 :            :                 }
    2978                 :            : 
    2979                 :            :                 /* See comment about same check for global reclaim above */
    2980                 :          0 :                 if (zone->zone_pgdat == last_pgdat)
    2981                 :            :                         continue;
    2982                 :          0 :                 last_pgdat = zone->zone_pgdat;
    2983                 :          0 :                 shrink_node(zone->zone_pgdat, sc);
    2984                 :            :         }
    2985                 :            : 
    2986                 :            :         /*
    2987                 :            :          * Restore to original mask to avoid the impact on the caller if we
    2988                 :            :          * promoted it to __GFP_HIGHMEM.
    2989                 :            :          */
    2990                 :          0 :         sc->gfp_mask = orig_mask;
    2991                 :          0 : }
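
/*
 * Illustration (not part of vmscan.c): the last_pgdat pattern used by
 * shrink_zones() above, in isolation. Consecutive zonelist entries that
 * share a node are collapsed so shrink_node() runs once per node; if the
 * zonelist is zone-ordered instead, a node can legitimately repeat, as
 * the comment in the function notes. Types and names are stand-ins.
 */
struct model_node { int id; };
struct model_zone { struct model_node *node; };

static void model_shrink_once_per_node(struct model_zone *zones, int nr)
{
        struct model_node *last_node = NULL;
        int i;

        for (i = 0; i < nr; i++) {
                if (zones[i].node == last_node)
                        continue;       /* this node was just shrunk */
                last_node = zones[i].node;
                /* the kernel would call shrink_node(node, sc) here */
        }
}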
    2992                 :            : 
    2993                 :          0 : static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
    2994                 :            : {
    2995                 :          0 :         struct lruvec *target_lruvec;
    2996                 :          0 :         unsigned long refaults;
    2997                 :            : 
    2998                 :          0 :         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
    2999                 :          0 :         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
    3000                 :          0 :         target_lruvec->refaults = refaults;
    3001                 :            : }
    3002                 :            : 
    3003                 :            : /*
    3004                 :            :  * This is the main entry point to direct page reclaim.
    3005                 :            :  *
    3006                 :            :  * If a full scan of the inactive list fails to free enough memory then we
    3007                 :            :  * are "out of memory" and something needs to be killed.
    3008                 :            :  *
    3009                 :            :  * If the caller is !__GFP_FS then the probability of a failure is reasonably
    3010                 :            :  * high - the zone may be full of dirty or under-writeback pages, which this
    3011                 :            :  * caller can't do much about.  We kick the writeback threads and take explicit
    3012                 :            :  * naps in the hope that some of these pages can be written.  But if the
    3013                 :            :  * allocating task holds filesystem locks which prevent writeout this might not
    3014                 :            :  * work, and the allocation attempt will fail.
    3015                 :            :  *
    3016                 :            :  * returns:     0, if no pages reclaimed
    3017                 :            :  *              else, the number of pages reclaimed
    3018                 :            :  */
    3019                 :          0 : static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
    3020                 :            :                                           struct scan_control *sc)
    3021                 :            : {
    3022                 :          0 :         int initial_priority = sc->priority;
    3023                 :          0 :         pg_data_t *last_pgdat;
    3024                 :          0 :         struct zoneref *z;
    3025                 :          0 :         struct zone *zone;
    3026                 :            : retry:
    3027         [ #  # ]:          0 :         delayacct_freepages_start();
    3028                 :            : 
    3029                 :          0 :         if (!cgroup_reclaim(sc))
    3030                 :          0 :                 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
    3031                 :            : 
    3032                 :          0 :         do {
    3033                 :          0 :                 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
    3034                 :            :                                 sc->priority);
    3035                 :          0 :                 sc->nr_scanned = 0;
    3036                 :          0 :                 shrink_zones(zonelist, sc);
    3037                 :            : 
    3038         [ #  # ]:          0 :                 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
    3039                 :            :                         break;
    3040                 :            : 
    3041         [ #  # ]:          0 :                 if (sc->compaction_ready)
    3042                 :            :                         break;
    3043                 :            : 
    3044                 :            :                 /*
     3045                 :            :                  * If we're having trouble reclaiming, start doing
    3046                 :            :                  * writepage even in laptop mode.
    3047                 :            :                  */
    3048         [ #  # ]:          0 :                 if (sc->priority < DEF_PRIORITY - 2)
    3049                 :          0 :                         sc->may_writepage = 1;
    3050         [ #  # ]:          0 :         } while (--sc->priority >= 0);
    3051                 :            : 
    3052                 :          0 :         last_pgdat = NULL;
    3053   [ #  #  #  #  :          0 :         for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
                   #  # ]
    3054                 :            :                                         sc->nodemask) {
    3055         [ #  # ]:          0 :                 if (zone->zone_pgdat == last_pgdat)
    3056                 :          0 :                         continue;
    3057                 :          0 :                 last_pgdat = zone->zone_pgdat;
    3058                 :            : 
    3059                 :          0 :                 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
    3060                 :            : 
    3061                 :          0 :                 if (cgroup_reclaim(sc)) {
    3062                 :            :                         struct lruvec *lruvec;
    3063                 :            : 
    3064                 :            :                         lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
    3065                 :            :                                                    zone->zone_pgdat);
    3066                 :            :                         clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
    3067                 :            :                 }
    3068                 :            :         }
    3069                 :            : 
    3070         [ #  # ]:          0 :         delayacct_freepages_end();
    3071                 :            : 
    3072         [ #  # ]:          0 :         if (sc->nr_reclaimed)
    3073                 :          0 :                 return sc->nr_reclaimed;
    3074                 :            : 
    3075                 :            :         /* Aborted reclaim to try compaction? don't OOM, then */
    3076         [ #  # ]:          0 :         if (sc->compaction_ready)
    3077                 :            :                 return 1;
    3078                 :            : 
    3079                 :            :         /*
    3080                 :            :          * We make inactive:active ratio decisions based on the node's
    3081                 :            :          * composition of memory, but a restrictive reclaim_idx or a
    3082                 :            :          * memory.low cgroup setting can exempt large amounts of
     3083                 :            :          * memory from reclaim. Neither of these is very common, so
    3084                 :            :          * instead of doing costly eligibility calculations of the
    3085                 :            :          * entire cgroup subtree up front, we assume the estimates are
    3086                 :            :          * good, and retry with forcible deactivation if that fails.
    3087                 :            :          */
    3088         [ #  # ]:          0 :         if (sc->skipped_deactivate) {
    3089                 :          0 :                 sc->priority = initial_priority;
    3090                 :          0 :                 sc->force_deactivate = 1;
    3091                 :          0 :                 sc->skipped_deactivate = 0;
    3092                 :          0 :                 goto retry;
    3093                 :            :         }
    3094                 :            : 
    3095                 :            :         /* Untapped cgroup reserves?  Don't OOM, retry. */
    3096         [ #  # ]:          0 :         if (sc->memcg_low_skipped) {
    3097                 :          0 :                 sc->priority = initial_priority;
    3098                 :          0 :                 sc->force_deactivate = 0;
    3099                 :          0 :                 sc->skipped_deactivate = 0;
    3100                 :          0 :                 sc->memcg_low_reclaim = 1;
    3101                 :          0 :                 sc->memcg_low_skipped = 0;
    3102                 :          0 :                 goto retry;
    3103                 :            :         }
    3104                 :            : 
    3105                 :            :         return 0;
    3106                 :            : }
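
/*
 * Illustration (not part of vmscan.c): the priority loop in
 * do_try_to_free_pages() above starts at DEF_PRIORITY (12) and decrements
 * towards 0 while reclaim falls short. Scan targets elsewhere in this file
 * are derived as lru_size >> priority, so each failed pass roughly doubles
 * the slice of the LRU that is scanned. Names below are stand-ins.
 */
#define MODEL_DEF_PRIORITY      12

static unsigned long model_scan_target(unsigned long lru_size, int priority)
{
        /* 1/4096th of the LRU at priority 12, the whole LRU at 0 */
        return lru_size >> priority;
}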
    3107                 :            : 
    3108                 :          0 : static bool allow_direct_reclaim(pg_data_t *pgdat)
    3109                 :            : {
    3110                 :          0 :         struct zone *zone;
    3111                 :          0 :         unsigned long pfmemalloc_reserve = 0;
    3112                 :          0 :         unsigned long free_pages = 0;
    3113                 :          0 :         int i;
    3114                 :          0 :         bool wmark_ok;
    3115                 :            : 
    3116         [ #  # ]:          0 :         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
    3117                 :            :                 return true;
    3118                 :            : 
    3119         [ #  # ]:          0 :         for (i = 0; i <= ZONE_NORMAL; i++) {
    3120                 :          0 :                 zone = &pgdat->node_zones[i];
    3121         [ #  # ]:          0 :                 if (!managed_zone(zone))
    3122                 :          0 :                         continue;
    3123                 :            : 
    3124         [ #  # ]:          0 :                 if (!zone_reclaimable_pages(zone))
    3125                 :          0 :                         continue;
    3126                 :            : 
    3127                 :          0 :                 pfmemalloc_reserve += min_wmark_pages(zone);
    3128                 :          0 :                 free_pages += zone_page_state(zone, NR_FREE_PAGES);
    3129                 :            :         }
    3130                 :            : 
    3131                 :            :         /* If there are no reserves (unexpected config) then do not throttle */
    3132         [ #  # ]:          0 :         if (!pfmemalloc_reserve)
    3133                 :            :                 return true;
    3134                 :            : 
    3135                 :          0 :         wmark_ok = free_pages > pfmemalloc_reserve / 2;
    3136                 :            : 
    3137                 :            :         /* kswapd must be awake if processes are being throttled */
    3138   [ #  #  #  # ]:          0 :         if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
    3139                 :          0 :                 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
    3140                 :            :                                                 (enum zone_type)ZONE_NORMAL);
    3141                 :          0 :                 wake_up_interruptible(&pgdat->kswapd_wait);
    3142                 :            :         }
    3143                 :            : 
    3144                 :            :         return wmark_ok;
    3145                 :            : }
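
/*
 * Illustration (not part of vmscan.c): allow_direct_reclaim() above boils
 * down to "free pages across ZONE_NORMAL and below must exceed half the
 * summed min watermarks". A standalone restatement, with flat arrays as
 * stand-ins for the pgdat zone walk:
 */
#include <stdbool.h>

static bool model_allow_direct_reclaim(const unsigned long *min_wmark,
                                       const unsigned long *free,
                                       int nr_zones)
{
        unsigned long reserve = 0, free_pages = 0;
        int i;

        for (i = 0; i < nr_zones; i++) {
                reserve += min_wmark[i];
                free_pages += free[i];
        }

        if (!reserve)           /* no reserves configured: do not throttle */
                return true;

        return free_pages > reserve / 2;
}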
    3146                 :            : 
    3147                 :            : /*
    3148                 :            :  * Throttle direct reclaimers if backing storage is backed by the network
    3149                 :            :  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
    3150                 :            :  * depleted. kswapd will continue to make progress and wake the processes
    3151                 :            :  * when the low watermark is reached.
    3152                 :            :  *
    3153                 :            :  * Returns true if a fatal signal was delivered during throttling. If this
    3154                 :            :  * happens, the page allocator should not consider triggering the OOM killer.
    3155                 :            :  */
    3156                 :          0 : static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
    3157                 :            :                                         nodemask_t *nodemask)
    3158                 :            : {
    3159                 :          0 :         struct zoneref *z;
    3160                 :          0 :         struct zone *zone;
    3161                 :          0 :         pg_data_t *pgdat = NULL;
    3162                 :            : 
    3163                 :            :         /*
    3164                 :            :          * Kernel threads should not be throttled as they may be indirectly
    3165                 :            :          * responsible for cleaning pages necessary for reclaim to make forward
    3166                 :            :          * progress. kjournald for example may enter direct reclaim while
     3167                 :            :          * committing a transaction where throttling it could force other
    3168                 :            :          * processes to block on log_wait_commit().
    3169                 :            :          */
    3170         [ #  # ]:          0 :         if (current->flags & PF_KTHREAD)
    3171                 :          0 :                 goto out;
    3172                 :            : 
    3173                 :            :         /*
    3174                 :            :          * If a fatal signal is pending, this process should not throttle.
    3175                 :            :          * It should return quickly so it can exit and free its memory
    3176                 :            :          */
    3177         [ #  # ]:          0 :         if (fatal_signal_pending(current))
    3178                 :          0 :                 goto out;
    3179                 :            : 
    3180                 :            :         /*
    3181                 :            :          * Check if the pfmemalloc reserves are ok by finding the first node
    3182                 :            :          * with a usable ZONE_NORMAL or lower zone. The expectation is that
    3183                 :            :          * GFP_KERNEL will be required for allocating network buffers when
    3184                 :            :          * swapping over the network so ZONE_HIGHMEM is unusable.
    3185                 :            :          *
    3186                 :            :          * Throttling is based on the first usable node and throttled processes
    3187                 :            :          * wait on a queue until kswapd makes progress and wakes them. There
     3188                 :            :          * is then an affinity between processes waking up and where reclaim
    3189                 :            :          * progress has been made assuming the process wakes on the same node.
    3190                 :            :          * More importantly, processes running on remote nodes will not compete
    3191                 :            :          * for remote pfmemalloc reserves and processes on different nodes
    3192                 :            :          * should make reasonable progress.
    3193                 :            :          */
    3194   [ #  #  #  # ]:          0 :         for_each_zone_zonelist_nodemask(zone, z, zonelist,
    3195                 :            :                                         gfp_zone(gfp_mask), nodemask) {
    3196         [ #  # ]:          0 :                 if (zone_idx(zone) > ZONE_NORMAL)
    3197         [ #  # ]:          0 :                         continue;
    3198                 :            : 
    3199                 :            :                 /* Throttle based on the first usable node */
    3200                 :          0 :                 pgdat = zone->zone_pgdat;
    3201         [ #  # ]:          0 :                 if (allow_direct_reclaim(pgdat))
    3202                 :          0 :                         goto out;
    3203                 :            :                 break;
    3204                 :            :         }
    3205                 :            : 
    3206                 :            :         /* If no zone was usable by the allocation flags then do not throttle */
    3207         [ #  # ]:          0 :         if (!pgdat)
    3208                 :          0 :                 goto out;
    3209                 :            : 
    3210                 :            :         /* Account for the throttling */
    3211                 :          0 :         count_vm_event(PGSCAN_DIRECT_THROTTLE);
    3212                 :            : 
    3213                 :            :         /*
    3214                 :            :          * If the caller cannot enter the filesystem, it's possible that it
    3215                 :            :          * is due to the caller holding an FS lock or performing a journal
    3216                 :            :          * transaction in the case of a filesystem like ext[3|4]. In this case,
    3217                 :            :          * it is not safe to block on pfmemalloc_wait as kswapd could be
    3218                 :            :          * blocked waiting on the same lock. Instead, throttle for up to a
    3219                 :            :          * second before continuing.
    3220                 :            :          */
    3221         [ #  # ]:          0 :         if (!(gfp_mask & __GFP_FS)) {
    3222   [ #  #  #  #  :          0 :                 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
             #  #  #  # ]
    3223                 :            :                         allow_direct_reclaim(pgdat), HZ);
    3224                 :            : 
    3225                 :          0 :                 goto check_pending;
    3226                 :            :         }
    3227                 :            : 
    3228                 :            :         /* Throttle until kswapd wakes the process */
    3229   [ #  #  #  #  :          0 :         wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
                   #  # ]
    3230                 :            :                 allow_direct_reclaim(pgdat));
    3231                 :            : 
    3232                 :          0 : check_pending:
    3233         [ #  # ]:          0 :         if (fatal_signal_pending(current))
    3234                 :          0 :                 return true;
    3235                 :            : 
    3236                 :          0 : out:
    3237                 :            :         return false;
    3238                 :            : }
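
/*
 * Illustration (not part of vmscan.c): throttle_direct_reclaim() above
 * picks between two sleeps. Callers without __GFP_FS may hold filesystem
 * locks kswapd needs, so they get a bounded interruptible wait of HZ
 * (about one second); everyone else sleeps killable until
 * allow_direct_reclaim() passes. The enum and names are stand-ins.
 */
#include <stdbool.h>

enum model_throttle_mode {
        MODEL_THROTTLE_TIMEOUT_HZ,      /* !__GFP_FS: bounded wait */
        MODEL_THROTTLE_UNTIL_PROGRESS,  /* otherwise: killable wait */
};

static enum model_throttle_mode model_pick_throttle(bool gfp_fs_allowed)
{
        return gfp_fs_allowed ? MODEL_THROTTLE_UNTIL_PROGRESS
                              : MODEL_THROTTLE_TIMEOUT_HZ;
}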
    3239                 :            : 
    3240                 :          0 : unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
    3241                 :            :                                 gfp_t gfp_mask, nodemask_t *nodemask)
    3242                 :            : {
    3243                 :          0 :         unsigned long nr_reclaimed;
    3244                 :          0 :         struct scan_control sc = {
    3245                 :            :                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
    3246                 :            :                 .gfp_mask = current_gfp_context(gfp_mask),
    3247                 :            :                 .reclaim_idx = gfp_zone(gfp_mask),
    3248                 :            :                 .order = order,
    3249                 :            :                 .nodemask = nodemask,
    3250                 :            :                 .priority = DEF_PRIORITY,
    3251         [ #  # ]:          0 :                 .may_writepage = !laptop_mode,
    3252                 :            :                 .may_unmap = 1,
    3253                 :            :                 .may_swap = 1,
    3254                 :            :         };
    3255                 :            : 
    3256                 :            :         /*
    3257                 :            :          * scan_control uses s8 fields for order, priority, and reclaim_idx.
    3258                 :            :          * Confirm they are large enough for max values.
    3259                 :            :          */
    3260                 :          0 :         BUILD_BUG_ON(MAX_ORDER > S8_MAX);
    3261                 :          0 :         BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
    3262                 :          0 :         BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
    3263                 :            : 
    3264                 :            :         /*
    3265                 :            :          * Do not enter reclaim if fatal signal was delivered while throttled.
    3266                 :            :          * 1 is returned so that the page allocator does not OOM kill at this
    3267                 :            :          * point.
    3268                 :            :          */
    3269         [ #  # ]:          0 :         if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
    3270                 :            :                 return 1;
    3271                 :            : 
    3272                 :          0 :         set_task_reclaim_state(current, &sc.reclaim_state);
    3273                 :          0 :         trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
    3274                 :            : 
    3275                 :          0 :         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
    3276                 :            : 
    3277                 :          0 :         trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
    3278                 :          0 :         set_task_reclaim_state(current, NULL);
    3279                 :            : 
    3280                 :          0 :         return nr_reclaimed;
    3281                 :            : }
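
/*
 * Illustration (not part of vmscan.c): a C11 analogue of the
 * BUILD_BUG_ON() checks in try_to_free_pages() above. scan_control packs
 * order, priority and reclaim_idx into s8 fields, so the maxima must fit
 * in 127. The limit values below are config-dependent stand-ins.
 */
#define MODEL_S8_MAX            127
#define MODEL_MAX_ORDER         11      /* assumption: common default */
#define MODEL_DEF_PRIORITY      12
#define MODEL_MAX_NR_ZONES      4       /* assumption: config dependent */

_Static_assert(MODEL_MAX_ORDER <= MODEL_S8_MAX, "order must fit in s8");
_Static_assert(MODEL_DEF_PRIORITY <= MODEL_S8_MAX, "priority must fit in s8");
_Static_assert(MODEL_MAX_NR_ZONES <= MODEL_S8_MAX, "reclaim_idx must fit in s8");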
    3282                 :            : 
    3283                 :            : #ifdef CONFIG_MEMCG
    3284                 :            : 
    3285                 :            : /* Only used by soft limit reclaim. Do not reuse for anything else. */
    3286                 :            : unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
    3287                 :            :                                                 gfp_t gfp_mask, bool noswap,
    3288                 :            :                                                 pg_data_t *pgdat,
    3289                 :            :                                                 unsigned long *nr_scanned)
    3290                 :            : {
    3291                 :            :         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
    3292                 :            :         struct scan_control sc = {
    3293                 :            :                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
    3294                 :            :                 .target_mem_cgroup = memcg,
    3295                 :            :                 .may_writepage = !laptop_mode,
    3296                 :            :                 .may_unmap = 1,
    3297                 :            :                 .reclaim_idx = MAX_NR_ZONES - 1,
    3298                 :            :                 .may_swap = !noswap,
    3299                 :            :         };
    3300                 :            : 
    3301                 :            :         WARN_ON_ONCE(!current->reclaim_state);
    3302                 :            : 
    3303                 :            :         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
    3304                 :            :                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
    3305                 :            : 
    3306                 :            :         trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
    3307                 :            :                                                       sc.gfp_mask);
    3308                 :            : 
    3309                 :            :         /*
    3310                 :            :          * NOTE: Although we can get the priority field, using it
    3311                 :            :          * here is not a good idea, since it limits the pages we can scan.
     3312                 :            :          * If we don't reclaim here, the shrink_node from balance_pgdat
     3313                 :            :          * will pick up pages from other mem cgroups as well. We hack
    3314                 :            :          * the priority and make it zero.
    3315                 :            :          */
    3316                 :            :         shrink_lruvec(lruvec, &sc);
    3317                 :            : 
    3318                 :            :         trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
    3319                 :            : 
    3320                 :            :         *nr_scanned = sc.nr_scanned;
    3321                 :            : 
    3322                 :            :         return sc.nr_reclaimed;
    3323                 :            : }
    3324                 :            : 
    3325                 :            : unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
    3326                 :            :                                            unsigned long nr_pages,
    3327                 :            :                                            gfp_t gfp_mask,
    3328                 :            :                                            bool may_swap)
    3329                 :            : {
    3330                 :            :         unsigned long nr_reclaimed;
    3331                 :            :         unsigned long pflags;
    3332                 :            :         unsigned int noreclaim_flag;
    3333                 :            :         struct scan_control sc = {
    3334                 :            :                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
    3335                 :            :                 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
    3336                 :            :                                 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
    3337                 :            :                 .reclaim_idx = MAX_NR_ZONES - 1,
    3338                 :            :                 .target_mem_cgroup = memcg,
    3339                 :            :                 .priority = DEF_PRIORITY,
    3340                 :            :                 .may_writepage = !laptop_mode,
    3341                 :            :                 .may_unmap = 1,
    3342                 :            :                 .may_swap = may_swap,
    3343                 :            :         };
    3344                 :            :         /*
    3345                 :            :          * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
    3346                 :            :          * equal pressure on all the nodes. This is based on the assumption that
    3347                 :            :          * the reclaim does not bail out early.
    3348                 :            :          */
    3349                 :            :         struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
    3350                 :            : 
    3351                 :            :         set_task_reclaim_state(current, &sc.reclaim_state);
    3352                 :            : 
    3353                 :            :         trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
    3354                 :            : 
    3355                 :            :         psi_memstall_enter(&pflags);
    3356                 :            :         noreclaim_flag = memalloc_noreclaim_save();
    3357                 :            : 
    3358                 :            :         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
    3359                 :            : 
    3360                 :            :         memalloc_noreclaim_restore(noreclaim_flag);
    3361                 :            :         psi_memstall_leave(&pflags);
    3362                 :            : 
    3363                 :            :         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
    3364                 :            :         set_task_reclaim_state(current, NULL);
    3365                 :            : 
    3366                 :            :         return nr_reclaimed;
    3367                 :            : }
    3368                 :            : #endif
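
/*
 * Illustration (not part of vmscan.c): the memalloc_noreclaim_save() /
 * memalloc_noreclaim_restore() pairing above follows the usual
 * save-and-restore idiom for a task flag: remember the old PF_MEMALLOC
 * bit, set it, and later restore only what was set before. A generic
 * restatement with an illustrative flags word:
 */
#define MODEL_PF_MEMALLOC       0x0800  /* illustrative bit value */

static unsigned int model_noreclaim_save(unsigned int *task_flags)
{
        unsigned int old = *task_flags & MODEL_PF_MEMALLOC;

        *task_flags |= MODEL_PF_MEMALLOC;
        return old;
}

static void model_noreclaim_restore(unsigned int *task_flags,
                                    unsigned int old)
{
        *task_flags = (*task_flags & ~MODEL_PF_MEMALLOC) | old;
}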
    3369                 :            : 
    3370                 :          0 : static void age_active_anon(struct pglist_data *pgdat,
    3371                 :            :                                 struct scan_control *sc)
    3372                 :            : {
    3373                 :          0 :         struct mem_cgroup *memcg;
    3374                 :          0 :         struct lruvec *lruvec;
    3375                 :            : 
    3376         [ #  # ]:          0 :         if (!total_swap_pages)
    3377                 :            :                 return;
    3378                 :            : 
    3379                 :          0 :         lruvec = mem_cgroup_lruvec(NULL, pgdat);
    3380         [ #  # ]:          0 :         if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
    3381                 :            :                 return;
    3382                 :            : 
    3383                 :          0 :         memcg = mem_cgroup_iter(NULL, NULL, NULL);
    3384                 :          0 :         do {
    3385                 :          0 :                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
    3386                 :          0 :                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
    3387                 :            :                                    sc, LRU_ACTIVE_ANON);
    3388                 :          0 :                 memcg = mem_cgroup_iter(NULL, memcg, NULL);
    3389                 :          0 :         } while (memcg);
    3390                 :            : }
    3391                 :            : 
    3392                 :          0 : static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
    3393                 :            : {
    3394                 :          0 :         int i;
    3395                 :          0 :         struct zone *zone;
    3396                 :            : 
    3397                 :            :         /*
    3398                 :            :          * Check for watermark boosts top-down as the higher zones
    3399                 :            :          * are more likely to be boosted. Both watermarks and boosts
     3400                 :            :          * should not be checked at the same time as reclaim would
    3401                 :            :          * start prematurely when there is no boosting and a lower
    3402                 :            :          * zone is balanced.
    3403                 :            :          */
    3404         [ #  # ]:          0 :         for (i = classzone_idx; i >= 0; i--) {
    3405                 :          0 :                 zone = pgdat->node_zones + i;
    3406         [ #  # ]:          0 :                 if (!managed_zone(zone))
    3407                 :          0 :                         continue;
    3408                 :            : 
    3409         [ #  # ]:          0 :                 if (zone->watermark_boost)
    3410                 :            :                         return true;
    3411                 :            :         }
    3412                 :            : 
    3413                 :            :         return false;
    3414                 :            : }
    3415                 :            : 
    3416                 :            : /*
     3417                 :            :  * Returns true if there is an eligible zone balanced for the requested order
     3418                 :            :  * and classzone_idx.
    3419                 :            :  */
    3420                 :        156 : static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
    3421                 :            : {
    3422                 :        156 :         int i;
    3423                 :        156 :         unsigned long mark = -1;
    3424                 :        156 :         struct zone *zone;
    3425                 :            : 
    3426                 :            :         /*
    3427                 :            :          * Check watermarks bottom-up as lower zones are more likely to
    3428                 :            :          * meet watermarks.
    3429                 :            :          */
    3430         [ +  - ]:        156 :         for (i = 0; i <= classzone_idx; i++) {
    3431                 :        156 :                 zone = pgdat->node_zones + i;
    3432                 :            : 
    3433         [ -  + ]:        156 :                 if (!managed_zone(zone))
    3434                 :          0 :                         continue;
    3435                 :            : 
    3436                 :        156 :                 mark = high_wmark_pages(zone);
    3437         [ -  + ]:        156 :                 if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
    3438                 :            :                         return true;
    3439                 :            :         }
    3440                 :            : 
    3441                 :            :         /*
    3442                 :            :          * If a node has no populated zone within classzone_idx, it does not
    3443                 :            :          * need balancing by definition. This can happen if a zone-restricted
    3444                 :            :          * allocation tries to wake a remote kswapd.
    3445                 :            :          */
    3446         [ #  # ]:          0 :         if (mark == -1)
    3447                 :          0 :                 return true;
    3448                 :            : 
    3449                 :            :         return false;
    3450                 :            : }
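
/*
 * Illustration (not part of vmscan.c): the shape of pgdat_balanced()
 * above, in isolation. Zones are checked bottom-up and the first one
 * meeting its high watermark balances the node; if no zone within
 * classzone_idx is managed, mark keeps the ULONG_MAX sentinel that
 * "mark = -1" produces and the node needs no balancing by definition.
 * Types are stand-ins and the watermark check is simplified.
 */
#include <stdbool.h>
#include <limits.h>

struct model_bal_zone {
        bool managed;
        unsigned long free_pages;
        unsigned long high_wmark;
};

static bool model_pgdat_balanced(const struct model_bal_zone *zones,
                                 int classzone_idx)
{
        unsigned long mark = ULONG_MAX; /* same value as mark = -1 */
        int i;

        for (i = 0; i <= classzone_idx; i++) {
                if (!zones[i].managed)
                        continue;
                mark = zones[i].high_wmark;
                if (zones[i].free_pages >= mark)
                        return true;
        }

        return mark == ULONG_MAX;       /* no managed zone at all */
}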
    3451                 :            : 
    3452                 :            : /* Clear pgdat state for congested, dirty or under writeback. */
    3453                 :        156 : static void clear_pgdat_congested(pg_data_t *pgdat)
    3454                 :            : {
    3455                 :        156 :         struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
    3456                 :            : 
    3457                 :        156 :         clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
    3458                 :        156 :         clear_bit(PGDAT_DIRTY, &pgdat->flags);
    3459                 :        156 :         clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
    3460                 :        156 : }
    3461                 :            : 
    3462                 :            : /*
    3463                 :            :  * Prepare kswapd for sleeping. This verifies that there are no processes
    3464                 :            :  * waiting in throttle_direct_reclaim() and that watermarks have been met.
    3465                 :            :  *
    3466                 :            :  * Returns true if kswapd is ready to sleep
    3467                 :            :  */
    3468                 :        156 : static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
    3469                 :            : {
    3470                 :            :         /*
    3471                 :            :          * The throttled processes are normally woken up in balance_pgdat() as
    3472                 :            :          * soon as allow_direct_reclaim() is true. But there is a potential
    3473                 :            :          * race between when kswapd checks the watermarks and a process gets
     3474                 :            :          * throttled. There is also a potential race where processes get
     3475                 :            :          * throttled, kswapd wakes, and a large process exits, thereby balancing
     3476                 :            :          * the zones, which causes kswapd to exit balance_pgdat() before reaching
    3477                 :            :          * the wake up checks. If kswapd is going to sleep, no process should
    3478                 :            :          * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
    3479                 :            :          * the wake up is premature, processes will wake kswapd and get
    3480                 :            :          * throttled again. The difference from wake ups in balance_pgdat() is
    3481                 :            :          * that here we are under prepare_to_wait().
    3482                 :            :          */
    3483         [ -  + ]:        156 :         if (waitqueue_active(&pgdat->pfmemalloc_wait))
    3484                 :          0 :                 wake_up_all(&pgdat->pfmemalloc_wait);
    3485                 :            : 
    3486                 :            :         /* Hopeless node, leave it to direct reclaim */
    3487         [ +  - ]:        156 :         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
    3488                 :            :                 return true;
    3489                 :            : 
    3490         [ +  - ]:        156 :         if (pgdat_balanced(pgdat, order, classzone_idx)) {
    3491                 :        156 :                 clear_pgdat_congested(pgdat);
    3492                 :        156 :                 return true;
    3493                 :            :         }
    3494                 :            : 
    3495                 :            :         return false;
    3496                 :            : }
    3497                 :            : 
    3498                 :            : /*
    3499                 :            :  * kswapd shrinks a node of pages that are at or below the highest usable
    3500                 :            :  * zone that is currently unbalanced.
    3501                 :            :  *
    3502                 :            :  * Returns true if kswapd scanned at least the requested number of pages to
    3503                 :            :  * reclaim or if the lack of progress was due to pages under writeback.
    3504                 :            :  * This is used to determine if the scanning priority needs to be raised.
    3505                 :            :  */
    3506                 :          0 : static bool kswapd_shrink_node(pg_data_t *pgdat,
    3507                 :            :                                struct scan_control *sc)
    3508                 :            : {
    3509                 :          0 :         struct zone *zone;
    3510                 :          0 :         int z;
    3511                 :            : 
    3512                 :            :         /* Reclaim a number of pages proportional to the number of zones */
    3513                 :          0 :         sc->nr_to_reclaim = 0;
    3514         [ #  # ]:          0 :         for (z = 0; z <= sc->reclaim_idx; z++) {
    3515                 :          0 :                 zone = pgdat->node_zones + z;
    3516         [ #  # ]:          0 :                 if (!managed_zone(zone))
    3517                 :          0 :                         continue;
    3518                 :            : 
    3519                 :          0 :                 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
    3520                 :            :         }
    3521                 :            : 
    3522                 :            :         /*
    3523                 :            :          * Historically care was taken to put equal pressure on all zones but
    3524                 :            :          * now pressure is applied based on node LRU order.
    3525                 :            :          */
    3526                 :          0 :         shrink_node(pgdat, sc);
    3527                 :            : 
    3528                 :            :         /*
    3529                 :            :          * Fragmentation may mean that the system cannot be rebalanced for
    3530                 :            :          * high-order allocations. If twice the allocation size has been
    3531                 :            :          * reclaimed then recheck watermarks only at order-0 to prevent
     3532                 :            :          * excessive reclaim. Assume that a process that requested a
     3533                 :            :          * high-order allocation can direct reclaim/compact on its own.
    3534                 :            :          */
    3535   [ #  #  #  # ]:          0 :         if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
    3536                 :          0 :                 sc->order = 0;
    3537                 :            : 
    3538                 :          0 :         return sc->nr_scanned >= sc->nr_to_reclaim;
    3539                 :            : }
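
/*
 * Illustration (not part of vmscan.c): the reclaim target computed by
 * kswapd_shrink_node() above, restated. kswapd sums
 * max(high_wmark_pages(zone), SWAP_CLUSTER_MAX) over every eligible
 * zone, and a high-order run downgrades itself to order-0 once
 * compact_gap(order) pages have been reclaimed. Names and the cluster
 * constant below are stand-ins.
 */
#define MODEL_SWAP_CLUSTER_MAX  32UL

static unsigned long model_kswapd_target(const unsigned long *high_wmark,
                                         int reclaim_idx)
{
        unsigned long target = 0;
        int z;

        for (z = 0; z <= reclaim_idx; z++)
                target += high_wmark[z] > MODEL_SWAP_CLUSTER_MAX ?
                          high_wmark[z] : MODEL_SWAP_CLUSTER_MAX;

        return target;
}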
    3540                 :            : 
    3541                 :            : /*
    3542                 :            :  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
    3543                 :            :  * that are eligible for use by the caller until at least one zone is
    3544                 :            :  * balanced.
    3545                 :            :  *
    3546                 :            :  * Returns the order kswapd finished reclaiming at.
    3547                 :            :  *
    3548                 :            :  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
    3549                 :            :  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
    3550                 :            :  * found to have free_pages <= high_wmark_pages(zone), any page in that zone
    3551                 :            :  * or lower is eligible for reclaim until at least one usable zone is
    3552                 :            :  * balanced.
    3553                 :            :  */
    3554                 :          0 : static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
    3555                 :            : {
    3556                 :          0 :         int i;
    3557                 :          0 :         unsigned long nr_soft_reclaimed;
    3558                 :          0 :         unsigned long nr_soft_scanned;
    3559                 :          0 :         unsigned long pflags;
    3560                 :          0 :         unsigned long nr_boost_reclaim;
    3561                 :          0 :         unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
    3562                 :          0 :         bool boosted;
    3563                 :          0 :         struct zone *zone;
    3564                 :          0 :         struct scan_control sc = {
    3565                 :            :                 .gfp_mask = GFP_KERNEL,
    3566                 :            :                 .order = order,
    3567                 :            :                 .may_unmap = 1,
    3568                 :            :         };
    3569                 :            : 
    3570                 :          0 :         set_task_reclaim_state(current, &sc.reclaim_state);
    3571                 :          0 :         psi_memstall_enter(&pflags);
    3572                 :          0 :         __fs_reclaim_acquire();
    3573                 :            : 
    3574                 :          0 :         count_vm_event(PAGEOUTRUN);
    3575                 :            : 
    3576                 :            :         /*
    3577                 :            :          * Account for the reclaim boost. Note that the zone boost is left in
    3578                 :            :          * place so that parallel allocations that are near the watermark will
    3579                 :            :          * stall or direct reclaim until kswapd is finished.
    3580                 :            :          */
    3581                 :          0 :         nr_boost_reclaim = 0;
    3582         [ #  # ]:          0 :         for (i = 0; i <= classzone_idx; i++) {
    3583                 :          0 :                 zone = pgdat->node_zones + i;
    3584         [ #  # ]:          0 :                 if (!managed_zone(zone))
    3585                 :          0 :                         continue;
    3586                 :            : 
    3587                 :          0 :                 nr_boost_reclaim += zone->watermark_boost;
    3588                 :          0 :                 zone_boosts[i] = zone->watermark_boost;
    3589                 :            :         }
    3590                 :            :         boosted = nr_boost_reclaim;
    3591                 :            : 
    3592                 :          0 : restart:
    3593                 :          0 :         sc.priority = DEF_PRIORITY;
    3594                 :          0 :         do {
    3595                 :          0 :                 unsigned long nr_reclaimed = sc.nr_reclaimed;
    3596                 :          0 :                 bool raise_priority = true;
    3597                 :          0 :                 bool balanced;
    3598                 :          0 :                 bool ret;
    3599                 :            : 
    3600                 :          0 :                 sc.reclaim_idx = classzone_idx;
    3601                 :            : 
    3602                 :            :                 /*
    3603                 :            :                  * If the number of buffer_heads exceeds the maximum allowed
    3604                 :            :                  * then consider reclaiming from all zones. This has a dual
    3605                 :            :                  * purpose -- on 64-bit systems it is expected that
    3606                 :            :                  * buffer_heads are stripped during active rotation. On 32-bit
    3607                 :            :                  * systems, highmem pages can pin lowmem memory and shrinking
    3608                 :            :                  * buffers can relieve lowmem pressure. Reclaim may still not
    3609                 :            :                  * go ahead if all eligible zones for the original allocation
    3610                 :            :                  * request are balanced to avoid excessive reclaim from kswapd.
    3611                 :            :                  */
    3612         [ #  # ]:          0 :                 if (buffer_heads_over_limit) {
    3613         [ #  # ]:          0 :                         for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
    3614                 :          0 :                                 zone = pgdat->node_zones + i;
    3615         [ #  # ]:          0 :                                 if (!managed_zone(zone))
    3616                 :          0 :                                         continue;
    3617                 :            : 
    3618                 :          0 :                                 sc.reclaim_idx = i;
    3619                 :          0 :                                 break;
    3620                 :            :                         }
    3621                 :            :                 }
    3622                 :            : 
    3623                 :            :                 /*
     3624                 :            :          * If the pgdat is imbalanced, ignore boosting, preserve the
     3625                 :            :          * watermarks for a later time, and restart. Note that the
     3626                 :            :          * zone watermarks will still be reset at the end of balancing
    3627                 :            :                  * on the grounds that the normal reclaim should be enough to
    3628                 :            :                  * re-evaluate if boosting is required when kswapd next wakes.
    3629                 :            :                  */
    3630                 :          0 :                 balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
    3631         [ #  # ]:          0 :                 if (!balanced && nr_boost_reclaim) {
    3632                 :          0 :                         nr_boost_reclaim = 0;
    3633                 :          0 :                         goto restart;
    3634                 :            :                 }
    3635                 :            : 
    3636                 :            :                 /*
    3637                 :            :                  * If boosting is not active then only reclaim if there are no
    3638                 :            :                  * eligible zones. Note that sc.reclaim_idx is not used as
    3639                 :            :                  * buffer_heads_over_limit may have adjusted it.
    3640                 :            :                  */
    3641         [ #  # ]:          0 :                 if (!nr_boost_reclaim && balanced)
    3642                 :          0 :                         goto out;
    3643                 :            : 
    3644                 :            :                 /* Limit the priority of boosting to avoid reclaim writeback */
    3645   [ #  #  #  # ]:          0 :                 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
    3646                 :          0 :                         raise_priority = false;
    3647                 :            : 
    3648                 :            :                 /*
    3649                 :            :                  * Do not writeback or swap pages for boosted reclaim. The
    3650                 :            :                  * intent is to relieve pressure not issue sub-optimal IO
    3651                 :            :                  * from reclaim context. If no pages are reclaimed, the
    3652                 :            :                  * reclaim will be aborted.
    3653                 :            :                  */
    3654   [ #  #  #  # ]:          0 :                 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
    3655                 :          0 :                 sc.may_swap = !nr_boost_reclaim;
    3656                 :            : 
    3657                 :            :                 /*
    3658                 :            :                  * Do some background aging of the anon list, to give
    3659                 :            :                  * pages a chance to be referenced before reclaiming. All
    3660                 :            :                  * pages are rotated regardless of classzone as this is
    3661                 :            :                  * about consistent aging.
    3662                 :            :                  */
    3663                 :          0 :                 age_active_anon(pgdat, &sc);
    3664                 :            : 
    3665                 :            :                 /*
    3666                 :            :                  * If we're getting trouble reclaiming, start doing writepage
    3667                 :            :                  * even in laptop mode.
    3668                 :            :                  */
    3669         [ #  # ]:          0 :                 if (sc.priority < DEF_PRIORITY - 2)
    3670                 :          0 :                         sc.may_writepage = 1;
    3671                 :            : 
    3672                 :            :                 /* Call soft limit reclaim before calling shrink_node. */
    3673                 :          0 :                 sc.nr_scanned = 0;
    3674                 :          0 :                 nr_soft_scanned = 0;
    3675                 :          0 :                 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
    3676                 :            :                                                 sc.gfp_mask, &nr_soft_scanned);
    3677                 :          0 :                 sc.nr_reclaimed += nr_soft_reclaimed;
    3678                 :            : 
    3679                 :            :                 /*
    3680                 :            :                  * There should be no need to raise the scanning priority if
     3681                 :            :          * enough pages are already being scanned that the high
    3682                 :            :                  * watermark would be met at 100% efficiency.
    3683                 :            :                  */
    3684         [ #  # ]:          0 :                 if (kswapd_shrink_node(pgdat, &sc))
    3685                 :          0 :                         raise_priority = false;
    3686                 :            : 
    3687                 :            :                 /*
    3688                 :            :                  * If the low watermark is met there is no need for processes
     3689                 :            :          * to be throttled on pfmemalloc_wait as they should now be
     3690                 :            :          * able to safely make forward progress. Wake them up.
    3691                 :            :                  */
    3692   [ #  #  #  # ]:          0 :                 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
    3693                 :          0 :                                 allow_direct_reclaim(pgdat))
    3694                 :          0 :                         wake_up_all(&pgdat->pfmemalloc_wait);
    3695                 :            : 
     3696                 :            :                 /* Check if kswapd should suspend or stop */
    3697                 :          0 :                 __fs_reclaim_release();
    3698                 :          0 :                 ret = try_to_freeze();
    3699         [ #  # ]:          0 :                 __fs_reclaim_acquire();
    3700   [ #  #  #  # ]:          0 :                 if (ret || kthread_should_stop())
    3701                 :            :                         break;
    3702                 :            : 
    3703                 :            :                 /*
    3704                 :            :                  * Raise priority if scanning rate is too low or there was no
    3705                 :            :                  * progress in reclaiming pages
    3706                 :            :                  */
    3707                 :          0 :                 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
    3708                 :          0 :                 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
    3709                 :            : 
    3710                 :            :                 /*
     3711                 :            :          * If reclaim made no progress for a boost, stop reclaim, as
     3712                 :            :          * IO cannot be queued and this could become an infinite loop
     3713                 :            :          * in extreme circumstances.
    3714                 :            :                  */
    3715         [ #  # ]:          0 :                 if (nr_boost_reclaim && !nr_reclaimed)
    3716                 :            :                         break;
    3717                 :            : 
    3718         [ #  # ]:          0 :                 if (raise_priority || !nr_reclaimed)
    3719                 :          0 :                         sc.priority--;
    3720         [ #  # ]:          0 :         } while (sc.priority >= 1);
    3721                 :            : 
    3722         [ #  # ]:          0 :         if (!sc.nr_reclaimed)
    3723                 :          0 :                 pgdat->kswapd_failures++;
    3724                 :            : 
    3725                 :          0 : out:
    3726                 :            :         /* If reclaim was boosted, account for the reclaim done in this pass */
    3727         [ #  # ]:          0 :         if (boosted) {
    3728                 :            :                 unsigned long flags;
    3729                 :            : 
    3730         [ #  # ]:          0 :                 for (i = 0; i <= classzone_idx; i++) {
    3731         [ #  # ]:          0 :                         if (!zone_boosts[i])
    3732                 :          0 :                                 continue;
    3733                 :            : 
    3734                 :            :                         /* Increments are under the zone lock */
    3735                 :          0 :                         zone = pgdat->node_zones + i;
    3736                 :          0 :                         spin_lock_irqsave(&zone->lock, flags);
    3737                 :          0 :                         zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
    3738                 :          0 :                         spin_unlock_irqrestore(&zone->lock, flags);
    3739                 :            :                 }
    3740                 :            : 
    3741                 :            :                 /*
     3742                 :            :          * As there is now likely space, wake up kcompactd to defragment
    3743                 :            :                  * pageblocks.
    3744                 :            :                  */
    3745                 :          0 :                 wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
    3746                 :            :         }
    3747                 :            : 
    3748                 :          0 :         snapshot_refaults(NULL, pgdat);
    3749                 :          0 :         __fs_reclaim_release();
    3750                 :          0 :         psi_memstall_leave(&pflags);
    3751                 :          0 :         set_task_reclaim_state(current, NULL);
    3752                 :            : 
    3753                 :            :         /*
     3754                 :            :          * Return the order kswapd stopped reclaiming at, as
    3755                 :            :          * prepare_kswapd_sleep() takes it into account. If another caller
    3756                 :            :          * entered the allocator slow path while kswapd was awake, order will
    3757                 :            :          * remain at the higher level.
    3758                 :            :          */
    3759                 :          0 :         return sc.order;
    3760                 :            : }
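
A minimal userspace sketch of the priority escalation driven by the loop in
balance_pgdat() above, assuming the usual vmscan convention that a pass at
priority p scans roughly lru_size >> p pages and that priority is only raised
(numerically lowered) while progress is insufficient. Everything here
(mock_shrink(), the constants) is illustrative, not kernel code.

#include <stdio.h>

#define DEF_PRIORITY	12

/* Pretend reclaim: frees one page per 32 scanned, purely for illustration. */
static unsigned long mock_shrink(unsigned long nr_to_scan)
{
	return nr_to_scan / 32;
}

int main(void)
{
	unsigned long lru_size = 1UL << 20;	/* 1M pages on the LRU */
	unsigned long goal = 4096;		/* pages we want reclaimed */
	unsigned long reclaimed = 0;
	int priority = DEF_PRIORITY;

	do {
		unsigned long nr_to_scan = lru_size >> priority;
		unsigned long progress = mock_shrink(nr_to_scan);

		reclaimed += progress;
		printf("prio %2d: scanned %7lu, reclaimed %6lu (total %lu)\n",
		       priority, nr_to_scan, progress, reclaimed);

		/* Too little progress: scan a larger LRU fraction next pass. */
		priority--;
	} while (reclaimed < goal && priority >= 1);

	return 0;
}
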
    3761                 :            : 
    3762                 :            : /*
    3763                 :            :  * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
     3764                 :            :  * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which is
     3765                 :            :  * not a valid index, then either kswapd is running for the first time or it
     3766                 :            :  * couldn't sleep after the previous reclaim attempt (the node is still
     3767                 :            :  * unbalanced). In that case, return the zone index of the previous reclaim cycle.
    3768                 :            :  */
    3769                 :         78 : static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
    3770                 :            :                                            enum zone_type prev_classzone_idx)
    3771                 :            : {
    3772                 :         78 :         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
    3773                 :         78 :                 return prev_classzone_idx;
    3774                 :            :         return pgdat->kswapd_classzone_idx;
    3775                 :            : }
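
The MAX_NR_ZONES sentinel above is a small but load-bearing convention: one
field stores both "a pending request" and "nothing pending". A minimal sketch
of that pattern, with hypothetical names (NR_SLOTS, consume_request()):

#include <assert.h>

#define NR_SLOTS	5		/* stands in for MAX_NR_ZONES */
#define SLOT_UNSET	NR_SLOTS	/* first invalid index = "no request" */

static int pending = SLOT_UNSET;

static int consume_request(int prev)
{
	int idx = (pending == SLOT_UNSET) ? prev : pending;

	pending = SLOT_UNSET;		/* mark the slot empty again */
	return idx;
}

int main(void)
{
	assert(consume_request(2) == 2);	/* nothing pending: keep old */
	pending = 4;
	assert(consume_request(2) == 4);	/* pending request wins */
	assert(consume_request(4) == 4);	/* consumed: falls back again */
	return 0;
}
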
    3776                 :            : 
    3777                 :         78 : static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
    3778                 :            :                                 unsigned int classzone_idx)
    3779                 :            : {
    3780                 :         78 :         long remaining = 0;
    3781                 :         78 :         DEFINE_WAIT(wait);
    3782                 :            : 
    3783   [ +  -  -  + ]:         78 :         if (freezing(current) || kthread_should_stop())
    3784                 :          0 :                 return;
    3785                 :            : 
    3786                 :         78 :         prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
    3787                 :            : 
    3788                 :            :         /*
    3789                 :            :          * Try to sleep for a short interval. Note that kcompactd will only be
    3790                 :            :          * woken if it is possible to sleep for a short interval. This is
    3791                 :            :          * deliberate on the assumption that if reclaim cannot keep an
     3792                 :            :          * eligible zone balanced, it's also unlikely that compaction will
    3793                 :            :          * succeed.
    3794                 :            :          */
    3795         [ +  - ]:         78 :         if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
    3796                 :            :                 /*
    3797                 :            :                  * Compaction records what page blocks it recently failed to
     3798                 :            :          * isolate pages from and skips them in future scanning.
     3799                 :            :          * When kswapd is going to sleep, it is reasonable to assume
     3800                 :            :          * that page isolation and compaction may succeed, so reset the cache.
    3801                 :            :                  */
    3802                 :         78 :                 reset_isolation_suitable(pgdat);
    3803                 :            : 
    3804                 :            :                 /*
    3805                 :            :                  * We have freed the memory, now we should compact it to make
    3806                 :            :                  * allocation of the requested order possible.
    3807                 :            :                  */
    3808                 :         78 :                 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
    3809                 :            : 
    3810                 :         78 :                 remaining = schedule_timeout(HZ/10);
    3811                 :            : 
    3812                 :            :                 /*
    3813                 :            :                  * If woken prematurely then reset kswapd_classzone_idx and
    3814                 :            :                  * order. The values will either be from a wakeup request or
    3815                 :            :                  * the previous request that slept prematurely.
    3816                 :            :                  */
    3817         [ -  + ]:         78 :                 if (remaining) {
    3818         [ #  # ]:          0 :                         pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
    3819                 :          0 :                         pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
    3820                 :            :                 }
    3821                 :            : 
    3822                 :         78 :                 finish_wait(&pgdat->kswapd_wait, &wait);
    3823                 :         78 :                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
    3824                 :            :         }
    3825                 :            : 
    3826                 :            :         /*
    3827                 :            :          * After a short sleep, check if it was a premature sleep. If not, then
    3828                 :            :          * go fully to sleep until explicitly woken up.
    3829                 :            :          */
    3830   [ +  -  +  - ]:        156 :         if (!remaining &&
    3831                 :         78 :             prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
    3832                 :         78 :                 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
    3833                 :            : 
    3834                 :            :                 /*
    3835                 :            :                  * vmstat counters are not perfectly accurate and the estimated
    3836                 :            :                  * value for counters such as NR_FREE_PAGES can deviate from the
    3837                 :            :                  * true value by nr_online_cpus * threshold. To avoid the zone
    3838                 :            :                  * watermarks being breached while under pressure, we reduce the
    3839                 :            :                  * per-cpu vmstat threshold while kswapd is awake and restore
    3840                 :            :                  * them before going back to sleep.
    3841                 :            :                  */
    3842                 :         78 :                 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
    3843                 :            : 
    3844         [ +  - ]:         78 :                 if (!kthread_should_stop())
    3845                 :         78 :                         schedule();
    3846                 :            : 
    3847                 :          0 :                 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
    3848                 :            :         } else {
    3849         [ #  # ]:          0 :                 if (remaining)
    3850                 :          0 :                         count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
    3851                 :            :                 else
    3852                 :          0 :                         count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
    3853                 :            :         }
    3854                 :          0 :         finish_wait(&pgdat->kswapd_wait, &wait);
    3855                 :            : }
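
kswapd_try_to_sleep() naps for HZ/10 first and only commits to a full sleep if
nothing cut the nap short. A rough userspace analogue with POSIX condition
variables (the kernel's waitqueues differ in detail; try_to_sleep(), waker()
and work_pending are hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool work_pending;		/* set by the waking side */

static void try_to_sleep(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 100 * 1000 * 1000;	/* ~HZ/10: a 100ms nap */
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);

	/* Phase 1: short nap; a wakeup here counts as premature. */
	while (!work_pending &&
	       pthread_cond_timedwait(&wake, &lock, &deadline) == 0)
		;

	/* Phase 2: sleep indefinitely only if the nap ran its course. */
	while (!work_pending)
		pthread_cond_wait(&wake, &lock);

	work_pending = false;		/* consume the request */
	pthread_mutex_unlock(&lock);
}

static void *waker(void *arg)
{
	struct timespec t = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

	(void)arg;
	nanosleep(&t, NULL);
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	try_to_sleep();			/* returns once the waker fires */
	pthread_join(t, NULL);
	return 0;
}
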
    3856                 :            : 
    3857                 :            : /*
    3858                 :            :  * The background pageout daemon, started as a kernel thread
    3859                 :            :  * from the init process.
    3860                 :            :  *
    3861                 :            :  * This basically trickles out pages so that we have _some_
    3862                 :            :  * free memory available even if there is no other activity
    3863                 :            :  * that frees anything up. This is needed for things like routing
    3864                 :            :  * etc, where we otherwise might have all activity going on in
     3865                 :            :  * etc., where we otherwise might have all activity going on in
    3866                 :            :  *
    3867                 :            :  * If there are applications that are active memory-allocators
    3868                 :            :  * (most normal use), this basically shouldn't matter.
    3869                 :            :  */
    3870                 :         78 : static int kswapd(void *p)
    3871                 :            : {
    3872                 :         78 :         unsigned int alloc_order, reclaim_order;
    3873                 :         78 :         unsigned int classzone_idx = MAX_NR_ZONES - 1;
    3874                 :         78 :         pg_data_t *pgdat = (pg_data_t*)p;
    3875         [ +  - ]:         78 :         struct task_struct *tsk = current;
    3876         [ +  - ]:         78 :         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
    3877                 :            : 
    3878         [ +  - ]:         78 :         if (!cpumask_empty(cpumask))
    3879                 :         78 :                 set_cpus_allowed_ptr(tsk, cpumask);
    3880                 :            : 
    3881                 :            :         /*
    3882                 :            :          * Tell the memory management that we're a "memory allocator",
    3883                 :            :          * and that if we need more memory we should get access to it
    3884                 :            :          * regardless (see "__alloc_pages()"). "kswapd" should
    3885                 :            :          * never get caught in the normal page freeing logic.
    3886                 :            :          *
    3887                 :            :          * (Kswapd normally doesn't need memory anyway, but sometimes
    3888                 :            :          * you need a small amount of memory in order to be able to
    3889                 :            :          * page out something else, and this flag essentially protects
    3890                 :            :          * us from recursively trying to free more memory as we're
    3891                 :            :          * trying to free the first piece of memory in the first place).
    3892                 :            :          */
    3893                 :         78 :         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
    3894                 :         78 :         set_freezable();
    3895                 :            : 
    3896                 :         78 :         pgdat->kswapd_order = 0;
    3897                 :         78 :         pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
    3898                 :         78 :         for ( ; ; ) {
    3899                 :         78 :                 bool ret;
    3900                 :            : 
    3901                 :         78 :                 alloc_order = reclaim_order = pgdat->kswapd_order;
    3902         [ +  - ]:         78 :                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
    3903                 :            : 
    3904                 :         78 : kswapd_try_sleep:
    3905                 :         78 :                 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
    3906                 :            :                                         classzone_idx);
    3907                 :            : 
    3908                 :            :                 /* Read the new order and classzone_idx */
    3909                 :          0 :                 alloc_order = reclaim_order = pgdat->kswapd_order;
    3910         [ #  # ]:          0 :                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
    3911                 :          0 :                 pgdat->kswapd_order = 0;
    3912                 :          0 :                 pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
    3913                 :            : 
    3914                 :          0 :                 ret = try_to_freeze();
    3915         [ #  # ]:          0 :                 if (kthread_should_stop())
    3916                 :            :                         break;
    3917                 :            : 
    3918                 :            :                 /*
    3919                 :            :                  * We can speed up thawing tasks if we don't call balance_pgdat
    3920                 :            :                  * after returning from the refrigerator
    3921                 :            :                  */
    3922         [ #  # ]:          0 :                 if (ret)
    3923                 :          0 :                         continue;
    3924                 :            : 
    3925                 :            :                 /*
    3926                 :            :                  * Reclaim begins at the requested order but if a high-order
    3927                 :            :                  * reclaim fails then kswapd falls back to reclaiming for
    3928                 :            :                  * order-0. If that happens, kswapd will consider sleeping
    3929                 :            :                  * for the order it finished reclaiming at (reclaim_order)
    3930                 :            :                  * but kcompactd is woken to compact for the original
    3931                 :            :                  * request (alloc_order).
    3932                 :            :                  */
    3933                 :          0 :                 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
    3934                 :            :                                                 alloc_order);
    3935                 :          0 :                 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
    3936         [ #  # ]:          0 :                 if (reclaim_order < alloc_order)
    3937                 :          0 :                         goto kswapd_try_sleep;
    3938                 :            :         }
    3939                 :            : 
    3940                 :          0 :         tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
    3941                 :            : 
    3942                 :          0 :         return 0;
    3943                 :            : }
    3944                 :            : 
    3945                 :            : /*
    3946                 :            :  * A zone is low on free memory or too fragmented for high-order memory.  If
    3947                 :            :  * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
    3948                 :            :  * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
    3949                 :            :  * has failed or is not needed, still wake up kcompactd if only compaction is
    3950                 :            :  * needed.
    3951                 :            :  */
    3952                 :          0 : void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
    3953                 :            :                    enum zone_type classzone_idx)
    3954                 :            : {
    3955                 :          0 :         pg_data_t *pgdat;
    3956                 :            : 
    3957         [ #  # ]:          0 :         if (!managed_zone(zone))
    3958                 :            :                 return;
    3959                 :            : 
    3960         [ #  # ]:          0 :         if (!cpuset_zone_allowed(zone, gfp_flags))
    3961                 :            :                 return;
    3962                 :          0 :         pgdat = zone->zone_pgdat;
    3963                 :            : 
    3964         [ #  # ]:          0 :         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
    3965                 :          0 :                 pgdat->kswapd_classzone_idx = classzone_idx;
    3966                 :            :         else
    3967                 :          0 :                 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
    3968                 :            :                                                   classzone_idx);
    3969                 :          0 :         pgdat->kswapd_order = max(pgdat->kswapd_order, order);
    3970         [ #  # ]:          0 :         if (!waitqueue_active(&pgdat->kswapd_wait))
    3971                 :            :                 return;
    3972                 :            : 
    3973                 :            :         /* Hopeless node, leave it to direct reclaim if possible */
    3974   [ #  #  #  # ]:          0 :         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
    3975         [ #  # ]:          0 :             (pgdat_balanced(pgdat, order, classzone_idx) &&
    3976                 :          0 :              !pgdat_watermark_boosted(pgdat, classzone_idx))) {
    3977                 :            :                 /*
    3978                 :            :                  * There may be plenty of free memory available, but it's too
    3979                 :            :                  * fragmented for high-order allocations.  Wake up kcompactd
    3980                 :            :                  * and rely on compaction_suitable() to determine if it's
    3981                 :            :                  * needed.  If it fails, it will defer subsequent attempts to
    3982                 :            :                  * ratelimit its work.
    3983                 :            :                  */
    3984         [ #  # ]:          0 :                 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
    3985                 :          0 :                         wakeup_kcompactd(pgdat, order, classzone_idx);
    3986                 :          0 :                 return;
    3987                 :            :         }
    3988                 :            : 
    3989                 :          0 :         trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
    3990                 :            :                                       gfp_flags);
    3991                 :          0 :         wake_up_interruptible(&pgdat->kswapd_wait);
    3992                 :            : }
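
Note how concurrent wakers are coalesced above: each caller only ever raises
kswapd_order and kswapd_classzone_idx, so a single kswapd pass can satisfy
every pending request. A tiny sketch of that merge rule (struct wake_state
and post_request() are hypothetical):

#include <assert.h>

#define IDX_UNSET	5	/* stands in for MAX_NR_ZONES */

struct wake_state {
	int order;
	int classzone_idx;
};

static void post_request(struct wake_state *s, int order, int idx)
{
	if (s->classzone_idx == IDX_UNSET || idx > s->classzone_idx)
		s->classzone_idx = idx;
	if (order > s->order)
		s->order = order;
}

int main(void)
{
	struct wake_state s = { .order = 0, .classzone_idx = IDX_UNSET };

	post_request(&s, 3, 1);		/* order-3 request for zone 1 */
	post_request(&s, 0, 2);		/* order-0 request for zone 2 */
	assert(s.order == 3 && s.classzone_idx == 2);
	return 0;
}
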
    3993                 :            : 
    3994                 :            : #ifdef CONFIG_HIBERNATION
    3995                 :            : /*
    3996                 :            :  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
    3997                 :            :  * freed pages.
    3998                 :            :  *
     3999                 :            :  * Rather than trying to age LRUs, the aim is to preserve the overall
    4000                 :            :  * LRU order by reclaiming preferentially
    4001                 :            :  * inactive > active > active referenced > active mapped
    4002                 :            :  */
    4003                 :          0 : unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
    4004                 :            : {
    4005                 :          0 :         struct scan_control sc = {
    4006                 :            :                 .nr_to_reclaim = nr_to_reclaim,
    4007                 :            :                 .gfp_mask = GFP_HIGHUSER_MOVABLE,
    4008                 :            :                 .reclaim_idx = MAX_NR_ZONES - 1,
    4009                 :            :                 .priority = DEF_PRIORITY,
    4010                 :            :                 .may_writepage = 1,
    4011                 :            :                 .may_unmap = 1,
    4012                 :            :                 .may_swap = 1,
    4013                 :            :                 .hibernation_mode = 1,
    4014                 :            :         };
    4015                 :          0 :         struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
    4016                 :          0 :         unsigned long nr_reclaimed;
    4017                 :          0 :         unsigned int noreclaim_flag;
    4018                 :            : 
    4019                 :          0 :         fs_reclaim_acquire(sc.gfp_mask);
    4020                 :          0 :         noreclaim_flag = memalloc_noreclaim_save();
    4021                 :          0 :         set_task_reclaim_state(current, &sc.reclaim_state);
    4022                 :            : 
    4023                 :          0 :         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
    4024                 :            : 
    4025                 :          0 :         set_task_reclaim_state(current, NULL);
    4026                 :          0 :         memalloc_noreclaim_restore(noreclaim_flag);
    4027                 :          0 :         fs_reclaim_release(sc.gfp_mask);
    4028                 :            : 
    4029                 :          0 :         return nr_reclaimed;
    4030                 :            : }
    4031                 :            : #endif /* CONFIG_HIBERNATION */
    4032                 :            : 
     4033                 :            : /* It's optimal to keep kswapds on the same CPUs as their memory, but
     4034                 :            :    not required for correctness. So if the last CPU in a node goes
     4035                 :            :    offline, kswapd may run anywhere; as the first CPU of that node comes
     4036                 :            :    back online, restore its CPU binding. */
    4037                 :          0 : static int kswapd_cpu_online(unsigned int cpu)
    4038                 :            : {
    4039                 :          0 :         int nid;
    4040                 :            : 
    4041         [ #  # ]:          0 :         for_each_node_state(nid, N_MEMORY) {
    4042                 :          0 :                 pg_data_t *pgdat = NODE_DATA(nid);
    4043                 :          0 :                 const struct cpumask *mask;
    4044                 :            : 
    4045                 :          0 :                 mask = cpumask_of_node(pgdat->node_id);
    4046                 :            : 
    4047         [ #  # ]:          0 :                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
    4048                 :            :                         /* One of our CPUs online: restore mask */
    4049                 :          0 :                         set_cpus_allowed_ptr(pgdat->kswapd, mask);
    4050                 :            :         }
    4051                 :          0 :         return 0;
    4052                 :            : }
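
A userspace analogue of restoring a worker's CPU binding, as
kswapd_cpu_online() above does via set_cpus_allowed_ptr(). The CPU list is
made up; a real caller would derive it from the node's topology
(bind_to_cpus() is a hypothetical name):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static int bind_to_cpus(pthread_t worker, const int *cpus, int n)
{
	cpu_set_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < n; i++)
		CPU_SET(cpus[i], &mask);

	/* Equivalent in spirit to set_cpus_allowed_ptr(tsk, mask). */
	return pthread_setaffinity_np(worker, sizeof(mask), &mask);
}

int main(void)
{
	int node_cpus[] = { 0, 1 };	/* pretend these belong to one node */
	int err = bind_to_cpus(pthread_self(), node_cpus, 2);

	if (err)
		fprintf(stderr, "setaffinity: %s\n", strerror(err));
	return 0;
}
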
    4053                 :            : 
    4054                 :            : /*
    4055                 :            :  * This kswapd start function will be called by init and node-hot-add.
     4056                 :            :  * On node hot-add, kswapd will be moved to the proper CPUs if CPUs are hot-added.
    4057                 :            :  */
    4058                 :         78 : int kswapd_run(int nid)
    4059                 :            : {
    4060                 :         78 :         pg_data_t *pgdat = NODE_DATA(nid);
    4061                 :         78 :         int ret = 0;
    4062                 :            : 
    4063         [ +  - ]:         78 :         if (pgdat->kswapd)
    4064                 :            :                 return 0;
    4065                 :            : 
    4066         [ +  - ]:         78 :         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
    4067         [ -  + ]:         78 :         if (IS_ERR(pgdat->kswapd)) {
    4068                 :            :                 /* failure at boot is fatal */
    4069         [ #  # ]:          0 :                 BUG_ON(system_state < SYSTEM_RUNNING);
    4070                 :          0 :                 pr_err("Failed to start kswapd on node %d\n", nid);
    4071                 :          0 :                 ret = PTR_ERR(pgdat->kswapd);
    4072                 :          0 :                 pgdat->kswapd = NULL;
    4073                 :            :         }
    4074                 :            :         return ret;
    4075                 :            : }
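
kswapd_run() leans on the kernel's error-pointer convention: kthread_run()
returns either a task pointer or a small negative errno encoded in the pointer
itself, distinguished via IS_ERR()/PTR_ERR(). A standalone sketch of just the
encoding (err_ptr(), is_err(), ptr_err() and start_worker() are hypothetical
stand-ins, not the kernel macros):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static void *err_ptr(long error)	/* error is negative, e.g. -ENOMEM */
{
	return (void *)error;
}

static int is_err(const void *ptr)
{
	/* Errors live in the top MAX_ERRNO values of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)
{
	return (long)ptr;
}

/* Stand-in for kthread_run(): always fails with -ENOMEM in this demo. */
static void *start_worker(void)
{
	return err_ptr(-ENOMEM);
}

int main(void)
{
	void *task = start_worker();

	if (is_err(task)) {
		printf("failed to start worker: %ld\n", ptr_err(task));
		task = NULL;
	}
	return 0;
}
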
    4076                 :            : 
    4077                 :            : /*
    4078                 :            :  * Called by memory hotplug when all memory in a node is offlined.  Caller must
    4079                 :            :  * hold mem_hotplug_begin/end().
    4080                 :            :  */
    4081                 :          0 : void kswapd_stop(int nid)
    4082                 :            : {
    4083                 :          0 :         struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
    4084                 :            : 
    4085         [ #  # ]:          0 :         if (kswapd) {
    4086                 :          0 :                 kthread_stop(kswapd);
    4087                 :          0 :                 NODE_DATA(nid)->kswapd = NULL;
    4088                 :            :         }
    4089                 :          0 : }
    4090                 :            : 
    4091                 :         78 : static int __init kswapd_init(void)
    4092                 :            : {
    4093                 :         78 :         int nid, ret;
    4094                 :            : 
    4095                 :         78 :         swap_setup();
    4096         [ +  + ]:        312 :         for_each_node_state(nid, N_MEMORY)
    4097                 :         78 :                 kswapd_run(nid);
    4098                 :         78 :         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
    4099                 :            :                                         "mm/vmscan:online", kswapd_cpu_online,
    4100                 :            :                                         NULL);
    4101         [ -  + ]:         78 :         WARN_ON(ret < 0);
    4102                 :         78 :         return 0;
    4103                 :            : }
    4104                 :            : 
    4105                 :            : module_init(kswapd_init)
    4106                 :            : 
    4107                 :            : #ifdef CONFIG_NUMA
    4108                 :            : /*
    4109                 :            :  * Node reclaim mode
    4110                 :            :  *
     4111                 :            :  * If non-zero, call node_reclaim() when the number of free pages falls below
    4112                 :            :  * the watermarks.
    4113                 :            :  */
    4114                 :            : int node_reclaim_mode __read_mostly;
    4115                 :            : 
    4116                 :            : #define RECLAIM_WRITE (1<<0)      /* Writeout pages during reclaim */
    4117                 :            : #define RECLAIM_UNMAP (1<<1)      /* Unmap pages during reclaim */
    4118                 :            : 
    4119                 :            : /*
    4120                 :            :  * Priority for NODE_RECLAIM. This determines the fraction of pages
     4121                 :            :  * of a node considered for each zone_reclaim pass. A priority of 4 scans
     4122                 :            :  * 1/16th of a zone.
    4123                 :            :  */
    4124                 :            : #define NODE_RECLAIM_PRIORITY 4
    4125                 :            : 
    4126                 :            : /*
    4127                 :            :  * Percentage of pages in a zone that must be unmapped for node_reclaim to
    4128                 :            :  * occur.
    4129                 :            :  */
    4130                 :            : int sysctl_min_unmapped_ratio = 1;
    4131                 :            : 
    4132                 :            : /*
    4133                 :            :  * If the number of slab pages in a zone grows beyond this percentage then
    4134                 :            :  * slab reclaim needs to occur.
    4135                 :            :  */
    4136                 :            : int sysctl_min_slab_ratio = 5;
    4137                 :            : 
    4138                 :          0 : static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
    4139                 :            : {
    4140                 :          0 :         unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
    4141                 :          0 :         unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
    4142                 :          0 :                 node_page_state(pgdat, NR_ACTIVE_FILE);
    4143                 :            : 
    4144                 :            :         /*
    4145                 :            :          * It's possible for there to be more file mapped pages than
    4146                 :            :          * accounted for by the pages on the file LRU lists because
     4147                 :            :          * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
    4148                 :            :          */
    4149         [ #  # ]:          0 :         return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
    4150                 :            : }
    4151                 :            : 
    4152                 :            : /* Work out how many page cache pages we can reclaim in this reclaim_mode */
    4153                 :          0 : static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
    4154                 :            : {
    4155                 :          0 :         unsigned long nr_pagecache_reclaimable;
    4156                 :          0 :         unsigned long delta = 0;
    4157                 :            : 
    4158                 :            :         /*
    4159                 :            :          * If RECLAIM_UNMAP is set, then all file pages are considered
    4160                 :            :          * potentially reclaimable. Otherwise, we have to worry about
    4161                 :            :          * pages like swapcache and node_unmapped_file_pages() provides
     4162                 :            :          * pages like swapcache, and node_unmapped_file_pages() provides
     4163                 :            :          * a better estimate.
    4164         [ #  # ]:          0 :         if (node_reclaim_mode & RECLAIM_UNMAP)
    4165                 :          0 :                 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
    4166                 :            :         else
    4167                 :          0 :                 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
    4168                 :            : 
    4169                 :            :         /* If we can't clean pages, remove dirty pages from consideration */
    4170         [ #  # ]:          0 :         if (!(node_reclaim_mode & RECLAIM_WRITE))
    4171                 :          0 :                 delta += node_page_state(pgdat, NR_FILE_DIRTY);
    4172                 :            : 
    4173                 :            :         /* Watch for any possible underflows due to delta */
    4174         [ #  # ]:          0 :         if (unlikely(delta > nr_pagecache_reclaimable))
    4175                 :          0 :                 delta = nr_pagecache_reclaimable;
    4176                 :            : 
    4177                 :          0 :         return nr_pagecache_reclaimable - delta;
    4178                 :            : }
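
Both node_unmapped_file_pages() and node_pagecache_reclaimable() above clamp a
delta before subtracting it, because vmstat counters are approximate and can
transiently disagree with each other. The guard in isolation (sub_clamped() is
a hypothetical name):

#include <assert.h>

static unsigned long sub_clamped(unsigned long total, unsigned long delta)
{
	/* Unsigned underflow would wrap to a huge bogus value. */
	if (delta > total)
		delta = total;
	return total - delta;
}

int main(void)
{
	assert(sub_clamped(100, 30) == 70);	/* normal case */
	assert(sub_clamped(100, 130) == 0);	/* racy counters: clamp to 0 */
	return 0;
}
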
    4179                 :            : 
    4180                 :            : /*
    4181                 :            :  * Try to free up some pages from this node through reclaim.
    4182                 :            :  */
    4183                 :          0 : static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
    4184                 :            : {
    4185                 :            :         /* Minimum pages needed in order to stay on node */
    4186                 :          0 :         const unsigned long nr_pages = 1 << order;
    4187         [ #  # ]:          0 :         struct task_struct *p = current;
    4188                 :          0 :         unsigned int noreclaim_flag;
    4189                 :          0 :         struct scan_control sc = {
    4190                 :          0 :                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
    4191                 :            :                 .gfp_mask = current_gfp_context(gfp_mask),
    4192                 :            :                 .order = order,
    4193                 :            :                 .priority = NODE_RECLAIM_PRIORITY,
    4194                 :          0 :                 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
    4195         [ #  # ]:          0 :                 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
    4196                 :            :                 .may_swap = 1,
    4197                 :            :                 .reclaim_idx = gfp_zone(gfp_mask),
    4198                 :            :         };
    4199                 :            : 
    4200                 :          0 :         trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
    4201                 :            :                                            sc.gfp_mask);
    4202                 :            : 
    4203                 :          0 :         cond_resched();
    4204                 :          0 :         fs_reclaim_acquire(sc.gfp_mask);
    4205                 :            :         /*
    4206                 :            :          * We need to be able to allocate from the reserves for RECLAIM_UNMAP
    4207                 :            :          * and we also need to be able to write out pages for RECLAIM_WRITE
    4208                 :            :          * and RECLAIM_UNMAP.
    4209                 :            :          */
    4210                 :          0 :         noreclaim_flag = memalloc_noreclaim_save();
    4211                 :          0 :         p->flags |= PF_SWAPWRITE;
    4212                 :          0 :         set_task_reclaim_state(p, &sc.reclaim_state);
    4213                 :            : 
    4214         [ #  # ]:          0 :         if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
    4215                 :            :                 /*
     4216                 :            :                  * Free memory by calling shrink_node() with increasing
    4217                 :            :                  * priorities until we have enough memory freed.
    4218                 :            :                  */
    4219                 :          0 :                 do {
    4220                 :          0 :                         shrink_node(pgdat, &sc);
    4221   [ #  #  #  # ]:          0 :                 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
    4222                 :            :         }
    4223                 :            : 
    4224                 :          0 :         set_task_reclaim_state(p, NULL);
    4225                 :          0 :         current->flags &= ~PF_SWAPWRITE;
    4226                 :          0 :         memalloc_noreclaim_restore(noreclaim_flag);
    4227                 :          0 :         fs_reclaim_release(sc.gfp_mask);
    4228                 :            : 
    4229                 :          0 :         trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
    4230                 :            : 
    4231                 :          0 :         return sc.nr_reclaimed >= nr_pages;
    4232                 :            : }
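
__node_reclaim() saves the caller's noreclaim state and restores exactly that
saved state on exit, rather than blindly clearing the flag; this keeps nested
save/restore pairs safe. A minimal model of the discipline (F_NORECLAIM and
noreclaim_save()/noreclaim_restore() are hypothetical):

#include <assert.h>

#define F_NORECLAIM	(1u << 0)

static unsigned int task_flags;

static unsigned int noreclaim_save(void)
{
	unsigned int old = task_flags & F_NORECLAIM;

	task_flags |= F_NORECLAIM;
	return old;
}

static void noreclaim_restore(unsigned int old)
{
	task_flags = (task_flags & ~F_NORECLAIM) | old;
}

int main(void)
{
	unsigned int outer = noreclaim_save();	/* outer scope sets the bit */
	unsigned int inner = noreclaim_save();	/* nested scope: already set */

	noreclaim_restore(inner);
	assert(task_flags & F_NORECLAIM);	/* still set for outer scope */
	noreclaim_restore(outer);
	assert(!(task_flags & F_NORECLAIM));	/* fully restored */
	return 0;
}
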
    4233                 :            : 
    4234                 :          0 : int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
    4235                 :            : {
    4236                 :          0 :         int ret;
    4237                 :            : 
    4238                 :            :         /*
    4239                 :            :          * Node reclaim reclaims unmapped file backed pages and
    4240                 :            :          * slab pages if we are over the defined limits.
    4241                 :            :          *
    4242                 :            :          * A small portion of unmapped file backed pages is needed for
     4243                 :            :          * file I/O; otherwise, pages read by file I/O will be immediately
    4244                 :            :          * thrown out if the node is overallocated. So we do not reclaim
    4245                 :            :          * if less than a specified percentage of the node is used by
    4246                 :            :          * unmapped file backed pages.
    4247                 :            :          */
    4248         [ #  # ]:          0 :         if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
    4249         [ #  # ]:          0 :             node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
    4250                 :            :                 return NODE_RECLAIM_FULL;
    4251                 :            : 
    4252                 :            :         /*
    4253                 :            :          * Do not scan if the allocation should not be delayed.
    4254                 :            :          */
    4255   [ #  #  #  # ]:          0 :         if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
    4256                 :            :                 return NODE_RECLAIM_NOSCAN;
    4257                 :            : 
    4258                 :            :         /*
    4259                 :            :          * Only run node reclaim on the local node or on nodes that do not
    4260                 :            :          * have associated processors. This will favor the local processor
     4261                 :            :  * over remote processors and spread off-node memory allocations
     4262                 :            :  * as widely as possible.
    4263                 :            :          */
    4264   [ #  #  #  # ]:          0 :         if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
    4265                 :            :                 return NODE_RECLAIM_NOSCAN;
    4266                 :            : 
    4267         [ #  # ]:          0 :         if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
    4268                 :            :                 return NODE_RECLAIM_NOSCAN;
    4269                 :            : 
    4270                 :          0 :         ret = __node_reclaim(pgdat, gfp_mask, order);
    4271                 :          0 :         clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
    4272                 :            : 
    4273         [ #  # ]:          0 :         if (!ret)
    4274                 :          0 :                 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
    4275                 :            : 
    4276                 :            :         return ret;
    4277                 :            : }
    4278                 :            : #endif
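
node_reclaim() builds a non-blocking try-lock from
test_and_set_bit(PGDAT_RECLAIM_LOCKED, ...): at most one caller reclaims a
given node, and contenders return NODE_RECLAIM_NOSCAN instead of waiting. The
same idiom with C11 atomics (try_node_reclaim() is a hypothetical stand-in):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag reclaim_locked = ATOMIC_FLAG_INIT;

static int try_node_reclaim(void)
{
	/* test_and_set returns the old value: true means already held. */
	if (atomic_flag_test_and_set(&reclaim_locked))
		return -1;		/* NODE_RECLAIM_NOSCAN analogue */

	/* ... reclaim work would go here ... */

	atomic_flag_clear(&reclaim_locked);
	return 0;
}

int main(void)
{
	printf("uncontended attempt: %d\n", try_node_reclaim());	/* 0 */
	atomic_flag_test_and_set(&reclaim_locked);	/* simulate a holder */
	printf("contended attempt:   %d\n", try_node_reclaim());	/* -1 */
	return 0;
}
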
    4279                 :            : 
    4280                 :            : /*
    4281                 :            :  * page_evictable - test whether a page is evictable
    4282                 :            :  * @page: the page to test
    4283                 :            :  *
     4284                 :            :  * Test whether the page is evictable, i.e., whether it should be placed on
     4285                 :            :  * the active/inactive lists rather than on the unevictable list.
    4286                 :            :  *
    4287                 :            :  * Reasons page might not be evictable:
    4288                 :            :  * (1) page's mapping marked unevictable
    4289                 :            :  * (2) page is part of an mlocked VMA
    4290                 :            :  *
    4291                 :            :  */
    4292                 :    3847042 : int page_evictable(struct page *page)
    4293                 :            : {
    4294                 :    3847042 :         int ret;
    4295                 :            : 
    4296                 :            :         /* Prevent address_space of inode and swap cache from being freed */
    4297                 :    3847042 :         rcu_read_lock();
    4298   [ +  -  -  + ]:    8692614 :         ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
    4299                 :    3847042 :         rcu_read_unlock();
    4300                 :    3847042 :         return ret;
    4301                 :            : }
    4302                 :            : 
    4303                 :            : /**
    4304                 :            :  * check_move_unevictable_pages - check pages for evictability and move to
    4305                 :            :  * appropriate zone lru list
    4306                 :            :  * @pvec: pagevec with lru pages to check
    4307                 :            :  *
     4308                 :            :  * Checks pages for evictability; if an evictable page is on the unevictable
     4309                 :            :  * lru list, it is moved to the appropriate evictable lru list. This function
     4310                 :            :  * should only be used for lru pages.
    4311                 :            :  */
    4312                 :          0 : void check_move_unevictable_pages(struct pagevec *pvec)
    4313                 :            : {
    4314                 :          0 :         struct lruvec *lruvec;
    4315                 :          0 :         struct pglist_data *pgdat = NULL;
    4316                 :          0 :         int pgscanned = 0;
    4317                 :          0 :         int pgrescued = 0;
    4318                 :          0 :         int i;
    4319                 :            : 
    4320         [ #  # ]:          0 :         for (i = 0; i < pvec->nr; i++) {
    4321                 :          0 :                 struct page *page = pvec->pages[i];
    4322         [ #  # ]:          0 :                 struct pglist_data *pagepgdat = page_pgdat(page);
    4323                 :            : 
    4324                 :          0 :                 pgscanned++;
    4325         [ #  # ]:          0 :                 if (pagepgdat != pgdat) {
    4326         [ #  # ]:          0 :                         if (pgdat)
    4327                 :          0 :                                 spin_unlock_irq(&pgdat->lru_lock);
    4328                 :          0 :                         pgdat = pagepgdat;
    4329                 :          0 :                         spin_lock_irq(&pgdat->lru_lock);
    4330                 :            :                 }
    4331         [ #  # ]:          0 :                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
    4332                 :            : 
    4333   [ #  #  #  #  :          0 :                 if (!PageLRU(page) || !PageUnevictable(page))
                   #  # ]
    4334                 :          0 :                         continue;
    4335                 :            : 
    4336         [ #  # ]:          0 :                 if (page_evictable(page)) {
    4337                 :          0 :                         enum lru_list lru = page_lru_base_type(page);
    4338                 :            : 
    4339                 :          0 :                         VM_BUG_ON_PAGE(PageActive(page), page);
    4340         [ #  # ]:          0 :                         ClearPageUnevictable(page);
    4341                 :          0 :                         del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
    4342                 :          0 :                         add_page_to_lru_list(page, lruvec, lru);
    4343                 :          0 :                         pgrescued++;
    4344                 :            :                 }
    4345                 :            :         }
    4346                 :            : 
    4347         [ #  # ]:          0 :         if (pgdat) {
    4348         [ #  # ]:          0 :                 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
    4349         [ #  # ]:          0 :                 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
    4350                 :          0 :                 spin_unlock_irq(&pgdat->lru_lock);
    4351                 :            :         }
    4352                 :          0 : }
    4353                 :            : EXPORT_SYMBOL_GPL(check_move_unevictable_pages);

Generated by: LCOV version 1.14