LCOV - code coverage report
Current view: top level - mm - filemap.c
Test: combined.info
Date: 2022-03-28 16:04:14

                 Hit     Total    Coverage
Lines:           778      1300      59.8 %
Functions:        51        78      65.4 %
Branches:        352       828      42.5 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0-only
       2                 :            : /*
       3                 :            :  *      linux/mm/filemap.c
       4                 :            :  *
       5                 :            :  * Copyright (C) 1994-1999  Linus Torvalds
       6                 :            :  */
       7                 :            : 
       8                 :            : /*
       9                 :            :  * This file handles the generic file mmap semantics used by
      10                 :            :  * most "normal" filesystems (but you don't /have/ to use this:
      11                 :            :  * the NFS filesystem used to do this differently, for example)
      12                 :            :  */
      13                 :            : #include <linux/export.h>
      14                 :            : #include <linux/compiler.h>
      15                 :            : #include <linux/dax.h>
      16                 :            : #include <linux/fs.h>
      17                 :            : #include <linux/sched/signal.h>
      18                 :            : #include <linux/uaccess.h>
      19                 :            : #include <linux/capability.h>
      20                 :            : #include <linux/kernel_stat.h>
      21                 :            : #include <linux/gfp.h>
      22                 :            : #include <linux/mm.h>
      23                 :            : #include <linux/swap.h>
      24                 :            : #include <linux/mman.h>
      25                 :            : #include <linux/pagemap.h>
      26                 :            : #include <linux/file.h>
      27                 :            : #include <linux/uio.h>
      28                 :            : #include <linux/error-injection.h>
      29                 :            : #include <linux/hash.h>
      30                 :            : #include <linux/writeback.h>
      31                 :            : #include <linux/backing-dev.h>
      32                 :            : #include <linux/pagevec.h>
      33                 :            : #include <linux/blkdev.h>
      34                 :            : #include <linux/security.h>
      35                 :            : #include <linux/cpuset.h>
      36                 :            : #include <linux/hugetlb.h>
      37                 :            : #include <linux/memcontrol.h>
      38                 :            : #include <linux/cleancache.h>
      39                 :            : #include <linux/shmem_fs.h>
      40                 :            : #include <linux/rmap.h>
      41                 :            : #include <linux/delayacct.h>
      42                 :            : #include <linux/psi.h>
      43                 :            : #include <linux/ramfs.h>
      44                 :            : #include "internal.h"
      45                 :            : 
      46                 :            : #define CREATE_TRACE_POINTS
      47                 :            : #include <trace/events/filemap.h>
      48                 :            : 
      49                 :            : /*
      50                 :            :  * FIXME: remove all knowledge of the buffer layer from the core VM
      51                 :            :  */
      52                 :            : #include <linux/buffer_head.h> /* for try_to_free_buffers */
      53                 :            : 
      54                 :            : #include <asm/mman.h>
      55                 :            : 
      56                 :            : /*
      57                 :            :  * Shared mappings implemented 30.11.1994. It's not fully working yet,
      58                 :            :  * though.
      59                 :            :  *
      60                 :            :  * Shared mappings now work. 15.8.1995  Bruno.
      61                 :            :  *
      62                 :            :  * finished 'unifying' the page and buffer cache and SMP-threaded the
      63                 :            :  * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
      64                 :            :  *
      65                 :            :  * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
      66                 :            :  */
      67                 :            : 
      68                 :            : /*
      69                 :            :  * Lock ordering:
      70                 :            :  *
      71                 :            :  *  ->i_mmap_rwsem           (truncate_pagecache)
      72                 :            :  *    ->private_lock         (__free_pte->__set_page_dirty_buffers)
      73                 :            :  *      ->swap_lock          (exclusive_swap_page, others)
      74                 :            :  *        ->i_pages lock
      75                 :            :  *
      76                 :            :  *  ->i_mutex
      77                 :            :  *    ->i_mmap_rwsem         (truncate->unmap_mapping_range)
      78                 :            :  *
      79                 :            :  *  ->mmap_sem
      80                 :            :  *    ->i_mmap_rwsem
      81                 :            :  *      ->page_table_lock or pte_lock        (various, mainly in memory.c)
      82                 :            :  *        ->i_pages lock     (arch-dependent flush_dcache_mmap_lock)
      83                 :            :  *
      84                 :            :  *  ->mmap_sem
      85                 :            :  *    ->lock_page            (access_process_vm)
      86                 :            :  *
      87                 :            :  *  ->i_mutex                        (generic_perform_write)
      88                 :            :  *    ->mmap_sem             (fault_in_pages_readable->do_page_fault)
      89                 :            :  *
      90                 :            :  *  bdi->wb.list_lock
      91                 :            :  *    sb_lock                   (fs/fs-writeback.c)
      92                 :            :  *    ->i_pages lock         (__sync_single_inode)
      93                 :            :  *
      94                 :            :  *  ->i_mmap_rwsem
      95                 :            :  *    ->anon_vma.lock                (vma_adjust)
      96                 :            :  *
      97                 :            :  *  ->anon_vma.lock
      98                 :            :  *    ->page_table_lock or pte_lock  (anon_vma_prepare and various)
      99                 :            :  *
     100                 :            :  *  ->page_table_lock or pte_lock
     101                 :            :  *    ->swap_lock            (try_to_unmap_one)
     102                 :            :  *    ->private_lock         (try_to_unmap_one)
     103                 :            :  *    ->i_pages lock         (try_to_unmap_one)
     104                 :            :  *    ->pgdat->lru_lock           (follow_page->mark_page_accessed)
     105                 :            :  *    ->pgdat->lru_lock           (check_pte_range->isolate_lru_page)
     106                 :            :  *    ->private_lock         (page_remove_rmap->set_page_dirty)
     107                 :            :  *    ->i_pages lock         (page_remove_rmap->set_page_dirty)
     108                 :            :  *    bdi.wb->list_lock              (page_remove_rmap->set_page_dirty)
     109                 :            :  *    ->inode->i_lock             (page_remove_rmap->set_page_dirty)
     110                 :            :  *    ->memcg->move_lock  (page_remove_rmap->lock_page_memcg)
     111                 :            :  *    bdi.wb->list_lock              (zap_pte_range->set_page_dirty)
     112                 :            :  *    ->inode->i_lock             (zap_pte_range->set_page_dirty)
     113                 :            :  *    ->private_lock         (zap_pte_range->__set_page_dirty_buffers)
     114                 :            :  *
     115                 :            :  * ->i_mmap_rwsem
     116                 :            :  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
     117                 :            :  */
     118                 :            : 
     119                 :        442 : static void page_cache_delete(struct address_space *mapping,
     120                 :            :                                    struct page *page, void *shadow)
     121                 :            : {
     122                 :        442 :         XA_STATE(xas, &mapping->i_pages, page->index);
     123                 :        442 :         unsigned int nr = 1;
     124                 :            : 
     125         [ -  + ]:        442 :         mapping_set_update(&xas, mapping);
     126                 :            : 
     127                 :            :         /* hugetlb pages are represented by a single entry in the xarray */
     128         [ +  - ]:        442 :         if (!PageHuge(page)) {
     129         [ -  + ]:        884 :                 xas_set_order(&xas, page->index, compound_order(page));
     130                 :        884 :                 nr = compound_nr(page);
     131                 :            :         }
     132                 :            : 
     133                 :        442 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     134                 :        442 :         VM_BUG_ON_PAGE(PageTail(page), page);
     135                 :        442 :         VM_BUG_ON_PAGE(nr != 1 && shadow, page);
     136                 :            : 
     137                 :        442 :         xas_store(&xas, shadow);
     138                 :        442 :         xas_init_marks(&xas);
     139                 :            : 
     140                 :        442 :         page->mapping = NULL;
     141                 :            :         /* Leave page->index set: truncation lookup relies upon it */
     142                 :            : 
     143         [ -  + ]:        442 :         if (shadow) {
     144                 :          0 :                 mapping->nrexceptional += nr;
     145                 :            :                 /*
     146                 :            :                  * Make sure the nrexceptional update is committed before
     147                 :            :                  * the nrpages update so that final truncate racing
     148                 :            :                  * with reclaim does not see both counters 0 at the
     149                 :            :                  * same time and miss a shadow entry.
     150                 :            :                  */
     151                 :          0 :                 smp_wmb();
     152                 :            :         }
     153                 :        442 :         mapping->nrpages -= nr;
     154                 :        442 : }
     155                 :            : 
     156                 :       7358 : static void unaccount_page_cache_page(struct address_space *mapping,
     157                 :            :                                       struct page *page)
     158                 :            : {
     159                 :       7358 :         int nr;
     160                 :            : 
     161                 :            :         /*
     162                 :            :          * if we're uptodate, flush out into the cleancache, otherwise
     163                 :            :          * invalidate any existing cleancache entries.  We can't leave
     164                 :            :          * stale data around in the cleancache once our page is gone
     165                 :            :          */
     166         [ +  + ]:       7358 :         if (PageUptodate(page) && PageMappedToDisk(page))
     167                 :            :                 cleancache_put_page(page);
     168                 :            :         else
     169                 :            :                 cleancache_invalidate_page(mapping, page);
     170                 :            : 
     171                 :       7358 :         VM_BUG_ON_PAGE(PageTail(page), page);
     172                 :       7358 :         VM_BUG_ON_PAGE(page_mapped(page), page);
     173         [ -  + ]:       7358 :         if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
     174                 :          0 :                 int mapcount;
     175                 :            : 
     176                 :          0 :                 pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
     177                 :            :                          current->comm, page_to_pfn(page));
     178                 :          0 :                 dump_page(page, "still mapped when deleted");
     179                 :          0 :                 dump_stack();
     180                 :          0 :                 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     181                 :            : 
     182                 :          0 :                 mapcount = page_mapcount(page);
     183         [ #  # ]:          0 :                 if (mapping_exiting(mapping) &&
     184         [ #  # ]:          0 :                     page_count(page) >= mapcount + 2) {
     185                 :            :                         /*
     186                 :            :                          * All vmas have already been torn down, so it's
     187                 :            :                          * a good bet that actually the page is unmapped,
     188                 :            :                          * and we'd prefer not to leak it: if we're wrong,
     189                 :            :                          * some other bad page check should catch it later.
     190                 :            :                          */
     191                 :          0 :                         page_mapcount_reset(page);
     192                 :          0 :                         page_ref_sub(page, mapcount);
     193                 :            :                 }
     194                 :            :         }
     195                 :            : 
     196                 :            :         /* hugetlb pages do not participate in page cache accounting. */
     197         [ +  - ]:       7358 :         if (PageHuge(page))
     198                 :            :                 return;
     199                 :            : 
     200                 :       7358 :         nr = hpage_nr_pages(page);
     201                 :            : 
     202                 :       7358 :         __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
     203   [ -  +  +  + ]:      14716 :         if (PageSwapBacked(page)) {
     204                 :        442 :                 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
     205                 :        442 :                 if (PageTransHuge(page))
     206                 :            :                         __dec_node_page_state(page, NR_SHMEM_THPS);
     207                 :            :         } else if (PageTransHuge(page)) {
     208                 :            :                 __dec_node_page_state(page, NR_FILE_THPS);
     209                 :            :                 filemap_nr_thps_dec(mapping);
     210                 :            :         }
     211                 :            : 
     212                 :            :         /*
     213                 :            :          * At this point page must be either written or cleaned by
     214                 :            :          * truncate.  Dirty page here signals a bug and loss of
     215                 :            :          * unwritten data.
     216                 :            :          *
     217                 :            :          * This fixes dirty accounting after removing the page entirely
     218                 :            :          * but leaves PageDirty set: it has no effect for truncated
     219                 :            :          * page and anyway will be cleared before returning page into
     220                 :            :          * buddy allocator.
     221                 :            :          */
     222   [ -  +  -  +  :      14716 :         if (WARN_ON_ONCE(PageDirty(page)))
                   -  + ]
     223                 :          0 :                 account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
     224                 :            : }
     225                 :            : 
     226                 :            : /*
     227                 :            :  * Delete a page from the page cache and free it. Caller has to make
     228                 :            :  * sure the page is locked and that nobody else uses it - or that usage
     229                 :            :  * is safe.  The caller must hold the i_pages lock.
     230                 :            :  */
     231                 :        442 : void __delete_from_page_cache(struct page *page, void *shadow)
     232                 :            : {
     233                 :        442 :         struct address_space *mapping = page->mapping;
     234                 :            : 
     235                 :        442 :         trace_mm_filemap_delete_from_page_cache(page);
     236                 :            : 
     237                 :        442 :         unaccount_page_cache_page(mapping, page);
     238                 :        442 :         page_cache_delete(mapping, page, shadow);
     239                 :        442 : }
     240                 :            : 
     241                 :       7358 : static void page_cache_free_page(struct address_space *mapping,
     242                 :            :                                 struct page *page)
     243                 :            : {
     244                 :       7358 :         void (*freepage)(struct page *);
     245                 :            : 
     246                 :       7358 :         freepage = mapping->a_ops->freepage;
     247                 :       7358 :         if (freepage)
     248                 :          0 :                 freepage(page);
     249                 :            : 
     250                 :       7358 :         if (PageTransHuge(page) && !PageHuge(page)) {
     251                 :            :                 page_ref_sub(page, HPAGE_PMD_NR);
     252                 :            :                 VM_BUG_ON_PAGE(page_count(page) <= 0, page);
     253                 :            :         } else {
     254                 :       7358 :                 put_page(page);
     255                 :            :         }
     256                 :            : }
     257                 :            : 
     258                 :            : /**
     259                 :            :  * delete_from_page_cache - delete page from page cache
     260                 :            :  * @page: the page which the kernel is trying to remove from page cache
     261                 :            :  *
     262                 :            :  * This must be called only on pages that have been verified to be in the page
     263                 :            :  * cache and locked.  It will never put the page into the free list, the caller
     264                 :            :  * has a reference on the page.
     265                 :            :  */
     266                 :        442 : void delete_from_page_cache(struct page *page)
     267                 :            : {
     268                 :        442 :         struct address_space *mapping = page_mapping(page);
     269                 :        442 :         unsigned long flags;
     270                 :            : 
     271   [ -  +  -  + ]:        884 :         BUG_ON(!PageLocked(page));
     272                 :        442 :         xa_lock_irqsave(&mapping->i_pages, flags);
     273                 :        442 :         __delete_from_page_cache(page, NULL);
     274                 :        442 :         xa_unlock_irqrestore(&mapping->i_pages, flags);
     275                 :            : 
     276         [ -  + ]:        442 :         page_cache_free_page(mapping, page);
     277                 :        442 : }
     278                 :            : EXPORT_SYMBOL(delete_from_page_cache);
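
A minimal caller sketch (not taken from this file; drop_cached_page() is an illustrative name): delete_from_page_cache() expects the page to be locked, still present in the page cache, and referenced by the caller, so a typical user locks the page, re-checks page->mapping under the lock, and only then deletes.

/*
 * Illustrative sketch only: remove a page we hold a reference to from the
 * page cache, honouring the locking rules documented above.
 */
static void drop_cached_page(struct page *page)
{
        lock_page(page);
        if (page->mapping)              /* still attached to a mapping? */
                delete_from_page_cache(page);
        unlock_page(page);
}
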
     279                 :            : 
     280                 :            : /*
     281                 :            :  * page_cache_delete_batch - delete several pages from page cache
     282                 :            :  * @mapping: the mapping to which pages belong
     283                 :            :  * @pvec: pagevec with pages to delete
     284                 :            :  *
     285                 :            :  * The function walks over mapping->i_pages and removes pages passed in @pvec
     286                 :            :  * from the mapping. The function expects @pvec to be sorted by page index
     287                 :            :  * and is optimised for it to be dense.
     288                 :            :  * It tolerates holes in @pvec (mapping entries at those indices are not
     289                 :            :  * modified). The function expects only THP head pages to be present in the
     290                 :            :  * @pvec.
     291                 :            :  *
     292                 :            :  * The function expects the i_pages lock to be held.
     293                 :            :  */
     294                 :        598 : static void page_cache_delete_batch(struct address_space *mapping,
     295                 :            :                              struct pagevec *pvec)
     296                 :            : {
     297                 :        598 :         XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
     298                 :        598 :         int total_pages = 0;
     299                 :        598 :         int i = 0;
     300                 :        598 :         struct page *page;
     301                 :            : 
     302         [ +  - ]:        598 :         mapping_set_update(&xas, mapping);
     303         [ +  + ]:       7514 :         xas_for_each(&xas, page, ULONG_MAX) {
     304         [ +  + ]:       7358 :                 if (i >= pagevec_count(pvec))
     305                 :            :                         break;
     306                 :            : 
     307                 :            :                 /* A swap/dax/shadow entry got inserted? Skip it. */
     308         [ -  + ]:       6916 :                 if (xa_is_value(page))
     309                 :          0 :                         continue;
     310                 :            :                 /*
     311                 :            :                  * A page got inserted in our range? Skip it. We have our
     312                 :            :                  * pages locked so they are protected from being removed.
     313                 :            :                  * If we see a page whose index is higher than ours, it
     314                 :            :                  * means our page has been removed, which shouldn't be
     315                 :            :                  * possible because we're holding the PageLock.
     316                 :            :                  */
     317         [ -  + ]:       6916 :                 if (page != pvec->pages[i]) {
     318                 :          0 :                         VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
     319                 :            :                                         page);
     320                 :          0 :                         continue;
     321                 :            :                 }
     322                 :            : 
     323   [ -  +  -  + ]:      13832 :                 WARN_ON_ONCE(!PageLocked(page));
     324                 :            : 
     325         [ +  - ]:       6916 :                 if (page->index == xas.xa_index)
     326                 :       6916 :                         page->mapping = NULL;
     327                 :            :                 /* Leave page->index set: truncation lookup relies on it */
     328                 :            : 
     329                 :            :                 /*
     330                 :            :                  * Move to the next page in the vector if this is a regular
     331                 :            :                  * page or the index is of the last sub-page of this compound
     332                 :            :                  * page.
     333                 :            :                  */
     334         [ +  - ]:      13832 :                 if (page->index + compound_nr(page) - 1 == xas.xa_index)
     335                 :       6916 :                         i++;
     336                 :       6916 :                 xas_store(&xas, NULL);
     337                 :       6916 :                 total_pages++;
     338                 :            :         }
     339                 :        598 :         mapping->nrpages -= total_pages;
     340                 :        598 : }
     341                 :            : 
     342                 :        598 : void delete_from_page_cache_batch(struct address_space *mapping,
     343                 :            :                                   struct pagevec *pvec)
     344                 :            : {
     345                 :        598 :         int i;
     346                 :        598 :         unsigned long flags;
     347                 :            : 
     348         [ +  - ]:        598 :         if (!pagevec_count(pvec))
     349                 :            :                 return;
     350                 :            : 
     351                 :        598 :         xa_lock_irqsave(&mapping->i_pages, flags);
     352         [ +  + ]:       8112 :         for (i = 0; i < pagevec_count(pvec); i++) {
     353                 :       6916 :                 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
     354                 :            : 
     355                 :       6916 :                 unaccount_page_cache_page(mapping, pvec->pages[i]);
     356                 :            :         }
     357                 :        598 :         page_cache_delete_batch(mapping, pvec);
     358                 :        598 :         xa_unlock_irqrestore(&mapping->i_pages, flags);
     359                 :            : 
     360         [ +  + ]:       8112 :         for (i = 0; i < pagevec_count(pvec); i++)
     361         [ -  + ]:       6916 :                 page_cache_free_page(mapping, pvec->pages[i]);
     362                 :            : }
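
A hedged sketch of the batch pattern (modelled loosely on how truncation uses it; drop_locked_pages() is an illustrative name): the caller gathers locked pages of one mapping, sorted by index, into a pagevec, deletes them in a single pass under one i_pages lock acquisition, then unlocks and releases them.

/* Illustrative sketch: @pvec holds locked pages of @mapping, sorted by index. */
static void drop_locked_pages(struct address_space *mapping,
                              struct pagevec *pvec)
{
        int i;

        delete_from_page_cache_batch(mapping, pvec);
        for (i = 0; i < pagevec_count(pvec); i++)
                unlock_page(pvec->pages[i]);
        pagevec_release(pvec);
}
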
     363                 :            : 
     364                 :        338 : int filemap_check_errors(struct address_space *mapping)
     365                 :            : {
     366                 :        338 :         int ret = 0;
     367                 :            :         /* Check for outstanding write errors */
     368   [ -  +  -  - ]:        338 :         if (test_bit(AS_ENOSPC, &mapping->flags) &&
     369                 :          0 :             test_and_clear_bit(AS_ENOSPC, &mapping->flags))
     370                 :          0 :                 ret = -ENOSPC;
     371   [ -  +  -  - ]:        338 :         if (test_bit(AS_EIO, &mapping->flags) &&
     372                 :          0 :             test_and_clear_bit(AS_EIO, &mapping->flags))
     373                 :          0 :                 ret = -EIO;
     374                 :        338 :         return ret;
     375                 :            : }
     376                 :            : EXPORT_SYMBOL(filemap_check_errors);
     377                 :            : 
     378                 :          0 : static int filemap_check_and_keep_errors(struct address_space *mapping)
     379                 :            : {
     380                 :            :         /* Check for outstanding write errors */
     381         [ #  # ]:          0 :         if (test_bit(AS_EIO, &mapping->flags))
     382                 :            :                 return -EIO;
     383         [ #  # ]:          0 :         if (test_bit(AS_ENOSPC, &mapping->flags))
     384                 :          0 :                 return -ENOSPC;
     385                 :            :         return 0;
     386                 :            : }
     387                 :            : 
     388                 :            : /**
     389                 :            :  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
     390                 :            :  * @mapping:    address space structure to write
     391                 :            :  * @start:      offset in bytes where the range starts
     392                 :            :  * @end:        offset in bytes where the range ends (inclusive)
     393                 :            :  * @sync_mode:  enable synchronous operation
     394                 :            :  *
     395                 :            :  * Start writeback against all of a mapping's dirty pages that lie
     396                 :            :  * within the byte offsets <start, end> inclusive.
     397                 :            :  *
     398                 :            :  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
     399                 :            :  * opposed to a regular memory cleansing writeback.  The difference between
     400                 :            :  * these two operations is that if a dirty page/buffer is encountered, it must
     401                 :            :  * be waited upon, and not just skipped over.
     402                 :            :  *
     403                 :            :  * Return: %0 on success, negative error code otherwise.
     404                 :            :  */
     405                 :        234 : int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
     406                 :            :                                 loff_t end, int sync_mode)
     407                 :            : {
     408                 :        234 :         int ret;
     409                 :        234 :         struct writeback_control wbc = {
     410                 :            :                 .sync_mode = sync_mode,
     411                 :            :                 .nr_to_write = LONG_MAX,
     412                 :            :                 .range_start = start,
     413                 :            :                 .range_end = end,
     414                 :            :         };
     415                 :            : 
     416         [ +  - ]:        234 :         if (!mapping_cap_writeback_dirty(mapping) ||
     417                 :            :             !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
     418                 :            :                 return 0;
     419                 :            : 
     420                 :        104 :         wbc_attach_fdatawrite_inode(&wbc, mapping->host);
     421                 :        104 :         ret = do_writepages(mapping, &wbc);
     422                 :        104 :         wbc_detach_inode(&wbc);
     423                 :        104 :         return ret;
     424                 :            : }
     425                 :            : 
     426                 :         13 : static inline int __filemap_fdatawrite(struct address_space *mapping,
     427                 :            :         int sync_mode)
     428                 :            : {
     429                 :         13 :         return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
     430                 :            : }
     431                 :            : 
     432                 :          0 : int filemap_fdatawrite(struct address_space *mapping)
     433                 :            : {
     434                 :          0 :         return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
     435                 :            : }
     436                 :            : EXPORT_SYMBOL(filemap_fdatawrite);
     437                 :            : 
     438                 :          0 : int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
     439                 :            :                                 loff_t end)
     440                 :            : {
     441                 :          0 :         return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
     442                 :            : }
     443                 :            : EXPORT_SYMBOL(filemap_fdatawrite_range);
     444                 :            : 
     445                 :            : /**
     446                 :            :  * filemap_flush - mostly a non-blocking flush
     447                 :            :  * @mapping:    target address_space
     448                 :            :  *
     449                 :            :  * This is a mostly non-blocking flush.  Not suitable for data-integrity
     450                 :            :  * purposes - I/O may not be started against all dirty pages.
     451                 :            :  *
     452                 :            :  * Return: %0 on success, negative error code otherwise.
     453                 :            :  */
     454                 :         13 : int filemap_flush(struct address_space *mapping)
     455                 :            : {
     456                 :         13 :         return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
     457                 :            : }
     458                 :            : EXPORT_SYMBOL(filemap_flush);
     459                 :            : 
     460                 :            : /**
     461                 :            :  * filemap_range_has_page - check if a page exists in range.
     462                 :            :  * @mapping:           address space within which to check
     463                 :            :  * @start_byte:        offset in bytes where the range starts
     464                 :            :  * @end_byte:          offset in bytes where the range ends (inclusive)
     465                 :            :  *
     466                 :            :  * Find at least one page in the range supplied, usually used to check if
     467                 :            :  * direct writing in this range will trigger a writeback.
     468                 :            :  *
     469                 :            :  * Return: %true if at least one page exists in the specified range,
     470                 :            :  * %false otherwise.
     471                 :            :  */
     472                 :          0 : bool filemap_range_has_page(struct address_space *mapping,
     473                 :            :                            loff_t start_byte, loff_t end_byte)
     474                 :            : {
     475                 :          0 :         struct page *page;
     476                 :          0 :         XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
     477                 :          0 :         pgoff_t max = end_byte >> PAGE_SHIFT;
     478                 :            : 
     479         [ #  # ]:          0 :         if (end_byte < start_byte)
     480                 :            :                 return false;
     481                 :            : 
     482                 :          0 :         rcu_read_lock();
     483                 :          0 :         for (;;) {
     484                 :          0 :                 page = xas_find(&xas, max);
     485         [ #  # ]:          0 :                 if (xas_retry(&xas, page))
     486                 :          0 :                         continue;
     487                 :            :                 /* Shadow entries don't count */
     488         [ #  # ]:          0 :                 if (xa_is_value(page))
     489                 :          0 :                         continue;
     490                 :            :                 /*
     491                 :            :                  * We don't need to try to pin this page; we're about to
     492                 :            :                  * release the RCU lock anyway.  It is enough to know that
     493                 :            :                  * there was a page here recently.
     494                 :            :                  */
     495                 :          0 :                 break;
     496                 :            :         }
     497                 :          0 :         rcu_read_unlock();
     498                 :            : 
     499                 :          0 :         return page != NULL;
     500                 :            : }
     501                 :            : EXPORT_SYMBOL(filemap_range_has_page);
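
A sketch of a common use, assuming a hypothetical myfs_dio_check() helper: a non-blocking (IOCB_NOWAIT) direct I/O path can use this check to return -EAGAIN instead of blocking on page cache writeback or invalidation.

/*
 * Illustrative sketch: bail out of non-blocking direct I/O if the byte range
 * [ki_pos, ki_pos + count - 1] still has pages in the cache.
 */
static int myfs_dio_check(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;

        if ((iocb->ki_flags & IOCB_NOWAIT) &&
            filemap_range_has_page(mapping, iocb->ki_pos,
                                   iocb->ki_pos + count - 1))
                return -EAGAIN;
        return 0;
}
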
     502                 :            : 
     503                 :        221 : static void __filemap_fdatawait_range(struct address_space *mapping,
     504                 :            :                                      loff_t start_byte, loff_t end_byte)
     505                 :            : {
     506                 :        221 :         pgoff_t index = start_byte >> PAGE_SHIFT;
     507                 :        221 :         pgoff_t end = end_byte >> PAGE_SHIFT;
     508                 :        221 :         struct pagevec pvec;
     509                 :        221 :         int nr_pages;
     510                 :            : 
     511         [ -  + ]:        221 :         if (end_byte < start_byte)
     512                 :          0 :                 return;
     513                 :            : 
     514                 :        221 :         pagevec_init(&pvec);
     515         [ +  + ]:        312 :         while (index <= end) {
     516                 :        221 :                 unsigned i;
     517                 :            : 
     518                 :        221 :                 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
     519                 :            :                                 end, PAGECACHE_TAG_WRITEBACK);
     520         [ +  + ]:        221 :                 if (!nr_pages)
     521                 :            :                         break;
     522                 :            : 
     523         [ +  + ]:        182 :                 for (i = 0; i < nr_pages; i++) {
     524                 :         91 :                         struct page *page = pvec.pages[i];
     525                 :            : 
     526                 :         91 :                         wait_on_page_writeback(page);
     527         [ -  + ]:         91 :                         ClearPageError(page);
     528                 :            :                 }
     529         [ +  - ]:         91 :                 pagevec_release(&pvec);
     530                 :         91 :                 cond_resched();
     531                 :            :         }
     532                 :            : }
     533                 :            : 
     534                 :            : /**
     535                 :            :  * filemap_fdatawait_range - wait for writeback to complete
     536                 :            :  * @mapping:            address space structure to wait for
     537                 :            :  * @start_byte:         offset in bytes where the range starts
     538                 :            :  * @end_byte:           offset in bytes where the range ends (inclusive)
     539                 :            :  *
     540                 :            :  * Walk the list of under-writeback pages of the given address space
     541                 :            :  * in the given range and wait for all of them.  Check error status of
     542                 :            :  * the address space and return it.
     543                 :            :  *
     544                 :            :  * Since the error status of the address space is cleared by this function,
     545                 :            :  * callers are responsible for checking the return value and handling and/or
     546                 :            :  * reporting the error.
     547                 :            :  *
     548                 :            :  * Return: error status of the address space.
     549                 :            :  */
     550                 :        130 : int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
     551                 :            :                             loff_t end_byte)
     552                 :            : {
     553                 :          0 :         __filemap_fdatawait_range(mapping, start_byte, end_byte);
     554                 :        130 :         return filemap_check_errors(mapping);
     555                 :            : }
     556                 :            : EXPORT_SYMBOL(filemap_fdatawait_range);
     557                 :            : 
     558                 :            : /**
     559                 :            :  * filemap_fdatawait_range_keep_errors - wait for writeback to complete
     560                 :            :  * @mapping:            address space structure to wait for
     561                 :            :  * @start_byte:         offset in bytes where the range starts
     562                 :            :  * @end_byte:           offset in bytes where the range ends (inclusive)
     563                 :            :  *
     564                 :            :  * Walk the list of under-writeback pages of the given address space in the
     565                 :            :  * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
     566                 :            :  * this function does not clear error status of the address space.
     567                 :            :  *
     568                 :            :  * Use this function if callers don't handle errors themselves.  Expected
     569                 :            :  * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
     570                 :            :  * fsfreeze(8)
     571                 :            :  */
     572                 :          0 : int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
     573                 :            :                 loff_t start_byte, loff_t end_byte)
     574                 :            : {
     575                 :          0 :         __filemap_fdatawait_range(mapping, start_byte, end_byte);
     576                 :          0 :         return filemap_check_and_keep_errors(mapping);
     577                 :            : }
     578                 :            : EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
     579                 :            : 
     580                 :            : /**
     581                 :            :  * file_fdatawait_range - wait for writeback to complete
     582                 :            :  * @file:               file pointing to address space structure to wait for
     583                 :            :  * @start_byte:         offset in bytes where the range starts
     584                 :            :  * @end_byte:           offset in bytes where the range ends (inclusive)
     585                 :            :  *
     586                 :            :  * Walk the list of under-writeback pages of the address space that file
     587                 :            :  * refers to, in the given range and wait for all of them.  Check error
     588                 :            :  * status of the address space vs. the file->f_wb_err cursor and return it.
     589                 :            :  *
     590                 :            :  * Since the error status of the file is advanced by this function,
     591                 :            :  * callers are responsible for checking the return value and handling and/or
     592                 :            :  * reporting the error.
     593                 :            :  *
     594                 :            :  * Return: error status of the address space vs. the file->f_wb_err cursor.
     595                 :            :  */
     596                 :          0 : int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
     597                 :            : {
     598                 :          0 :         struct address_space *mapping = file->f_mapping;
     599                 :            : 
     600                 :          0 :         __filemap_fdatawait_range(mapping, start_byte, end_byte);
     601                 :          0 :         return file_check_and_advance_wb_err(file);
     602                 :            : }
     603                 :            : EXPORT_SYMBOL(file_fdatawait_range);
     604                 :            : 
     605                 :            : /**
     606                 :            :  * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
     607                 :            :  * @mapping: address space structure to wait for
     608                 :            :  *
     609                 :            :  * Walk the list of under-writeback pages of the given address space
     610                 :            :  * and wait for all of them.  Unlike filemap_fdatawait(), this function
     611                 :            :  * does not clear error status of the address space.
     612                 :            :  *
     613                 :            :  * Use this function if callers don't handle errors themselves.  Expected
     614                 :            :  * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
     615                 :            :  * fsfreeze(8)
     616                 :            :  *
     617                 :            :  * Return: error status of the address space.
     618                 :            :  */
     619                 :          0 : int filemap_fdatawait_keep_errors(struct address_space *mapping)
     620                 :            : {
     621                 :          0 :         __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
     622                 :          0 :         return filemap_check_and_keep_errors(mapping);
     623                 :            : }
     624                 :            : EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
     625                 :            : 
     626                 :            : /* Returns true if writeback might be needed or already in progress. */
     627                 :        520 : static bool mapping_needs_writeback(struct address_space *mapping)
     628                 :            : {
     629                 :        520 :         if (dax_mapping(mapping))
     630                 :            :                 return mapping->nrexceptional;
     631                 :            : 
     632   [ +  +  +  + ]:        520 :         return mapping->nrpages;
     633                 :            : }
     634                 :            : 
     635                 :            : /**
     636                 :            :  * filemap_write_and_wait_range - write out & wait on a file range
     637                 :            :  * @mapping:    the address_space for the pages
     638                 :            :  * @lstart:     offset in bytes where the range starts
     639                 :            :  * @lend:       offset in bytes where the range ends (inclusive)
     640                 :            :  *
     641                 :            :  * Write out and wait upon file offsets lstart->lend, inclusive.
     642                 :            :  *
     643                 :            :  * Note that @lend is inclusive (describes the last byte to be written) so
     644                 :            :  * that this function can be used to write to the very end-of-file (end = -1).
     645                 :            :  *
     646                 :            :  * Return: error status of the address space.
     647                 :            :  */
     648                 :        338 : int filemap_write_and_wait_range(struct address_space *mapping,
     649                 :            :                                  loff_t lstart, loff_t lend)
     650                 :            : {
     651                 :        338 :         int err = 0;
     652                 :            : 
     653         [ +  + ]:        338 :         if (mapping_needs_writeback(mapping)) {
     654                 :        130 :                 err = __filemap_fdatawrite_range(mapping, lstart, lend,
     655                 :            :                                                  WB_SYNC_ALL);
     656                 :            :                 /*
     657                 :            :                  * Even if the above returned error, the pages may be
     658                 :            :                  * written partially (e.g. -ENOSPC), so we wait for it.
     659                 :            :                  * But the -EIO is special case, it may indicate the worst
     660                 :            :                  * thing (e.g. bug) happened, so we avoid waiting for it.
     661                 :            :                  */
     662         [ +  - ]:        130 :                 if (err != -EIO) {
     663                 :        130 :                         int err2 = filemap_fdatawait_range(mapping,
     664                 :            :                                                 lstart, lend);
     665         [ +  - ]:        130 :                         if (!err)
     666                 :        130 :                                 err = err2;
     667                 :            :                 } else {
     668                 :            :                         /* Clear any previously stored errors */
     669                 :          0 :                         filemap_check_errors(mapping);
     670                 :            :                 }
     671                 :            :         } else {
     672                 :        208 :                 err = filemap_check_errors(mapping);
     673                 :            :         }
     674                 :        338 :         return err;
     675                 :            : }
     676                 :            : EXPORT_SYMBOL(filemap_write_and_wait_range);
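
A sketch of typical use, with an illustrative helper name: a direct read path flushes and waits on just the byte range it is about to read, so the device sees the latest data without forcing a whole-file sync.

/*
 * Illustrative sketch: make the range [pos, pos + count - 1] stable on the
 * backing store before reading it with O_DIRECT.
 */
static int myfs_prepare_dio_read(struct file *file, loff_t pos, size_t count)
{
        return filemap_write_and_wait_range(file->f_mapping,
                                            pos, pos + count - 1);
}
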
     677                 :            : 
     678                 :          0 : void __filemap_set_wb_err(struct address_space *mapping, int err)
     679                 :            : {
     680                 :          0 :         errseq_t eseq = errseq_set(&mapping->wb_err, err);
     681                 :            : 
     682                 :          0 :         trace_filemap_set_wb_err(mapping, eseq);
     683                 :          0 : }
     684                 :            : EXPORT_SYMBOL(__filemap_set_wb_err);
     685                 :            : 
     686                 :            : /**
      687                 :            :  * file_check_and_advance_wb_err - report wb error (if any) seen previously
      688                 :            :  *                                 and advance wb_err to the current value
     689                 :            :  * @file: struct file on which the error is being reported
     690                 :            :  *
     691                 :            :  * When userland calls fsync (or something like nfsd does the equivalent), we
     692                 :            :  * want to report any writeback errors that occurred since the last fsync (or
     693                 :            :  * since the file was opened if there haven't been any).
     694                 :            :  *
     695                 :            :  * Grab the wb_err from the mapping. If it matches what we have in the file,
     696                 :            :  * then just quickly return 0. The file is all caught up.
     697                 :            :  *
     698                 :            :  * If it doesn't match, then take the mapping value, set the "seen" flag in
     699                 :            :  * it and try to swap it into place. If it works, or another task beat us
     700                 :            :  * to it with the new value, then update the f_wb_err and return the error
     701                 :            :  * portion. The error at this point must be reported via proper channels
     702                 :            :  * (a'la fsync, or NFS COMMIT operation, etc.).
     703                 :            :  *
     704                 :            :  * While we handle mapping->wb_err with atomic operations, the f_wb_err
     705                 :            :  * value is protected by the f_lock since we must ensure that it reflects
     706                 :            :  * the latest value swapped in for this file descriptor.
     707                 :            :  *
     708                 :            :  * Return: %0 on success, negative error code otherwise.
     709                 :            :  */
     710                 :        364 : int file_check_and_advance_wb_err(struct file *file)
     711                 :            : {
     712                 :        364 :         int err = 0;
     713                 :        364 :         errseq_t old = READ_ONCE(file->f_wb_err);
     714                 :        364 :         struct address_space *mapping = file->f_mapping;
     715                 :            : 
     716                 :            :         /* Locklessly handle the common case where nothing has changed */
     717         [ -  + ]:        364 :         if (errseq_check(&mapping->wb_err, old)) {
     718                 :            :                 /* Something changed, must use slow path */
     719                 :          0 :                 spin_lock(&file->f_lock);
     720                 :          0 :                 old = file->f_wb_err;
     721                 :          0 :                 err = errseq_check_and_advance(&mapping->wb_err,
     722                 :            :                                                 &file->f_wb_err);
     723                 :          0 :                 trace_file_check_and_advance_wb_err(file, old);
     724                 :          0 :                 spin_unlock(&file->f_lock);
     725                 :            :         }
     726                 :            : 
     727                 :            :         /*
     728                 :            :          * We're mostly using this function as a drop in replacement for
     729                 :            :          * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
     730                 :            :          * that the legacy code would have had on these flags.
     731                 :            :          */
     732                 :        364 :         clear_bit(AS_EIO, &mapping->flags);
     733                 :        364 :         clear_bit(AS_ENOSPC, &mapping->flags);
     734                 :        364 :         return err;
     735                 :            : }
     736                 :            : EXPORT_SYMBOL(file_check_and_advance_wb_err);
     737                 :            : 
     738                 :            : /**
     739                 :            :  * file_write_and_wait_range - write out & wait on a file range
     740                 :            :  * @file:       file pointing to address_space with pages
     741                 :            :  * @lstart:     offset in bytes where the range starts
     742                 :            :  * @lend:       offset in bytes where the range ends (inclusive)
     743                 :            :  *
     744                 :            :  * Write out and wait upon file offsets lstart->lend, inclusive.
     745                 :            :  *
     746                 :            :  * Note that @lend is inclusive (describes the last byte to be written) so
     747                 :            :  * that this function can be used to write to the very end-of-file (end = -1).
     748                 :            :  *
     749                 :            :  * After writing out and waiting on the data, we check and advance the
     750                 :            :  * f_wb_err cursor to the latest value, and return any errors detected there.
     751                 :            :  *
     752                 :            :  * Return: %0 on success, negative error code otherwise.
     753                 :            :  */
     754                 :        182 : int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
     755                 :            : {
     756                 :        182 :         int err = 0, err2;
     757                 :        182 :         struct address_space *mapping = file->f_mapping;
     758                 :            : 
     759         [ +  + ]:        182 :         if (mapping_needs_writeback(mapping)) {
     760                 :         91 :                 err = __filemap_fdatawrite_range(mapping, lstart, lend,
     761                 :            :                                                  WB_SYNC_ALL);
     762                 :            :                 /* See comment of filemap_write_and_wait() */
     763         [ +  - ]:         91 :                 if (err != -EIO)
     764                 :         91 :                         __filemap_fdatawait_range(mapping, lstart, lend);
     765                 :            :         }
     766                 :        182 :         err2 = file_check_and_advance_wb_err(file);
     767         [ +  - ]:        182 :         if (!err)
     768                 :        182 :                 err = err2;
     769                 :        182 :         return err;
     770                 :            : }
     771                 :            : EXPORT_SYMBOL(file_write_and_wait_range);
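
A sketch of the typical caller, assuming a filesystem ->fsync implementation; the function name and the metadata-commit step are illustrative, while the file_write_and_wait_range() call matches the helper above.

#include <linux/fs.h>

static int example_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int err;

	/* Flush dirty pagecache in [start, end] and pick up any writeback
	 * error not yet reported on this file descriptor. */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* ...filesystem-specific metadata commit would follow here... */
	return 0;
}
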
     772                 :            : 
     773                 :            : /**
     774                 :            :  * replace_page_cache_page - replace a pagecache page with a new one
     775                 :            :  * @old:        page to be replaced
     776                 :            :  * @new:        page to replace with
     777                 :            :  * @gfp_mask:   allocation mode
     778                 :            :  *
     779                 :            :  * This function replaces a page in the pagecache with a new one.  On
     780                 :            :  * success it acquires the pagecache reference for the new page and
     781                 :            :  * drops it for the old page.  Both the old and new pages must be
     782                 :            :  * locked.  This function does not add the new page to the LRU, the
     783                 :            :  * caller must do that.
     784                 :            :  *
     785                 :            :  * The remove + add is atomic.  This function cannot fail.
     786                 :            :  *
     787                 :            :  * Return: %0
     788                 :            :  */
     789                 :          0 : int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
     790                 :            : {
     791                 :          0 :         struct address_space *mapping = old->mapping;
     792                 :          0 :         void (*freepage)(struct page *) = mapping->a_ops->freepage;
     793                 :          0 :         pgoff_t offset = old->index;
     794                 :          0 :         XA_STATE(xas, &mapping->i_pages, offset);
     795                 :          0 :         unsigned long flags;
     796                 :            : 
     797                 :          0 :         VM_BUG_ON_PAGE(!PageLocked(old), old);
     798                 :          0 :         VM_BUG_ON_PAGE(!PageLocked(new), new);
     799                 :          0 :         VM_BUG_ON_PAGE(new->mapping, new);
     800                 :            : 
     801         [ #  # ]:          0 :         get_page(new);
     802                 :          0 :         new->mapping = mapping;
     803                 :          0 :         new->index = offset;
     804                 :            : 
     805                 :          0 :         xas_lock_irqsave(&xas, flags);
     806                 :          0 :         xas_store(&xas, new);
     807                 :            : 
     808                 :          0 :         old->mapping = NULL;
     809                 :            :         /* hugetlb pages do not participate in page cache accounting. */
     810         [ #  # ]:          0 :         if (!PageHuge(old))
     811                 :          0 :                 __dec_node_page_state(new, NR_FILE_PAGES);
     812         [ #  # ]:          0 :         if (!PageHuge(new))
     813                 :          0 :                 __inc_node_page_state(new, NR_FILE_PAGES);
     814   [ #  #  #  # ]:          0 :         if (PageSwapBacked(old))
     815                 :          0 :                 __dec_node_page_state(new, NR_SHMEM);
     816   [ #  #  #  # ]:          0 :         if (PageSwapBacked(new))
     817                 :          0 :                 __inc_node_page_state(new, NR_SHMEM);
     818                 :          0 :         xas_unlock_irqrestore(&xas, flags);
     819         [ #  # ]:          0 :         mem_cgroup_migrate(old, new);
     820         [ #  # ]:          0 :         if (freepage)
     821                 :          0 :                 freepage(old);
     822                 :          0 :         put_page(old);
     823                 :            : 
     824                 :          0 :         return 0;
     825                 :            : }
     826                 :            : EXPORT_SYMBOL_GPL(replace_page_cache_page);
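
A caller-side sketch (hypothetical helper name): both pages are already locked, the replacement itself cannot fail, and putting the new page on the LRU is explicitly left to the caller, hence the lru_cache_add() afterwards.

#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Swap @new into the pagecache slot currently held by @old.  The caller
 * must hold the lock on both pages; @new must not have a mapping yet. */
static void example_replace_cached_page(struct page *old, struct page *new)
{
	replace_page_cache_page(old, new, GFP_KERNEL);	/* cannot fail */
	lru_cache_add(new);				/* LRU insertion is the caller's job */
}
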
     827                 :            : 
     828                 :     169053 : static int __add_to_page_cache_locked(struct page *page,
     829                 :            :                                       struct address_space *mapping,
     830                 :            :                                       pgoff_t offset, gfp_t gfp_mask,
     831                 :            :                                       void **shadowp)
     832                 :            : {
     833                 :     169053 :         XA_STATE(xas, &mapping->i_pages, offset);
     834                 :     169053 :         int huge = PageHuge(page);
     835                 :     169053 :         struct mem_cgroup *memcg;
     836                 :     169053 :         int error;
     837                 :     169053 :         void *old;
     838                 :            : 
     839                 :     169053 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     840                 :     169053 :         VM_BUG_ON_PAGE(PageSwapBacked(page), page);
     841         [ +  - ]:     169053 :         mapping_set_update(&xas, mapping);
     842                 :            : 
     843         [ +  - ]:     169053 :         if (!huge) {
     844                 :     169053 :                 error = mem_cgroup_try_charge(page, current->mm,
     845                 :            :                                               gfp_mask, &memcg, false);
     846                 :     169053 :                 if (error)
     847                 :            :                         return error;
     848                 :            :         }
     849                 :            : 
     850         [ -  + ]:     169053 :         get_page(page);
     851                 :     169053 :         page->mapping = mapping;
     852                 :     169053 :         page->index = offset;
     853                 :            : 
     854                 :     169053 :         do {
     855                 :     169053 :                 xas_lock_irq(&xas);
     856                 :     169053 :                 old = xas_load(&xas);
     857   [ -  +  -  - ]:     169053 :                 if (old && !xa_is_value(old))
     858                 :          0 :                         xas_set_err(&xas, -EEXIST);
     859                 :     169053 :                 xas_store(&xas, page);
     860   [ -  +  -  - ]:     169053 :                 if (xas_error(&xas))
     861                 :          0 :                         goto unlock;
     862                 :            : 
     863         [ -  + ]:     169053 :                 if (xa_is_value(old)) {
     864                 :          0 :                         mapping->nrexceptional--;
     865         [ #  # ]:          0 :                         if (shadowp)
     866                 :          0 :                                 *shadowp = old;
     867                 :            :                 }
     868                 :     169053 :                 mapping->nrpages++;
     869                 :            : 
     870                 :            :                 /* hugetlb pages do not participate in page cache accounting */
     871         [ -  + ]:     169053 :                 if (!huge)
     872                 :     169053 :                         __inc_node_page_state(page, NR_FILE_PAGES);
     873                 :          0 : unlock:
     874                 :     169053 :                 xas_unlock_irq(&xas);
     875         [ -  + ]:     169053 :         } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
     876                 :            : 
     877   [ -  +  -  - ]:     169053 :         if (xas_error(&xas))
     878                 :          0 :                 goto error;
     879                 :            : 
     880                 :     169053 :         if (!huge)
     881                 :            :                 mem_cgroup_commit_charge(page, memcg, false, false);
     882                 :     169053 :         trace_mm_filemap_add_to_page_cache(page);
     883                 :     169053 :         return 0;
     884                 :            : error:
     885                 :          0 :         page->mapping = NULL;
     886                 :            :         /* Leave page->index set: truncation relies upon it */
     887                 :          0 :         if (!huge)
     888                 :            :                 mem_cgroup_cancel_charge(page, memcg, false);
     889                 :          0 :         put_page(page);
     890         [ #  # ]:          0 :         return xas_error(&xas);
     891                 :            : }
     892                 :            : ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
     893                 :            : 
     894                 :            : /**
     895                 :            :  * add_to_page_cache_locked - add a locked page to the pagecache
     896                 :            :  * @page:       page to add
     897                 :            :  * @mapping:    the page's address_space
     898                 :            :  * @offset:     page index
     899                 :            :  * @gfp_mask:   page allocation mode
     900                 :            :  *
      901                 :            :  * This function is used to add a page to the pagecache. The page must be locked.
     902                 :            :  * This function does not add the page to the LRU.  The caller must do that.
     903                 :            :  *
     904                 :            :  * Return: %0 on success, negative error code otherwise.
     905                 :            :  */
     906                 :          0 : int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
     907                 :            :                 pgoff_t offset, gfp_t gfp_mask)
     908                 :            : {
     909                 :          0 :         return __add_to_page_cache_locked(page, mapping, offset,
     910                 :            :                                           gfp_mask, NULL);
     911                 :            : }
     912                 :            : EXPORT_SYMBOL(add_to_page_cache_locked);
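
A sketch of the calling pattern around add_to_page_cache_locked() (the add_to_page_cache() inline in <linux/pagemap.h> follows this shape): lock the fresh page first, and undo the lock bit if insertion fails, e.g. with -EEXIST. The helper name is illustrative.

#include <linux/pagemap.h>

static int example_add_locked(struct page *page, struct address_space *mapping,
			      pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);		/* page is new, non-atomic set is fine */
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (error)
		__ClearPageLocked(page);
	return error;
}
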
     913                 :            : 
     914                 :     169053 : int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
     915                 :            :                                 pgoff_t offset, gfp_t gfp_mask)
     916                 :            : {
     917                 :     169053 :         void *shadow = NULL;
     918                 :     169053 :         int ret;
     919                 :            : 
     920         [ -  + ]:     169053 :         __SetPageLocked(page);
     921                 :     169053 :         ret = __add_to_page_cache_locked(page, mapping, offset,
     922                 :            :                                          gfp_mask, &shadow);
     923         [ -  + ]:     169053 :         if (unlikely(ret))
     924         [ #  # ]:          0 :                 __ClearPageLocked(page);
     925                 :            :         else {
     926                 :            :                 /*
     927                 :            :                  * The page might have been evicted from cache only
     928                 :            :                  * recently, in which case it should be activated like
     929                 :            :                  * any other repeatedly accessed page.
     930                 :            :                  * The exception is pages getting rewritten; evicting other
     931                 :            :                  * data from the working set, only to cache data that will
     932                 :            :                  * get overwritten with something else, is a waste of memory.
     933                 :            :                  */
     934   [ -  +  -  + ]:     338106 :                 WARN_ON_ONCE(PageActive(page));
     935   [ +  +  -  + ]:     169053 :                 if (!(gfp_mask & __GFP_WRITE) && shadow)
     936                 :          0 :                         workingset_refault(page, shadow);
     937                 :     169053 :                 lru_cache_add(page);
     938                 :            :         }
     939                 :     169053 :         return ret;
     940                 :            : }
     941                 :            : EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
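
A read-path style sketch (hypothetical helper): allocate a pagecache page and insert it at @index, letting add_to_page_cache_lru() handle both the cache insertion and the LRU; on success the page is returned locked, ready to be filled by ->readpage().

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static struct page *example_read_page_slot(struct address_space *mapping,
					   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);

	err = add_to_page_cache_lru(page, mapping, index, gfp);
	if (err) {
		put_page(page);		/* e.g. -EEXIST: someone else won the race */
		return ERR_PTR(err);
	}
	/* Page is locked and on the LRU; ->readpage() would fill it now. */
	return page;
}
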
     942                 :            : 
     943                 :            : #ifdef CONFIG_NUMA
     944                 :     169053 : struct page *__page_cache_alloc(gfp_t gfp)
     945                 :            : {
     946                 :     169053 :         int n;
     947                 :     169053 :         struct page *page;
     948                 :            : 
     949         [ -  + ]:     169053 :         if (cpuset_do_page_mem_spread()) {
     950                 :          0 :                 unsigned int cpuset_mems_cookie;
     951                 :          0 :                 do {
     952                 :          0 :                         cpuset_mems_cookie = read_mems_allowed_begin();
     953                 :          0 :                         n = cpuset_mem_spread_node();
     954                 :          0 :                         page = __alloc_pages_node(n, gfp, 0);
     955   [ #  #  #  # ]:          0 :                 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
     956                 :            : 
     957                 :          0 :                 return page;
     958                 :            :         }
     959                 :     169053 :         return alloc_pages(gfp, 0);
     960                 :            : }
     961                 :            : EXPORT_SYMBOL(__page_cache_alloc);
     962                 :            : #endif
     963                 :            : 
     964                 :            : /*
     965                 :            :  * In order to wait for pages to become available there must be
     966                 :            :  * waitqueues associated with pages. By using a hash table of
     967                 :            :  * waitqueues where the bucket discipline is to maintain all
     968                 :            :  * waiters on the same queue and wake all when any of the pages
     969                 :            :  * become available, and for the woken contexts to check to be
     970                 :            :  * sure the appropriate page became available, this saves space
     971                 :            :  * at a cost of "thundering herd" phenomena during rare hash
     972                 :            :  * collisions.
     973                 :            :  */
     974                 :            : #define PAGE_WAIT_TABLE_BITS 8
     975                 :            : #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
     976                 :            : static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
     977                 :            : 
     978                 :      17532 : static wait_queue_head_t *page_waitqueue(struct page *page)
     979                 :            : {
     980                 :      17532 :         return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
     981                 :            : }
     982                 :            : 
     983                 :         13 : void __init pagecache_init(void)
     984                 :            : {
     985                 :         13 :         int i;
     986                 :            : 
     987         [ +  + ]:       3341 :         for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
     988                 :       3328 :                 init_waitqueue_head(&page_wait_table[i]);
     989                 :            : 
     990                 :         13 :         page_writeback_init();
     991                 :         13 : }
     992                 :            : 
     993                 :            : /* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
     994                 :            : struct wait_page_key {
     995                 :            :         struct page *page;
     996                 :            :         int bit_nr;
     997                 :            :         int page_match;
     998                 :            : };
     999                 :            : 
    1000                 :            : struct wait_page_queue {
    1001                 :            :         struct page *page;
    1002                 :            :         int bit_nr;
    1003                 :            :         wait_queue_entry_t wait;
    1004                 :            : };
    1005                 :            : 
    1006                 :       8751 : static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
    1007                 :            : {
    1008                 :       8751 :         struct wait_page_key *key = arg;
    1009                 :       8751 :         struct wait_page_queue *wait_page
    1010                 :       8751 :                 = container_of(wait, struct wait_page_queue, wait);
    1011                 :            : 
    1012         [ +  + ]:       8751 :         if (wait_page->page != key->page)
    1013                 :            :                return 0;
    1014                 :       8750 :         key->page_match = 1;
    1015                 :            : 
    1016         [ +  - ]:       8750 :         if (wait_page->bit_nr != key->bit_nr)
    1017                 :            :                 return 0;
    1018                 :            : 
    1019                 :            :         /*
    1020                 :            :          * Stop walking if it's locked.
    1021                 :            :          * Is this safe if put_and_wait_on_page_locked() is in use?
    1022                 :            :          * Yes: the waker must hold a reference to this page, and if PG_locked
    1023                 :            :          * has now already been set by another task, that task must also hold
    1024                 :            :          * a reference to the *same usage* of this page; so there is no need
    1025                 :            :          * to walk on to wake even the put_and_wait_on_page_locked() callers.
    1026                 :            :          */
    1027         [ +  - ]:       8750 :         if (test_bit(key->bit_nr, &key->page->flags))
    1028                 :            :                 return -1;
    1029                 :            : 
    1030                 :       8750 :         return autoremove_wake_function(wait, mode, sync, key);
    1031                 :            : }
    1032                 :            : 
    1033                 :       8765 : static void wake_up_page_bit(struct page *page, int bit_nr)
    1034                 :            : {
    1035                 :       8765 :         wait_queue_head_t *q = page_waitqueue(page);
    1036                 :       8765 :         struct wait_page_key key;
    1037                 :       8765 :         unsigned long flags;
    1038                 :       8765 :         wait_queue_entry_t bookmark;
    1039                 :            : 
    1040                 :       8765 :         key.page = page;
    1041                 :       8765 :         key.bit_nr = bit_nr;
    1042                 :       8765 :         key.page_match = 0;
    1043                 :            : 
    1044                 :       8765 :         bookmark.flags = 0;
    1045                 :       8765 :         bookmark.private = NULL;
    1046                 :       8765 :         bookmark.func = NULL;
    1047                 :       8765 :         INIT_LIST_HEAD(&bookmark.entry);
    1048                 :            : 
    1049                 :       8765 :         spin_lock_irqsave(&q->lock, flags);
    1050                 :       8765 :         __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
    1051                 :            : 
    1052         [ -  + ]:       8765 :         while (bookmark.flags & WQ_FLAG_BOOKMARK) {
    1053                 :            :                 /*
    1054                 :            :                  * Take a breather from holding the lock,
    1055                 :            :                  * allow pages that finish wake up asynchronously
    1056                 :            :                  * to acquire the lock and remove themselves
    1057                 :            :                  * from wait queue
    1058                 :            :                  */
    1059                 :          0 :                 spin_unlock_irqrestore(&q->lock, flags);
    1060                 :          0 :                 cpu_relax();
    1061                 :          0 :                 spin_lock_irqsave(&q->lock, flags);
    1062                 :          0 :                 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
    1063                 :            :         }
    1064                 :            : 
    1065                 :            :         /*
    1066                 :            :          * It is possible for other pages to have collided on the waitqueue
     1067                 :            :  * hash, so in that case check for a page match. That prevents a long-
     1068                 :            :  * term waiter for this page from being missed when PG_waiters is cleared.
    1069                 :            :          *
    1070                 :            :          * It is still possible to miss a case here, when we woke page waiters
    1071                 :            :          * and removed them from the waitqueue, but there are still other
    1072                 :            :          * page waiters.
    1073                 :            :          */
    1074   [ +  +  -  + ]:       8765 :         if (!waitqueue_active(q) || !key.page_match) {
    1075                 :       8558 :                 ClearPageWaiters(page);
    1076                 :            :                 /*
    1077                 :            :                  * It's possible to miss clearing Waiters here, when we woke
    1078                 :            :                  * our page waiters, but the hashed waitqueue has waiters for
    1079                 :            :                  * other pages on it.
    1080                 :            :                  *
    1081                 :            :                  * That's okay, it's a rare case. The next waker will clear it.
    1082                 :            :                  */
    1083                 :            :         }
    1084                 :       8765 :         spin_unlock_irqrestore(&q->lock, flags);
    1085                 :       8765 : }
    1086                 :            : 
    1087                 :        104 : static void wake_up_page(struct page *page, int bit)
    1088                 :            : {
    1089         [ +  + ]:        104 :         if (!PageWaiters(page))
    1090                 :            :                 return;
    1091                 :         91 :         wake_up_page_bit(page, bit);
    1092                 :            : }
    1093                 :            : 
    1094                 :            : /*
    1095                 :            :  * A choice of three behaviors for wait_on_page_bit_common():
    1096                 :            :  */
    1097                 :            : enum behavior {
    1098                 :            :         EXCLUSIVE,      /* Hold ref to page and take the bit when woken, like
    1099                 :            :                          * __lock_page() waiting on then setting PG_locked.
    1100                 :            :                          */
    1101                 :            :         SHARED,         /* Hold ref to page and check the bit when woken, like
    1102                 :            :                          * wait_on_page_writeback() waiting on PG_writeback.
    1103                 :            :                          */
    1104                 :            :         DROP,           /* Drop ref to page before wait, no check when woken,
    1105                 :            :                          * like put_and_wait_on_page_locked() on PG_locked.
    1106                 :            :                          */
    1107                 :            : };
    1108                 :            : 
    1109                 :       8767 : static inline int wait_on_page_bit_common(wait_queue_head_t *q,
    1110                 :            :         struct page *page, int bit_nr, int state, enum behavior behavior)
    1111                 :            : {
    1112                 :       8767 :         struct wait_page_queue wait_page;
    1113                 :       8767 :         wait_queue_entry_t *wait = &wait_page.wait;
    1114                 :       8767 :         bool bit_is_set;
    1115                 :       8767 :         bool thrashing = false;
    1116                 :       8767 :         bool delayacct = false;
    1117                 :       8767 :         unsigned long pflags;
    1118                 :       8767 :         int ret = 0;
    1119                 :            : 
    1120   [ +  +  +  + ]:      17443 :         if (bit_nr == PG_locked &&
    1121         [ -  + ]:      17343 :             !PageUptodate(page) && PageWorkingset(page)) {
    1122   [ #  #  #  # ]:          0 :                 if (!PageSwapBacked(page)) {
    1123         [ #  # ]:          0 :                         delayacct_thrashing_start();
    1124                 :            :                         delayacct = true;
    1125                 :            :                 }
    1126                 :            :                 psi_memstall_enter(&pflags);
    1127                 :            :                 thrashing = true;
    1128                 :            :         }
    1129                 :            : 
    1130                 :       8767 :         init_wait(wait);
    1131                 :       8767 :         wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
    1132                 :       8767 :         wait->func = wake_page_function;
    1133                 :       8767 :         wait_page.page = page;
    1134                 :       8767 :         wait_page.bit_nr = bit_nr;
    1135                 :            : 
    1136                 :       8767 :         for (;;) {
    1137                 :       8767 :                 spin_lock_irq(&q->lock);
    1138                 :            : 
    1139         [ +  - ]:       8767 :                 if (likely(list_empty(&wait->entry))) {
    1140                 :       8767 :                         __add_wait_queue_entry_tail(q, wait);
    1141                 :       8767 :                         SetPageWaiters(page);
    1142                 :            :                 }
    1143                 :            : 
    1144                 :       8767 :                 set_current_state(state);
    1145                 :            : 
    1146                 :       8767 :                 spin_unlock_irq(&q->lock);
    1147                 :            : 
    1148                 :       8767 :                 bit_is_set = test_bit(bit_nr, &page->flags);
    1149         [ -  + ]:       8767 :                 if (behavior == DROP)
    1150                 :          0 :                         put_page(page);
    1151                 :            : 
    1152         [ +  + ]:       8767 :                 if (likely(bit_is_set))
    1153                 :       8739 :                         io_schedule();
    1154                 :            : 
    1155         [ +  + ]:       8767 :                 if (behavior == EXCLUSIVE) {
    1156         [ -  + ]:       3677 :                         if (!test_and_set_bit_lock(bit_nr, &page->flags))
    1157                 :            :                                 break;
    1158         [ +  - ]:       5090 :                 } else if (behavior == SHARED) {
    1159         [ -  + ]:       5090 :                         if (!test_bit(bit_nr, &page->flags))
    1160                 :            :                                 break;
    1161                 :            :                 }
    1162                 :            : 
    1163         [ #  # ]:          0 :                 if (signal_pending_state(state, current)) {
    1164                 :            :                         ret = -EINTR;
    1165                 :            :                         break;
    1166                 :            :                 }
    1167                 :            : 
    1168         [ #  # ]:          0 :                 if (behavior == DROP) {
    1169                 :            :                         /*
    1170                 :            :                          * We can no longer safely access page->flags:
    1171                 :            :                          * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
    1172                 :            :                          * there is a risk of waiting forever on a page reused
    1173                 :            :                          * for something that keeps it locked indefinitely.
    1174                 :            :                          * But best check for -EINTR above before breaking.
    1175                 :            :                          */
    1176                 :            :                         break;
    1177                 :            :                 }
    1178                 :            :         }
    1179                 :            : 
    1180                 :       8767 :         finish_wait(q, wait);
    1181                 :            : 
    1182         [ -  + ]:       8767 :         if (thrashing) {
    1183         [ #  # ]:          0 :                 if (delayacct)
    1184         [ #  # ]:          0 :                         delayacct_thrashing_end();
    1185                 :            :                 psi_memstall_leave(&pflags);
    1186                 :            :         }
    1187                 :            : 
    1188                 :            :         /*
    1189                 :            :          * A signal could leave PageWaiters set. Clearing it here if
    1190                 :            :          * !waitqueue_active would be possible (by open-coding finish_wait),
    1191                 :            :          * but still fail to catch it in the case of wait hash collision. We
    1192                 :            :          * already can fail to clear wait hash collision cases, so don't
    1193                 :            :          * bother with signals either.
    1194                 :            :          */
    1195                 :            : 
    1196                 :       8767 :         return ret;
    1197                 :            : }
    1198                 :            : 
    1199                 :        117 : void wait_on_page_bit(struct page *page, int bit_nr)
    1200                 :            : {
    1201                 :        117 :         wait_queue_head_t *q = page_waitqueue(page);
    1202                 :        117 :         wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
    1203                 :         26 : }
    1204                 :            : EXPORT_SYMBOL(wait_on_page_bit);
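
A sketch of the usual consumer: the <linux/pagemap.h> wrappers boil down to waiting on one page flag bit. Here, waiting for whoever holds PG_locked (e.g. a read in flight) without taking the lock ourselves; the helper name is illustrative.

#include <linux/pagemap.h>

static bool example_wait_for_read(struct page *page)
{
	if (!PageUptodate(page))
		wait_on_page_locked(page);	/* sleeps on PG_locked via wait_on_page_bit() */
	return PageUptodate(page);
}
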
    1205                 :            : 
    1206                 :       4973 : int wait_on_page_bit_killable(struct page *page, int bit_nr)
    1207                 :            : {
    1208                 :       4973 :         wait_queue_head_t *q = page_waitqueue(page);
    1209                 :       4973 :         return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
    1210                 :            : }
    1211                 :            : EXPORT_SYMBOL(wait_on_page_bit_killable);
    1212                 :            : 
    1213                 :            : /**
    1214                 :            :  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
    1215                 :            :  * @page: The page to wait for.
    1216                 :            :  *
    1217                 :            :  * The caller should hold a reference on @page.  They expect the page to
    1218                 :            :  * become unlocked relatively soon, but do not wish to hold up migration
    1219                 :            :  * (for example) by holding the reference while waiting for the page to
    1220                 :            :  * come unlocked.  After this function returns, the caller should not
    1221                 :            :  * dereference @page.
    1222                 :            :  */
    1223                 :          0 : void put_and_wait_on_page_locked(struct page *page)
    1224                 :            : {
    1225                 :          0 :         wait_queue_head_t *q;
    1226                 :            : 
    1227         [ #  # ]:          0 :         page = compound_head(page);
    1228                 :          0 :         q = page_waitqueue(page);
    1229                 :          0 :         wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
    1230                 :          0 : }
    1231                 :            : 
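
A sketch of the calling convention described above: the caller hands over its (only) reference so the wait does not pin the page, and must not touch the page afterwards. The helper name is hypothetical; the in-tree users are the migration-entry wait paths.

#include <linux/pagemap.h>

static void example_wait_without_pin(struct page *page)
{
	/* Consumes the caller's reference before sleeping on PG_locked. */
	put_and_wait_on_page_locked(page);
	/* @page must not be dereferenced past this point. */
}
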
    1232                 :            : /**
    1233                 :            :  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
    1234                 :            :  * @page: Page defining the wait queue of interest
    1235                 :            :  * @waiter: Waiter to add to the queue
    1236                 :            :  *
    1237                 :            :  * Add an arbitrary @waiter to the wait queue for the nominated @page.
    1238                 :            :  */
    1239                 :          0 : void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
    1240                 :            : {
    1241                 :          0 :         wait_queue_head_t *q = page_waitqueue(page);
    1242                 :          0 :         unsigned long flags;
    1243                 :            : 
    1244                 :          0 :         spin_lock_irqsave(&q->lock, flags);
    1245                 :          0 :         __add_wait_queue_entry_tail(q, waiter);
    1246                 :          0 :         SetPageWaiters(page);
    1247                 :          0 :         spin_unlock_irqrestore(&q->lock, flags);
    1248                 :          0 : }
    1249                 :            : EXPORT_SYMBOL_GPL(add_page_wait_queue);
    1250                 :            : 
    1251                 :            : #ifndef clear_bit_unlock_is_negative_byte
    1252                 :            : 
    1253                 :            : /*
     1254                 :            :  * PG_waiters is the high bit in the same byte as PG_locked.
     1255                 :            :  *
     1256                 :            :  * On x86 (and on many other architectures), we can clear PG_locked and
    1257                 :            :  * test the sign bit at the same time. But if the architecture does
    1258                 :            :  * not support that special operation, we just do this all by hand
    1259                 :            :  * instead.
    1260                 :            :  *
    1261                 :            :  * The read of PG_waiters has to be after (or concurrently with) PG_locked
     1262                 :            :  * being cleared, but a memory barrier should be unnecessary since it is
    1263                 :            :  * in the same byte as PG_locked.
    1264                 :            :  */
    1265                 :            : static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
    1266                 :            : {
    1267                 :            :         clear_bit_unlock(nr, mem);
    1268                 :            :         /* smp_mb__after_atomic(); */
    1269                 :            :         return test_bit(PG_waiters, mem);
    1270                 :            : }
    1271                 :            : 
    1272                 :            : #endif
    1273                 :            : 
    1274                 :            : /**
    1275                 :            :  * unlock_page - unlock a locked page
    1276                 :            :  * @page: the page
    1277                 :            :  *
    1278                 :            :  * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
    1279                 :            :  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
    1280                 :            :  * mechanism between PageLocked pages and PageWriteback pages is shared.
    1281                 :            :  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
    1282                 :            :  *
    1283                 :            :  * Note that this depends on PG_waiters being the sign bit in the byte
    1284                 :            :  * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
    1285                 :            :  * clear the PG_locked bit and test PG_waiters at the same time fairly
    1286                 :            :  * portably (architectures that do LL/SC can test any bit, while x86 can
    1287                 :            :  * test the sign bit).
    1288                 :            :  */
    1289                 :    7827974 : void unlock_page(struct page *page)
    1290                 :            : {
    1291                 :    7827974 :         BUILD_BUG_ON(PG_waiters != 7);
    1292         [ -  + ]:    7827974 :         page = compound_head(page);
    1293                 :    7827974 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
    1294         [ +  + ]:    7827974 :         if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
    1295                 :       8674 :                 wake_up_page_bit(page, PG_locked);
    1296                 :    7827974 : }
    1297                 :            : EXPORT_SYMBOL(unlock_page);
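
A sketch of the usual pairing (illustrative helper name): take the page lock around an update that requires PG_locked, then unlock_page() clears the bit and wakes any waiters parked on it.

#include <linux/pagemap.h>

static void example_update_under_lock(struct page *page)
{
	lock_page(page);	/* sleeps in __lock_page() if already locked */
	/* ...modify state that PG_locked protects... */
	unlock_page(page);	/* clears PG_locked and wakes PG_waiters sleepers */
}
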
    1298                 :            : 
    1299                 :            : /**
    1300                 :            :  * end_page_writeback - end writeback against a page
    1301                 :            :  * @page: the page
    1302                 :            :  */
    1303                 :        104 : void end_page_writeback(struct page *page)
    1304                 :            : {
    1305                 :            :         /*
    1306                 :            :          * TestClearPageReclaim could be used here but it is an atomic
    1307                 :            :          * operation and overkill in this particular case. Failing to
    1308                 :            :          * shuffle a page marked for immediate reclaim is too mild to
    1309                 :            :          * justify taking an atomic operation penalty at the end of
     1310                 :            :  * every page writeback.
    1311                 :            :          */
    1312   [ -  +  -  + ]:        208 :         if (PageReclaim(page)) {
    1313         [ #  # ]:          0 :                 ClearPageReclaim(page);
    1314                 :          0 :                 rotate_reclaimable_page(page);
    1315                 :            :         }
    1316                 :            : 
    1317         [ -  + ]:        104 :         if (!test_clear_page_writeback(page))
    1318                 :          0 :                 BUG();
    1319                 :            : 
    1320                 :        104 :         smp_mb__after_atomic();
    1321                 :        104 :         wake_up_page(page, PG_writeback);
    1322                 :        104 : }
    1323                 :            : EXPORT_SYMBOL(end_page_writeback);
    1324                 :            : 
    1325                 :            : /*
    1326                 :            :  * After completing I/O on a page, call this routine to update the page
    1327                 :            :  * flags appropriately
    1328                 :            :  */
    1329                 :      10140 : void page_endio(struct page *page, bool is_write, int err)
    1330                 :            : {
    1331         [ +  - ]:      10140 :         if (!is_write) {
    1332         [ +  - ]:      10140 :                 if (!err) {
    1333                 :      10140 :                         SetPageUptodate(page);
    1334                 :            :                 } else {
    1335         [ #  # ]:          0 :                         ClearPageUptodate(page);
    1336         [ #  # ]:          0 :                         SetPageError(page);
    1337                 :            :                 }
    1338                 :      10140 :                 unlock_page(page);
    1339                 :            :         } else {
    1340         [ #  # ]:          0 :                 if (err) {
    1341                 :          0 :                         struct address_space *mapping;
    1342                 :            : 
    1343         [ #  # ]:          0 :                         SetPageError(page);
    1344                 :          0 :                         mapping = page_mapping(page);
    1345         [ #  # ]:          0 :                         if (mapping)
    1346                 :          0 :                                 mapping_set_error(mapping, err);
    1347                 :            :                 }
    1348                 :          0 :                 end_page_writeback(page);
    1349                 :            :         }
    1350                 :      10140 : }
    1351                 :            : EXPORT_SYMBOL_GPL(page_endio);
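
A sketch patterned after a simple block driver's bio completion handler (a single-page bio is assumed and the function name is illustrative): page_endio() marks the page up to date or errored and finishes the lock/writeback state for that page.

#include <linux/bio.h>
#include <linux/pagemap.h>

static void example_end_bio(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	page_endio(page, op_is_write(bio_op(bio)),
		   blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}
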
    1352                 :            : 
    1353                 :            : /**
    1354                 :            :  * __lock_page - get a lock on the page, assuming we need to sleep to get it
    1355                 :            :  * @__page: the page to lock
    1356                 :            :  */
    1357                 :          0 : void __lock_page(struct page *__page)
    1358                 :            : {
    1359         [ #  # ]:          0 :         struct page *page = compound_head(__page);
    1360                 :          0 :         wait_queue_head_t *q = page_waitqueue(page);
    1361                 :          0 :         wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
    1362                 :            :                                 EXCLUSIVE);
    1363                 :          0 : }
    1364                 :            : EXPORT_SYMBOL(__lock_page);
    1365                 :            : 
    1366                 :       3677 : int __lock_page_killable(struct page *__page)
    1367                 :            : {
    1368         [ -  + ]:       3677 :         struct page *page = compound_head(__page);
    1369                 :       3677 :         wait_queue_head_t *q = page_waitqueue(page);
    1370                 :       3677 :         return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
    1371                 :            :                                         EXCLUSIVE);
    1372                 :            : }
    1373                 :            : EXPORT_SYMBOL_GPL(__lock_page_killable);
    1374                 :            : 
    1375                 :            : /*
    1376                 :            :  * Return values:
    1377                 :            :  * 1 - page is locked; mmap_sem is still held.
    1378                 :            :  * 0 - page is not locked.
    1379                 :            :  *     mmap_sem has been released (up_read()), unless flags had both
    1380                 :            :  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
    1381                 :            :  *     which case mmap_sem is still held.
    1382                 :            :  *
    1383                 :            :  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
    1384                 :            :  * with the page locked and the mmap_sem unperturbed.
    1385                 :            :  */
    1386                 :          0 : int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
    1387                 :            :                          unsigned int flags)
    1388                 :            : {
    1389         [ #  # ]:          0 :         if (flags & FAULT_FLAG_ALLOW_RETRY) {
    1390                 :            :                 /*
    1391                 :            :                  * CAUTION! In this case, mmap_sem is not released
    1392                 :            :                  * even though return 0.
    1393                 :            :                  */
    1394         [ #  # ]:          0 :                 if (flags & FAULT_FLAG_RETRY_NOWAIT)
    1395                 :            :                         return 0;
    1396                 :            : 
    1397                 :          0 :                 up_read(&mm->mmap_sem);
    1398         [ #  # ]:          0 :                 if (flags & FAULT_FLAG_KILLABLE)
    1399                 :          0 :                         wait_on_page_locked_killable(page);
    1400                 :            :                 else
    1401                 :          0 :                         wait_on_page_locked(page);
    1402                 :          0 :                 return 0;
    1403                 :            :         } else {
    1404         [ #  # ]:          0 :                 if (flags & FAULT_FLAG_KILLABLE) {
    1405                 :          0 :                         int ret;
    1406                 :            : 
    1407                 :          0 :                         ret = __lock_page_killable(page);
    1408         [ #  # ]:          0 :                         if (ret) {
    1409                 :          0 :                                 up_read(&mm->mmap_sem);
    1410                 :          0 :                                 return 0;
    1411                 :            :                         }
    1412                 :            :                 } else
    1413                 :          0 :                         __lock_page(page);
    1414                 :          0 :                 return 1;
    1415                 :            :         }
    1416                 :            : }
    1417                 :            : 
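
A fault-path sketch consuming the return convention documented above, trying the lock-free fast path first (this mirrors the lock_page_or_retry() inline in <linux/pagemap.h>); the surrounding function is hypothetical.

#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t example_fault_lock(struct vm_fault *vmf, struct page *page)
{
	if (!trylock_page(page) &&
	    !__lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags))
		return VM_FAULT_RETRY;	/* mmap_sem may have been dropped */

	/* Returning 0 here means: page locked, mmap_sem still held. */
	return 0;
}
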
    1418                 :            : /**
    1419                 :            :  * page_cache_next_miss() - Find the next gap in the page cache.
    1420                 :            :  * @mapping: Mapping.
    1421                 :            :  * @index: Index.
    1422                 :            :  * @max_scan: Maximum range to search.
    1423                 :            :  *
    1424                 :            :  * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
    1425                 :            :  * gap with the lowest index.
    1426                 :            :  *
    1427                 :            :  * This function may be called under the rcu_read_lock.  However, this will
    1428                 :            :  * not atomically search a snapshot of the cache at a single point in time.
    1429                 :            :  * For example, if a gap is created at index 5, then subsequently a gap is
    1430                 :            :  * created at index 10, page_cache_next_miss covering both indices may
    1431                 :            :  * return 10 if called under the rcu_read_lock.
    1432                 :            :  *
    1433                 :            :  * Return: The index of the gap if found, otherwise an index outside the
    1434                 :            :  * range specified (in which case 'return - index >= max_scan' will be true).
    1435                 :            :  * In the rare case of index wrap-around, 0 will be returned.
    1436                 :            :  */
    1437                 :       2003 : pgoff_t page_cache_next_miss(struct address_space *mapping,
    1438                 :            :                              pgoff_t index, unsigned long max_scan)
    1439                 :            : {
    1440                 :       2003 :         XA_STATE(xas, &mapping->i_pages, index);
    1441                 :            : 
    1442         [ +  + ]:      36997 :         while (max_scan--) {
    1443                 :      36438 :                 void *entry = xas_next(&xas);
    1444   [ +  +  +  - ]:      36438 :                 if (!entry || xa_is_value(entry))
    1445                 :            :                         break;
    1446         [ +  - ]:      34994 :                 if (xas.xa_index == 0)
    1447                 :            :                         break;
    1448                 :            :         }
    1449                 :            : 
    1450                 :       2003 :         return xas.xa_index;
    1451                 :            : }
    1452                 :            : EXPORT_SYMBOL(page_cache_next_miss);
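
A readahead-style probe sketch (hypothetical helper): find the first index at or after @index with no page in the cache, scanning at most @max_scan slots under RCU.

#include <linux/pagemap.h>
#include <linux/rcupdate.h>

static pgoff_t example_first_hole(struct address_space *mapping,
				  pgoff_t index, unsigned long max_scan)
{
	pgoff_t hole;

	rcu_read_lock();
	hole = page_cache_next_miss(mapping, index, max_scan);
	rcu_read_unlock();

	/* hole - index >= max_scan means no gap was found in the window. */
	return hole;
}
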
    1453                 :            : 
    1454                 :            : /**
    1455                 :            :  * page_cache_prev_miss() - Find the previous gap in the page cache.
    1456                 :            :  * @mapping: Mapping.
    1457                 :            :  * @index: Index.
    1458                 :            :  * @max_scan: Maximum range to search.
    1459                 :            :  *
    1460                 :            :  * Search the range [max(index - max_scan + 1, 0), index] for the
    1461                 :            :  * gap with the highest index.
    1462                 :            :  *
    1463                 :            :  * This function may be called under the rcu_read_lock.  However, this will
    1464                 :            :  * not atomically search a snapshot of the cache at a single point in time.
    1465                 :            :  * For example, if a gap is created at index 10, then subsequently a gap is
    1466                 :            :  * created at index 5, page_cache_prev_miss() covering both indices may
    1467                 :            :  * return 5 if called under the rcu_read_lock.
    1468                 :            :  *
    1469                 :            :  * Return: The index of the gap if found, otherwise an index outside the
    1470                 :            :  * range specified (in which case 'index - return >= max_scan' will be true).
    1471                 :            :  * In the rare case of wrap-around, ULONG_MAX will be returned.
    1472                 :            :  */
    1473                 :          0 : pgoff_t page_cache_prev_miss(struct address_space *mapping,
    1474                 :            :                              pgoff_t index, unsigned long max_scan)
    1475                 :            : {
    1476                 :          0 :         XA_STATE(xas, &mapping->i_pages, index);
    1477                 :            : 
    1478         [ #  # ]:          0 :         while (max_scan--) {
    1479                 :          0 :                 void *entry = xas_prev(&xas);
    1480   [ #  #  #  # ]:          0 :                 if (!entry || xa_is_value(entry))
    1481                 :            :                         break;
    1482         [ #  # ]:          0 :                 if (xas.xa_index == ULONG_MAX)
    1483                 :            :                         break;
    1484                 :            :         }
    1485                 :            : 
    1486                 :          0 :         return xas.xa_index;
    1487                 :            : }
    1488                 :            : EXPORT_SYMBOL(page_cache_prev_miss);
    1489                 :            : 
    1490                 :            : /**
    1491                 :            :  * find_get_entry - find and get a page cache entry
    1492                 :            :  * @mapping: the address_space to search
    1493                 :            :  * @offset: the page cache index
    1494                 :            :  *
    1495                 :            :  * Looks up the page cache slot at @mapping & @offset.  If there is a
    1496                 :            :  * page cache page, it is returned with an increased refcount.
    1497                 :            :  *
    1498                 :            :  * If the slot holds a shadow entry of a previously evicted page, or a
    1499                 :            :  * swap entry from shmem/tmpfs, it is returned.
    1500                 :            :  *
    1501                 :            :  * Return: the found page or shadow entry, %NULL if nothing is found.
    1502                 :            :  */
    1503                 :     349510 : struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
    1504                 :            : {
    1505                 :     349510 :         XA_STATE(xas, &mapping->i_pages, offset);
    1506                 :     349510 :         struct page *page;
    1507                 :            : 
    1508                 :     349510 :         rcu_read_lock();
    1509                 :            : repeat:
    1510                 :     349510 :         xas_reset(&xas);
    1511                 :     349510 :         page = xas_load(&xas);
    1512         [ +  - ]:     349510 :         if (xas_retry(&xas, page))
    1513                 :          0 :                 goto repeat;
    1514                 :            :         /*
    1515                 :            :          * A shadow entry of a recently evicted page, or a swap entry from
    1516                 :            :          * shmem/tmpfs.  Return it without attempting to raise page count.
    1517                 :            :          */
    1518   [ +  +  -  + ]:     349510 :         if (!page || xa_is_value(page))
    1519                 :      66241 :                 goto out;
    1520                 :            : 
    1521                 :     283269 :         if (!page_cache_get_speculative(page))
    1522                 :          0 :                 goto repeat;
    1523                 :            : 
    1524                 :            :         /*
    1525                 :            :          * Has the page moved or been split?
    1526                 :            :          * This is part of the lockless pagecache protocol. See
    1527                 :            :          * include/linux/pagemap.h for details.
    1528                 :            :          */
    1529   [ +  +  -  + ]:     566538 :         if (unlikely(page != xas_reload(&xas))) {
    1530                 :          0 :                 put_page(page);
    1531                 :          0 :                 goto repeat;
    1532                 :            :         }
    1533                 :     283269 :         page = find_subpage(page, offset);
    1534                 :     349510 : out:
    1535                 :     349510 :         rcu_read_unlock();
    1536                 :            : 
    1537                 :     349510 :         return page;
    1538                 :            : }
    1539                 :            : EXPORT_SYMBOL(find_get_entry);
    1540                 :            : 
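A minimal caller sketch (hypothetical helper, not part of filemap.c) showing the two cases described above: value entries carry no reference, while real pages must be released with put_page().

static bool index_has_uptodate_page(struct address_space *mapping,
				    pgoff_t index)
{
	struct page *page = find_get_entry(mapping, index);
	bool uptodate;

	if (!page)
		return false;
	if (xa_is_value(page))		/* shadow or swap entry: no refcount taken */
		return false;
	uptodate = PageUptodate(page);
	put_page(page);			/* drop the reference find_get_entry took */
	return uptodate;
}
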
    1541                 :            : /**
    1542                 :            :  * find_lock_entry - locate, pin and lock a page cache entry
    1543                 :            :  * @mapping: the address_space to search
    1544                 :            :  * @offset: the page cache index
    1545                 :            :  *
    1546                 :            :  * Looks up the page cache slot at @mapping & @offset.  If there is a
    1547                 :            :  * page cache page, it is returned locked and with an increased
    1548                 :            :  * refcount.
    1549                 :            :  *
    1550                 :            :  * If the slot holds a shadow entry of a previously evicted page, or a
    1551                 :            :  * swap entry from shmem/tmpfs, it is returned.
    1552                 :            :  *
    1553                 :            :  * find_lock_entry() may sleep.
    1554                 :            :  *
    1555                 :            :  * Return: the found page or shadow entry, %NULL if nothing is found.
    1556                 :            :  */
    1557                 :      20041 : struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
    1558                 :            : {
    1559                 :      20041 :         struct page *page;
    1560                 :            : 
    1561                 :      20041 : repeat:
    1562                 :      20041 :         page = find_get_entry(mapping, offset);
    1563   [ +  +  +  - ]:      20041 :         if (page && !xa_is_value(page)) {
    1564                 :       2946 :                 lock_page(page);
    1565                 :            :                 /* Has the page been truncated? */
    1566         [ -  + ]:       2946 :                 if (unlikely(page_mapping(page) != mapping)) {
    1567                 :          0 :                         unlock_page(page);
    1568                 :          0 :                         put_page(page);
    1569                 :          0 :                         goto repeat;
    1570                 :            :                 }
    1571                 :      20041 :                 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
    1572                 :            :         }
    1573                 :      20041 :         return page;
    1574                 :            : }
    1575                 :            : EXPORT_SYMBOL(find_lock_entry);
    1576                 :            : 
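A hypothetical caller following the lock/put discipline described above (the helper name is illustrative):

static void dirty_cached_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_lock_entry(mapping, index);

	if (!page || xa_is_value(page))	/* nothing cached, or a value entry */
		return;
	/* Here the page is locked and holds an extra reference. */
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
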
    1577                 :            : /**
    1578                 :            :  * pagecache_get_page - find and get a page reference
    1579                 :            :  * @mapping: the address_space to search
    1580                 :            :  * @offset: the page index
     1581                 :            :  * @fgp_flags: FGP flags
    1582                 :            :  * @gfp_mask: gfp mask to use for the page cache data page allocation
    1583                 :            :  *
    1584                 :            :  * Looks up the page cache slot at @mapping & @offset.
    1585                 :            :  *
     1586                 :            :  * FGP flags modify how the page is returned.
    1587                 :            :  *
    1588                 :            :  * @fgp_flags can be:
    1589                 :            :  *
    1590                 :            :  * - FGP_ACCESSED: the page will be marked accessed
     1591                 :            :  * - FGP_LOCK: Page is returned locked
    1592                 :            :  * - FGP_CREAT: If page is not present then a new page is allocated using
    1593                 :            :  *   @gfp_mask and added to the page cache and the VM's LRU
    1594                 :            :  *   list. The page is returned locked and with an increased
    1595                 :            :  *   refcount.
    1596                 :            :  * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
    1597                 :            :  *   its own locking dance if the page is already in cache, or unlock the page
    1598                 :            :  *   before returning if we had to add the page to pagecache.
    1599                 :            :  *
    1600                 :            :  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
    1601                 :            :  * if the GFP flags specified for FGP_CREAT are atomic.
    1602                 :            :  *
    1603                 :            :  * If there is a page cache page, it is returned with an increased refcount.
    1604                 :            :  *
    1605                 :            :  * Return: the found page or %NULL otherwise.
    1606                 :            :  */
    1607                 :     329469 : struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
    1608                 :            :         int fgp_flags, gfp_t gfp_mask)
    1609                 :            : {
    1610                 :     329469 :         struct page *page;
    1611                 :            : 
    1612                 :            : repeat:
    1613                 :     329469 :         page = find_get_entry(mapping, offset);
    1614         [ +  - ]:     329469 :         if (xa_is_value(page))
    1615                 :            :                 page = NULL;
    1616         [ +  + ]:     329469 :         if (!page)
    1617                 :      49146 :                 goto no_page;
    1618                 :            : 
    1619         [ +  + ]:     280323 :         if (fgp_flags & FGP_LOCK) {
    1620         [ -  + ]:        802 :                 if (fgp_flags & FGP_NOWAIT) {
    1621   [ #  #  #  # ]:          0 :                         if (!trylock_page(page)) {
    1622                 :          0 :                                 put_page(page);
    1623                 :          0 :                                 return NULL;
    1624                 :            :                         }
    1625                 :            :                 } else {
    1626                 :        802 :                         lock_page(page);
    1627                 :            :                 }
    1628                 :            : 
    1629                 :            :                 /* Has the page been truncated? */
    1630   [ -  +  -  + ]:        802 :                 if (unlikely(compound_head(page)->mapping != mapping)) {
    1631                 :          0 :                         unlock_page(page);
    1632                 :          0 :                         put_page(page);
    1633                 :          0 :                         goto repeat;
    1634                 :            :                 }
    1635                 :     280323 :                 VM_BUG_ON_PAGE(page->index != offset, page);
    1636                 :            :         }
    1637                 :            : 
    1638         [ +  + ]:     280323 :         if (fgp_flags & FGP_ACCESSED)
    1639                 :      26987 :                 mark_page_accessed(page);
    1640                 :            : 
    1641                 :     253336 : no_page:
    1642   [ +  +  +  + ]:     329469 :         if (!page && (fgp_flags & FGP_CREAT)) {
    1643                 :      16937 :                 int err;
    1644   [ +  +  +  - ]:      16937 :                 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
    1645                 :       5051 :                         gfp_mask |= __GFP_WRITE;
    1646         [ -  + ]:      16937 :                 if (fgp_flags & FGP_NOFS)
    1647                 :          0 :                         gfp_mask &= ~__GFP_FS;
    1648                 :            : 
    1649                 :      16937 :                 page = __page_cache_alloc(gfp_mask);
    1650         [ +  - ]:      16937 :                 if (!page)
    1651                 :            :                         return NULL;
    1652                 :            : 
    1653   [ -  +  -  + ]:      16937 :                 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
    1654                 :          0 :                         fgp_flags |= FGP_LOCK;
    1655                 :            : 
     1656                 :            :                 /* Init accessed to avoid an atomic mark_page_accessed later */
    1657         [ +  + ]:      16937 :                 if (fgp_flags & FGP_ACCESSED)
    1658         [ -  + ]:      11886 :                         __SetPageReferenced(page);
    1659                 :            : 
    1660                 :      16937 :                 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
    1661         [ -  + ]:      16937 :                 if (unlikely(err)) {
    1662                 :          0 :                         put_page(page);
    1663                 :          0 :                         page = NULL;
    1664         [ #  # ]:          0 :                         if (err == -EEXIST)
    1665                 :          0 :                                 goto repeat;
    1666                 :            :                 }
    1667                 :            : 
    1668                 :            :                 /*
    1669                 :            :                  * add_to_page_cache_lru locks the page, and for mmap we expect
    1670                 :            :                  * an unlocked page.
    1671                 :            :                  */
    1672   [ +  -  -  + ]:      16937 :                 if (page && (fgp_flags & FGP_FOR_MMAP))
    1673                 :          0 :                         unlock_page(page);
    1674                 :            :         }
    1675                 :            : 
    1676                 :            :         return page;
    1677                 :            : }
    1678                 :            : EXPORT_SYMBOL(pagecache_get_page);
    1679                 :            : 
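The common wrappers in include/linux/pagemap.h are thin layers over this function; find_or_create_page(), for instance, is essentially the following combination of flags:

static inline struct page *find_or_create_page(struct address_space *mapping,
					       pgoff_t index, gfp_t gfp_mask)
{
	/* Lock the page, mark it accessed, and allocate it if missing. */
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  gfp_mask);
}
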
    1680                 :            : /**
    1681                 :            :  * find_get_entries - gang pagecache lookup
    1682                 :            :  * @mapping:    The address_space to search
    1683                 :            :  * @start:      The starting page cache index
    1684                 :            :  * @nr_entries: The maximum number of entries
    1685                 :            :  * @entries:    Where the resulting entries are placed
    1686                 :            :  * @indices:    The cache indices corresponding to the entries in @entries
    1687                 :            :  *
    1688                 :            :  * find_get_entries() will search for and return a group of up to
    1689                 :            :  * @nr_entries entries in the mapping.  The entries are placed at
    1690                 :            :  * @entries.  find_get_entries() takes a reference against any actual
    1691                 :            :  * pages it returns.
    1692                 :            :  *
    1693                 :            :  * The search returns a group of mapping-contiguous page cache entries
    1694                 :            :  * with ascending indexes.  There may be holes in the indices due to
    1695                 :            :  * not-present pages.
    1696                 :            :  *
    1697                 :            :  * Any shadow entries of evicted pages, or swap entries from
    1698                 :            :  * shmem/tmpfs, are included in the returned array.
    1699                 :            :  *
    1700                 :            :  * Return: the number of pages and shadow entries which were found.
    1701                 :            :  */
    1702                 :       4100 : unsigned find_get_entries(struct address_space *mapping,
    1703                 :            :                           pgoff_t start, unsigned int nr_entries,
    1704                 :            :                           struct page **entries, pgoff_t *indices)
    1705                 :            : {
    1706                 :       4100 :         XA_STATE(xas, &mapping->i_pages, start);
    1707                 :       4100 :         struct page *page;
    1708                 :       4100 :         unsigned int ret = 0;
    1709                 :            : 
    1710         [ +  - ]:       4100 :         if (!nr_entries)
    1711                 :            :                 return 0;
    1712                 :            : 
    1713                 :       4100 :         rcu_read_lock();
    1714         [ +  + ]:      11016 :         xas_for_each(&xas, page, ULONG_MAX) {
    1715         [ +  - ]:       7358 :                 if (xas_retry(&xas, page))
    1716                 :          0 :                         continue;
    1717                 :            :                 /*
    1718                 :            :                  * A shadow entry of a recently evicted page, a swap
    1719                 :            :                  * entry from shmem/tmpfs or a DAX entry.  Return it
    1720                 :            :                  * without attempting to raise page count.
    1721                 :            :                  */
    1722         [ -  + ]:       7358 :                 if (xa_is_value(page))
    1723                 :          0 :                         goto export;
    1724                 :            : 
    1725                 :       7358 :                 if (!page_cache_get_speculative(page))
    1726                 :          0 :                         goto retry;
    1727                 :            : 
    1728                 :            :                 /* Has the page moved or been split? */
    1729   [ +  +  -  + ]:      14716 :                 if (unlikely(page != xas_reload(&xas)))
    1730                 :          0 :                         goto put_page;
    1731                 :       7358 :                 page = find_subpage(page, xas.xa_index);
    1732                 :            : 
    1733                 :       7358 : export:
    1734                 :       7358 :                 indices[ret] = xas.xa_index;
    1735                 :       7358 :                 entries[ret] = page;
    1736         [ +  + ]:       7358 :                 if (++ret == nr_entries)
    1737                 :            :                         break;
    1738                 :       6916 :                 continue;
    1739                 :            : put_page:
    1740                 :          0 :                 put_page(page);
    1741                 :          0 : retry:
    1742                 :          0 :                 xas_reset(&xas);
    1743                 :            :         }
    1744                 :       4100 :         rcu_read_unlock();
    1745                 :       4100 :         return ret;
    1746                 :            : }
    1747                 :            : 
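A sketch of the usual calling pattern (hypothetical helper; real callers such as truncation go through pagevec_lookup_entries()): walk the returned batch, skip value entries, and drop the references taken on real pages.

static void scan_entry_batch(struct address_space *mapping, pgoff_t start)
{
	struct page *entries[PAGEVEC_SIZE];
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i, nr;

	nr = find_get_entries(mapping, start, PAGEVEC_SIZE, entries, indices);
	for (i = 0; i < nr; i++) {
		struct page *page = entries[i];

		if (xa_is_value(page))	/* shadow/swap entry: nothing to release */
			continue;
		/* ... use the page cached at index indices[i] ... */
		put_page(page);
	}
}
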
    1748                 :            : /**
    1749                 :            :  * find_get_pages_range - gang pagecache lookup
    1750                 :            :  * @mapping:    The address_space to search
    1751                 :            :  * @start:      The starting page index
    1752                 :            :  * @end:        The final page index (inclusive)
    1753                 :            :  * @nr_pages:   The maximum number of pages
    1754                 :            :  * @pages:      Where the resulting pages are placed
    1755                 :            :  *
    1756                 :            :  * find_get_pages_range() will search for and return a group of up to @nr_pages
    1757                 :            :  * pages in the mapping starting at index @start and up to index @end
    1758                 :            :  * (inclusive).  The pages are placed at @pages.  find_get_pages_range() takes
    1759                 :            :  * a reference against the returned pages.
    1760                 :            :  *
    1761                 :            :  * The search returns a group of mapping-contiguous pages with ascending
    1762                 :            :  * indexes.  There may be holes in the indices due to not-present pages.
    1763                 :            :  * We also update @start to index the next page for the traversal.
    1764                 :            :  *
    1765                 :            :  * Return: the number of pages which were found. If this number is
    1766                 :            :  * smaller than @nr_pages, the end of specified range has been
    1767                 :            :  * reached.
    1768                 :            :  */
    1769                 :       5272 : unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
    1770                 :            :                               pgoff_t end, unsigned int nr_pages,
    1771                 :            :                               struct page **pages)
    1772                 :            : {
    1773                 :       5272 :         XA_STATE(xas, &mapping->i_pages, *start);
    1774                 :       5272 :         struct page *page;
    1775                 :       5272 :         unsigned ret = 0;
    1776                 :            : 
    1777         [ +  - ]:       5272 :         if (unlikely(!nr_pages))
    1778                 :            :                 return 0;
    1779                 :            : 
    1780                 :       5272 :         rcu_read_lock();
    1781         [ +  + ]:       5480 :         xas_for_each(&xas, page, end) {
    1782         [ +  - ]:        208 :                 if (xas_retry(&xas, page))
    1783                 :          0 :                         continue;
    1784                 :            :                 /* Skip over shadow, swap and DAX entries */
    1785         [ -  + ]:        208 :                 if (xa_is_value(page))
    1786                 :          0 :                         continue;
    1787                 :            : 
    1788                 :        208 :                 if (!page_cache_get_speculative(page))
    1789                 :          0 :                         goto retry;
    1790                 :            : 
    1791                 :            :                 /* Has the page moved or been split? */
    1792   [ -  +  -  + ]:        416 :                 if (unlikely(page != xas_reload(&xas)))
    1793                 :          0 :                         goto put_page;
    1794                 :            : 
    1795                 :        208 :                 pages[ret] = find_subpage(page, xas.xa_index);
    1796         [ -  + ]:        208 :                 if (++ret == nr_pages) {
    1797                 :          0 :                         *start = xas.xa_index + 1;
    1798                 :          0 :                         goto out;
    1799                 :            :                 }
    1800                 :        208 :                 continue;
    1801                 :            : put_page:
    1802                 :          0 :                 put_page(page);
    1803                 :          0 : retry:
    1804                 :          0 :                 xas_reset(&xas);
    1805                 :            :         }
    1806                 :            : 
    1807                 :            :         /*
    1808                 :            :          * We come here when there is no page beyond @end. We take care to not
    1809                 :            :          * overflow the index @start as it confuses some of the callers. This
    1810                 :            :          * breaks the iteration when there is a page at index -1 but that is
    1811                 :            :          * already broken anyway.
    1812                 :            :          */
    1813         [ -  + ]:       5272 :         if (end == (pgoff_t)-1)
    1814                 :          0 :                 *start = (pgoff_t)-1;
    1815                 :            :         else
    1816                 :       5272 :                 *start = end + 1;
    1817                 :       5272 : out:
    1818                 :       5272 :         rcu_read_unlock();
    1819                 :            : 
    1820                 :       5272 :         return ret;
    1821                 :            : }
    1822                 :            : 
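Because @start is advanced past the last page returned, range walks are usually written as a batched loop. A sketch with a hypothetical helper:

static unsigned long count_cached_range(struct address_space *mapping,
					pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned long total = 0;
	unsigned int i, nr;

	do {
		nr = find_get_pages_range(mapping, &start, end,
					  ARRAY_SIZE(pages), pages);
		for (i = 0; i < nr; i++)
			put_page(pages[i]);
		total += nr;
		/* A short batch means the end of the range was reached. */
	} while (nr == ARRAY_SIZE(pages));

	return total;
}
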
    1823                 :            : /**
    1824                 :            :  * find_get_pages_contig - gang contiguous pagecache lookup
    1825                 :            :  * @mapping:    The address_space to search
    1826                 :            :  * @index:      The starting page index
    1827                 :            :  * @nr_pages:   The maximum number of pages
    1828                 :            :  * @pages:      Where the resulting pages are placed
    1829                 :            :  *
    1830                 :            :  * find_get_pages_contig() works exactly like find_get_pages(), except
     1831                 :            :  * that the pages it returns are guaranteed to be contiguous.
    1832                 :            :  *
    1833                 :            :  * Return: the number of pages which were found.
    1834                 :            :  */
    1835                 :          0 : unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
    1836                 :            :                                unsigned int nr_pages, struct page **pages)
    1837                 :            : {
    1838                 :          0 :         XA_STATE(xas, &mapping->i_pages, index);
    1839                 :          0 :         struct page *page;
    1840                 :          0 :         unsigned int ret = 0;
    1841                 :            : 
    1842         [ #  # ]:          0 :         if (unlikely(!nr_pages))
    1843                 :            :                 return 0;
    1844                 :            : 
    1845                 :          0 :         rcu_read_lock();
    1846         [ #  # ]:          0 :         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
    1847         [ #  # ]:          0 :                 if (xas_retry(&xas, page))
    1848                 :          0 :                         continue;
    1849                 :            :                 /*
    1850                 :            :                  * If the entry has been swapped out, we can stop looking.
    1851                 :            :                  * No current caller is looking for DAX entries.
    1852                 :            :                  */
    1853         [ #  # ]:          0 :                 if (xa_is_value(page))
    1854                 :            :                         break;
    1855                 :            : 
    1856                 :          0 :                 if (!page_cache_get_speculative(page))
    1857                 :          0 :                         goto retry;
    1858                 :            : 
    1859                 :            :                 /* Has the page moved or been split? */
    1860   [ #  #  #  # ]:          0 :                 if (unlikely(page != xas_reload(&xas)))
    1861                 :          0 :                         goto put_page;
    1862                 :            : 
    1863                 :          0 :                 pages[ret] = find_subpage(page, xas.xa_index);
    1864         [ #  # ]:          0 :                 if (++ret == nr_pages)
    1865                 :            :                         break;
    1866                 :          0 :                 continue;
    1867                 :            : put_page:
    1868                 :          0 :                 put_page(page);
    1869                 :          0 : retry:
    1870                 :          0 :                 xas_reset(&xas);
    1871                 :            :         }
    1872                 :          0 :         rcu_read_unlock();
    1873                 :          0 :         return ret;
    1874                 :            : }
    1875                 :            : EXPORT_SYMBOL(find_get_pages_contig);
    1876                 :            : 
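One way a caller might exploit the contiguity guarantee (hypothetical helper): check whether a small byte range is fully populated in the page cache.

static bool range_fully_cached(struct address_space *mapping,
			       loff_t pos, size_t len)
{
	struct page *pages[16];
	pgoff_t first, last;
	unsigned int i, nr, want;
	bool ret;

	if (!len)
		return true;
	first = pos >> PAGE_SHIFT;
	last = (pos + len - 1) >> PAGE_SHIFT;
	want = last - first + 1;
	if (want > ARRAY_SIZE(pages))
		return false;		/* keep the sketch to small ranges */

	nr = find_get_pages_contig(mapping, first, want, pages);
	ret = (nr == want);		/* a short run means a hole was hit */
	for (i = 0; i < nr; i++)
		put_page(pages[i]);
	return ret;
}
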
    1877                 :            : /**
    1878                 :            :  * find_get_pages_range_tag - find and return pages in given range matching @tag
    1879                 :            :  * @mapping:    the address_space to search
    1880                 :            :  * @index:      the starting page index
    1881                 :            :  * @end:        The final page index (inclusive)
    1882                 :            :  * @tag:        the tag index
    1883                 :            :  * @nr_pages:   the maximum number of pages
    1884                 :            :  * @pages:      where the resulting pages are placed
    1885                 :            :  *
    1886                 :            :  * Like find_get_pages, except we only return pages which are tagged with
    1887                 :            :  * @tag.   We update @index to index the next page for the traversal.
    1888                 :            :  *
    1889                 :            :  * Return: the number of pages which were found.
    1890                 :            :  */
    1891                 :        533 : unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
    1892                 :            :                         pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
    1893                 :            :                         struct page **pages)
    1894                 :            : {
    1895                 :        533 :         XA_STATE(xas, &mapping->i_pages, *index);
    1896                 :        533 :         struct page *page;
    1897                 :        533 :         unsigned ret = 0;
    1898                 :            : 
    1899         [ +  - ]:        533 :         if (unlikely(!nr_pages))
    1900                 :            :                 return 0;
    1901                 :            : 
    1902                 :        533 :         rcu_read_lock();
    1903         [ +  + ]:        832 :         xas_for_each_marked(&xas, page, end, tag) {
    1904         [ +  - ]:        299 :                 if (xas_retry(&xas, page))
    1905                 :          0 :                         continue;
    1906                 :            :                 /*
    1907                 :            :                  * Shadow entries should never be tagged, but this iteration
    1908                 :            :                  * is lockless so there is a window for page reclaim to evict
    1909                 :            :                  * a page we saw tagged.  Skip over it.
    1910                 :            :                  */
    1911         [ -  + ]:        299 :                 if (xa_is_value(page))
    1912                 :          0 :                         continue;
    1913                 :            : 
    1914                 :        299 :                 if (!page_cache_get_speculative(page))
    1915                 :          0 :                         goto retry;
    1916                 :            : 
    1917                 :            :                 /* Has the page moved or been split? */
    1918   [ -  +  -  + ]:        598 :                 if (unlikely(page != xas_reload(&xas)))
    1919                 :          0 :                         goto put_page;
    1920                 :            : 
    1921                 :        299 :                 pages[ret] = find_subpage(page, xas.xa_index);
    1922         [ -  + ]:        299 :                 if (++ret == nr_pages) {
    1923                 :          0 :                         *index = xas.xa_index + 1;
    1924                 :          0 :                         goto out;
    1925                 :            :                 }
    1926                 :        299 :                 continue;
    1927                 :            : put_page:
    1928                 :          0 :                 put_page(page);
    1929                 :          0 : retry:
    1930                 :          0 :                 xas_reset(&xas);
    1931                 :            :         }
    1932                 :            : 
    1933                 :            :         /*
    1934                 :            :          * We come here when we got to @end. We take care to not overflow the
    1935                 :            :          * index @index as it confuses some of the callers. This breaks the
    1936                 :            :          * iteration when there is a page at index -1 but that is already
    1937                 :            :          * broken anyway.
    1938                 :            :          */
    1939         [ -  + ]:        533 :         if (end == (pgoff_t)-1)
    1940                 :          0 :                 *index = (pgoff_t)-1;
    1941                 :            :         else
    1942                 :        533 :                 *index = end + 1;
    1943                 :        533 : out:
    1944                 :        533 :         rcu_read_unlock();
    1945                 :            : 
    1946                 :        533 :         return ret;
    1947                 :            : }
    1948                 :            : EXPORT_SYMBOL(find_get_pages_range_tag);
    1949                 :            : 
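Writeback is the typical consumer of the tagged lookup; a simplified loop over dirty pages (hypothetical helper, mirroring what write_cache_pages() does via pagevecs) might look like:

static unsigned long count_dirty_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned long total = 0;
	unsigned int i, nr;

	do {
		nr = find_get_pages_range_tag(mapping, &start, end,
					      PAGECACHE_TAG_DIRTY,
					      ARRAY_SIZE(pages), pages);
		for (i = 0; i < nr; i++)
			put_page(pages[i]);
		total += nr;
	} while (nr == ARRAY_SIZE(pages));

	return total;
}
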
    1950                 :            : /*
    1951                 :            :  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
    1952                 :            :  * a _large_ part of the i/o request. Imagine the worst scenario:
    1953                 :            :  *
    1954                 :            :  *      ---R__________________________________________B__________
     1955                 :            :  *      ^ reading here                             ^ bad block (assume 4k)
    1956                 :            :  *
    1957                 :            :  * read(R) => miss => readahead(R...B) => media error => frustrating retries
    1958                 :            :  * => failing the whole request => read(R) => read(R+1) =>
    1959                 :            :  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
    1960                 :            :  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
    1961                 :            :  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
    1962                 :            :  *
    1963                 :            :  * It is going insane. Fix it by quickly scaling down the readahead size.
    1964                 :            :  */
    1965                 :          0 : static void shrink_readahead_size_eio(struct file *filp,
    1966                 :            :                                         struct file_ra_state *ra)
    1967                 :            : {
    1968                 :          0 :         ra->ra_pages /= 4;
    1969                 :            : }
    1970                 :            : 
    1971                 :            : /**
    1972                 :            :  * generic_file_buffered_read - generic file read routine
    1973                 :            :  * @iocb:       the iocb to read
    1974                 :            :  * @iter:       data destination
    1975                 :            :  * @written:    already copied
    1976                 :            :  *
    1977                 :            :  * This is a generic file read routine, and uses the
    1978                 :            :  * mapping->a_ops->readpage() function for the actual low-level stuff.
    1979                 :            :  *
     1980                 :            :  * This is really ugly. But the gotos actually try to clarify some
    1981                 :            :  * of the logic when it comes to error handling etc.
    1982                 :            :  *
    1983                 :            :  * Return:
     1984                 :            :  * * total number of bytes copied, including those that were already @written
    1985                 :            :  * * negative error code if nothing was copied
    1986                 :            :  */
    1987                 :      96590 : static ssize_t generic_file_buffered_read(struct kiocb *iocb,
    1988                 :            :                 struct iov_iter *iter, ssize_t written)
    1989                 :            : {
    1990                 :      96590 :         struct file *filp = iocb->ki_filp;
    1991                 :      96590 :         struct address_space *mapping = filp->f_mapping;
    1992                 :      96590 :         struct inode *inode = mapping->host;
    1993                 :      96590 :         struct file_ra_state *ra = &filp->f_ra;
    1994                 :      96590 :         loff_t *ppos = &iocb->ki_pos;
    1995                 :      96590 :         pgoff_t index;
    1996                 :      96590 :         pgoff_t last_index;
    1997                 :      96590 :         pgoff_t prev_index;
    1998                 :      96590 :         unsigned long offset;      /* offset into pagecache page */
    1999                 :      96590 :         unsigned int prev_offset;
    2000                 :      96590 :         int error = 0;
    2001                 :            : 
    2002         [ +  - ]:      96590 :         if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
    2003                 :            :                 return 0;
    2004         [ -  + ]:      96590 :         iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
    2005                 :            : 
    2006                 :      96590 :         index = *ppos >> PAGE_SHIFT;
    2007                 :      96590 :         prev_index = ra->prev_pos >> PAGE_SHIFT;
    2008                 :      96590 :         prev_offset = ra->prev_pos & (PAGE_SIZE-1);
    2009                 :      96590 :         last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
    2010                 :      96590 :         offset = *ppos & ~PAGE_MASK;
    2011                 :            : 
    2012                 :     158210 :         for (;;) {
    2013                 :     158210 :                 struct page *page;
    2014                 :     158210 :                 pgoff_t end_index;
    2015                 :     158210 :                 loff_t isize;
    2016                 :     158210 :                 unsigned long nr, ret;
    2017                 :            : 
    2018                 :     158210 :                 cond_resched();
    2019                 :            : find_page:
    2020         [ -  + ]:     158210 :                 if (fatal_signal_pending(current)) {
    2021                 :          0 :                         error = -EINTR;
    2022                 :          0 :                         goto out;
    2023                 :            :                 }
    2024                 :            : 
    2025                 :     158210 :                 page = find_get_page(mapping, index);
    2026         [ +  + ]:     158210 :                 if (!page) {
    2027         [ -  + ]:       5148 :                         if (iocb->ki_flags & IOCB_NOWAIT)
    2028                 :          0 :                                 goto would_block;
    2029                 :       5148 :                         page_cache_sync_readahead(mapping,
    2030                 :            :                                         ra, filp,
    2031                 :            :                                         index, last_index - index);
    2032                 :       5148 :                         page = find_get_page(mapping, index);
    2033         [ +  + ]:       5148 :                         if (unlikely(page == NULL))
    2034                 :         13 :                                 goto no_cached_page;
    2035                 :            :                 }
    2036         [ +  + ]:     158197 :                 if (PageReadahead(page)) {
    2037                 :        338 :                         page_cache_async_readahead(mapping,
    2038                 :            :                                         ra, filp, page,
    2039                 :            :                                         index, last_index - index);
    2040                 :            :                 }
    2041         [ +  + ]:     158197 :                 if (!PageUptodate(page)) {
    2042         [ -  + ]:       5015 :                         if (iocb->ki_flags & IOCB_NOWAIT) {
    2043                 :          0 :                                 put_page(page);
    2044                 :          0 :                                 goto would_block;
    2045                 :            :                         }
    2046                 :            : 
    2047                 :            :                         /*
    2048                 :            :                          * See comment in do_read_cache_page on why
    2049                 :            :                          * wait_on_page_locked is used to avoid unnecessarily
    2050                 :            :                          * serialisations and why it's safe.
    2051                 :            :                          */
    2052                 :       5015 :                         error = wait_on_page_locked_killable(page);
    2053         [ -  + ]:       5015 :                         if (unlikely(error))
    2054                 :          0 :                                 goto readpage_error;
    2055         [ +  + ]:       5015 :                         if (PageUptodate(page))
    2056                 :       4976 :                                 goto page_ok;
    2057                 :            : 
    2058         [ -  + ]:         39 :                         if (inode->i_blkbits == PAGE_SHIFT ||
    2059         [ #  # ]:          0 :                                         !mapping->a_ops->is_partially_uptodate)
    2060                 :         39 :                                 goto page_not_up_to_date;
    2061                 :            :                         /* pipes can't handle partially uptodate pages */
    2062         [ #  # ]:          0 :                         if (unlikely(iov_iter_is_pipe(iter)))
    2063                 :          0 :                                 goto page_not_up_to_date;
    2064   [ #  #  #  # ]:          0 :                         if (!trylock_page(page))
    2065                 :          0 :                                 goto page_not_up_to_date;
    2066                 :            :                         /* Did it get truncated before we got the lock? */
    2067         [ #  # ]:          0 :                         if (!page->mapping)
    2068                 :          0 :                                 goto page_not_up_to_date_locked;
    2069         [ #  # ]:          0 :                         if (!mapping->a_ops->is_partially_uptodate(page,
    2070                 :            :                                                         offset, iter->count))
    2071                 :          0 :                                 goto page_not_up_to_date_locked;
    2072                 :          0 :                         unlock_page(page);
    2073                 :            :                 }
    2074                 :     153182 : page_ok:
    2075                 :            :                 /*
    2076                 :            :                  * i_size must be checked after we know the page is Uptodate.
    2077                 :            :                  *
    2078                 :            :                  * Checking i_size after the check allows us to calculate
    2079                 :            :                  * the correct value for "nr", which means the zero-filled
    2080                 :            :                  * part of the page is not copied back to userspace (unless
    2081                 :            :                  * another truncate extends the file - this is desired though).
    2082                 :            :                  */
    2083                 :            : 
    2084         [ +  + ]:     158210 :                 isize = i_size_read(inode);
    2085                 :     158210 :                 end_index = (isize - 1) >> PAGE_SHIFT;
    2086         [ +  + ]:     158210 :                 if (unlikely(!isize || index > end_index)) {
    2087                 :         13 :                         put_page(page);
    2088                 :         13 :                         goto out;
    2089                 :            :                 }
    2090                 :            : 
    2091                 :            :                 /* nr is the maximum number of bytes to copy from this page */
    2092                 :     158197 :                 nr = PAGE_SIZE;
    2093         [ +  + ]:     158197 :                 if (index == end_index) {
    2094                 :      14352 :                         nr = ((isize - 1) & ~PAGE_MASK) + 1;
    2095         [ +  + ]:      14352 :                         if (nr <= offset) {
    2096                 :       8892 :                                 put_page(page);
    2097                 :       8892 :                                 goto out;
    2098                 :            :                         }
    2099                 :            :                 }
    2100                 :     149305 :                 nr = nr - offset;
    2101                 :            : 
    2102                 :            :                 /* If users can be writing to this page using arbitrary
    2103                 :            :                  * virtual addresses, take care about potential aliasing
    2104                 :            :                  * before reading the page on the kernel side.
    2105                 :            :                  */
    2106                 :     149305 :                 if (mapping_writably_mapped(mapping))
    2107                 :            :                         flush_dcache_page(page);
    2108                 :            : 
    2109                 :            :                 /*
    2110                 :            :                  * When a sequential read accesses a page several times,
    2111                 :            :                  * only mark it as accessed the first time.
    2112                 :            :                  */
    2113   [ +  +  +  + ]:     149305 :                 if (prev_index != index || offset != prev_offset)
    2114                 :     129376 :                         mark_page_accessed(page);
    2115                 :     149305 :                 prev_index = index;
    2116                 :            : 
    2117                 :            :                 /*
    2118                 :            :                  * Ok, we have the page, and it's up-to-date, so
    2119                 :            :                  * now we can copy it to user space...
    2120                 :            :                  */
    2121                 :            : 
    2122                 :     149305 :                 ret = copy_page_to_iter(page, offset, nr, iter);
    2123                 :     149305 :                 offset += ret;
    2124                 :     149305 :                 index += offset >> PAGE_SHIFT;
    2125                 :     149305 :                 offset &= ~PAGE_MASK;
    2126                 :     149305 :                 prev_offset = offset;
    2127                 :            : 
    2128                 :     149305 :                 put_page(page);
    2129                 :     149305 :                 written += ret;
    2130         [ +  + ]:     149305 :                 if (!iov_iter_count(iter))
    2131                 :      87685 :                         goto out;
    2132         [ -  + ]:      61620 :                 if (ret < nr) {
    2133                 :          0 :                         error = -EFAULT;
    2134                 :          0 :                         goto out;
    2135                 :            :                 }
    2136                 :      61620 :                 continue;
    2137                 :            : 
    2138                 :         39 : page_not_up_to_date:
    2139                 :            :                 /* Get exclusive access to the page ... */
    2140                 :         39 :                 error = lock_page_killable(page);
    2141         [ -  + ]:         39 :                 if (unlikely(error))
    2142                 :          0 :                         goto readpage_error;
    2143                 :            : 
    2144                 :         39 : page_not_up_to_date_locked:
    2145                 :            :                 /* Did it get truncated before we got the lock? */
    2146         [ -  + ]:         39 :                 if (!page->mapping) {
    2147                 :          0 :                         unlock_page(page);
    2148                 :          0 :                         put_page(page);
    2149                 :          0 :                         continue;
    2150                 :            :                 }
    2151                 :            : 
    2152                 :            :                 /* Did somebody else fill it already? */
    2153         [ -  + ]:         39 :                 if (PageUptodate(page)) {
    2154                 :          0 :                         unlock_page(page);
    2155                 :          0 :                         goto page_ok;
    2156                 :            :                 }
    2157                 :            : 
    2158                 :         39 : readpage:
    2159                 :            :                 /*
    2160                 :            :                  * A previous I/O error may have been due to temporary
    2161                 :            :                  * failures, eg. multipath errors.
    2162                 :            :                  * PG_error will be set again if readpage fails.
    2163                 :            :                  */
    2164         [ -  + ]:         52 :                 ClearPageError(page);
    2165                 :            :                 /* Start the actual read. The read will unlock the page. */
    2166                 :         52 :                 error = mapping->a_ops->readpage(filp, page);
    2167                 :            : 
    2168         [ -  + ]:         52 :                 if (unlikely(error)) {
    2169         [ #  # ]:          0 :                         if (error == AOP_TRUNCATED_PAGE) {
    2170                 :          0 :                                 put_page(page);
    2171                 :          0 :                                 error = 0;
    2172                 :          0 :                                 goto find_page;
    2173                 :            :                         }
    2174                 :          0 :                         goto readpage_error;
    2175                 :            :                 }
    2176                 :            : 
    2177         [ -  + ]:         52 :                 if (!PageUptodate(page)) {
    2178                 :          0 :                         error = lock_page_killable(page);
    2179         [ #  # ]:          0 :                         if (unlikely(error))
    2180                 :          0 :                                 goto readpage_error;
    2181         [ #  # ]:          0 :                         if (!PageUptodate(page)) {
    2182         [ #  # ]:          0 :                                 if (page->mapping == NULL) {
    2183                 :            :                                         /*
    2184                 :            :                                          * invalidate_mapping_pages got it
    2185                 :            :                                          */
    2186                 :          0 :                                         unlock_page(page);
    2187                 :          0 :                                         put_page(page);
    2188                 :          0 :                                         goto find_page;
    2189                 :            :                                 }
    2190                 :          0 :                                 unlock_page(page);
    2191                 :          0 :                                 shrink_readahead_size_eio(filp, ra);
    2192                 :          0 :                                 error = -EIO;
    2193                 :          0 :                                 goto readpage_error;
    2194                 :            :                         }
    2195                 :          0 :                         unlock_page(page);
    2196                 :            :                 }
    2197                 :            : 
    2198                 :         52 :                 goto page_ok;
    2199                 :            : 
    2200                 :          0 : readpage_error:
    2201                 :            :                 /* UHHUH! A synchronous read error occurred. Report it */
    2202                 :          0 :                 put_page(page);
    2203                 :          0 :                 goto out;
    2204                 :            : 
    2205                 :            : no_cached_page:
    2206                 :            :                 /*
    2207                 :            :                  * Ok, it wasn't cached, so we need to create a new
    2208                 :            :                  * page..
    2209                 :            :                  */
    2210                 :         13 :                 page = page_cache_alloc(mapping);
    2211         [ -  + ]:         13 :                 if (!page) {
    2212                 :          0 :                         error = -ENOMEM;
    2213                 :          0 :                         goto out;
    2214                 :            :                 }
    2215                 :         13 :                 error = add_to_page_cache_lru(page, mapping, index,
    2216                 :            :                                 mapping_gfp_constraint(mapping, GFP_KERNEL));
    2217         [ -  + ]:         13 :                 if (error) {
    2218                 :          0 :                         put_page(page);
    2219         [ #  # ]:          0 :                         if (error == -EEXIST) {
    2220                 :          0 :                                 error = 0;
    2221                 :          0 :                                 goto find_page;
    2222                 :            :                         }
    2223                 :          0 :                         goto out;
    2224                 :            :                 }
    2225                 :         13 :                 goto readpage;
    2226                 :            :         }
    2227                 :            : 
    2228                 :            : would_block:
    2229                 :            :         error = -EAGAIN;
    2230                 :      96590 : out:
    2231                 :      96590 :         ra->prev_pos = prev_index;
    2232                 :      96590 :         ra->prev_pos <<= PAGE_SHIFT;
    2233                 :      96590 :         ra->prev_pos |= prev_offset;
    2234                 :            : 
    2235                 :      96590 :         *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
    2236         [ +  - ]:      96590 :         file_accessed(filp);
    2237         [ +  + ]:      96590 :         return written ? written : error;
    2238                 :            : }
    2239                 :            : 
    2240                 :            : /**
    2241                 :            :  * generic_file_read_iter - generic filesystem read routine
    2242                 :            :  * @iocb:       kernel I/O control block
    2243                 :            :  * @iter:       destination for the data read
    2244                 :            :  *
    2245                 :            :  * This is the "read_iter()" routine for all filesystems
    2246                 :            :  * that can use the page cache directly.
    2247                 :            :  * Return:
    2248                 :            :  * * number of bytes copied, even for partial reads
    2249                 :            :  * * negative error code if nothing was read
    2250                 :            :  */
    2251                 :            : ssize_t
    2252                 :      96590 : generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
    2253                 :            : {
    2254         [ -  + ]:      96590 :         size_t count = iov_iter_count(iter);
    2255                 :      96590 :         ssize_t retval = 0;
    2256                 :            : 
    2257         [ -  + ]:      96590 :         if (!count)
    2258                 :          0 :                 goto out; /* skip atime */
    2259                 :            : 
    2260         [ -  + ]:      96590 :         if (iocb->ki_flags & IOCB_DIRECT) {
    2261                 :          0 :                 struct file *file = iocb->ki_filp;
    2262                 :          0 :                 struct address_space *mapping = file->f_mapping;
    2263                 :          0 :                 struct inode *inode = mapping->host;
    2264                 :          0 :                 loff_t size;
    2265                 :            : 
    2266         [ #  # ]:          0 :                 size = i_size_read(inode);
    2267         [ #  # ]:          0 :                 if (iocb->ki_flags & IOCB_NOWAIT) {
    2268         [ #  # ]:          0 :                         if (filemap_range_has_page(mapping, iocb->ki_pos,
    2269                 :          0 :                                                    iocb->ki_pos + count - 1))
    2270                 :            :                                 return -EAGAIN;
    2271                 :            :                 } else {
    2272                 :          0 :                         retval = filemap_write_and_wait_range(mapping,
    2273                 :            :                                                 iocb->ki_pos,
    2274                 :          0 :                                                 iocb->ki_pos + count - 1);
    2275         [ #  # ]:          0 :                         if (retval < 0)
    2276                 :          0 :                                 goto out;
    2277                 :            :                 }
    2278                 :            : 
    2279         [ #  # ]:          0 :                 file_accessed(file);
    2280                 :            : 
    2281                 :          0 :                 retval = mapping->a_ops->direct_IO(iocb, iter);
    2282         [ #  # ]:          0 :                 if (retval >= 0) {
    2283                 :          0 :                         iocb->ki_pos += retval;
    2284                 :          0 :                         count -= retval;
    2285                 :            :                 }
    2286                 :          0 :                 iov_iter_revert(iter, count - iov_iter_count(iter));
    2287                 :            : 
    2288                 :            :                 /*
    2289                 :            :                  * Btrfs can have a short DIO read if we encounter
    2290                 :            :                  * compressed extents, so if there was an error, or if
    2291                 :            :                  * we've already read everything we wanted to, or if
    2292                 :            :                  * there was a short read because we hit EOF, go ahead
     2293                 :            :                  * and return.  Otherwise fall through to buffered I/O for
    2294                 :            :                  * the rest of the read.  Buffered reads will not work for
    2295                 :            :                  * DAX files, so don't bother trying.
    2296                 :            :                  */
    2297   [ #  #  #  # ]:          0 :                 if (retval < 0 || !count || iocb->ki_pos >= size ||
    2298                 :            :                     IS_DAX(inode))
    2299                 :          0 :                         goto out;
    2300                 :            :         }
    2301                 :            : 
    2302                 :      96590 :         retval = generic_file_buffered_read(iocb, iter, retval);
    2303                 :            : out:
    2304                 :            :         return retval;
    2305                 :            : }
    2306                 :            : EXPORT_SYMBOL(generic_file_read_iter);
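/*
 * Editorial note (not part of filemap.c, and carrying no coverage data): a
 * minimal sketch of how a filesystem typically wires generic_file_read_iter()
 * into its file_operations so reads go through the page cache.  The
 * "examplefs" name is hypothetical; the referenced helpers are existing
 * generic VFS entry points.
 */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_mmap,
};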
    2307                 :            : 
    2308                 :            : #ifdef CONFIG_MMU
    2309                 :            : #define MMAP_LOTSAMISS  (100)
    2310                 :            : /*
    2311                 :            :  * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
     2312                 :            :  * @vmf: the vm_fault for this fault.
     2313                 :            :  * @page: the page to lock.
     2314                 :            :  * @fpin: the pointer to the file we may pin (or is already pinned).
    2315                 :            :  *
     2316                 :            :  * This works similarly to lock_page_or_retry in that it can drop the mmap_sem.
     2317                 :            :  * It differs in that it actually returns the page locked if it returns 1, and 0
    2318                 :            :  * if it couldn't lock the page.  If we did have to drop the mmap_sem then fpin
    2319                 :            :  * will point to the pinned file and needs to be fput()'ed at a later point.
    2320                 :            :  */
    2321                 :      94311 : static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
    2322                 :            :                                      struct file **fpin)
    2323                 :            : {
    2324   [ -  +  +  + ]:     188622 :         if (trylock_page(page))
    2325                 :            :                 return 1;
    2326                 :            : 
    2327                 :            :         /*
    2328                 :            :          * NOTE! This will make us return with VM_FAULT_RETRY, but with
    2329                 :            :          * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
    2330                 :            :          * is supposed to work. We have way too many special cases..
    2331                 :            :          */
    2332         [ +  - ]:       3677 :         if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
    2333                 :            :                 return 0;
    2334                 :            : 
    2335                 :       3677 :         *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
    2336         [ +  - ]:       3677 :         if (vmf->flags & FAULT_FLAG_KILLABLE) {
    2337         [ -  + ]:       3677 :                 if (__lock_page_killable(page)) {
    2338                 :            :                         /*
    2339                 :            :                          * We didn't have the right flags to drop the mmap_sem,
    2340                 :            :                          * but all fault_handlers only check for fatal signals
    2341                 :            :                          * if we return VM_FAULT_RETRY, so we need to drop the
    2342                 :            :                          * mmap_sem here and return 0 if we don't have a fpin.
    2343                 :            :                          */
    2344         [ #  # ]:          0 :                         if (*fpin == NULL)
    2345                 :          0 :                                 up_read(&vmf->vma->vm_mm->mmap_sem);
    2346                 :          0 :                         return 0;
    2347                 :            :                 }
    2348                 :            :         } else
    2349                 :          0 :                 __lock_page(page);
    2350                 :            :         return 1;
    2351                 :            : }
    2352                 :            : 
    2353                 :            : 
    2354                 :            : /*
    2355                 :            :  * Synchronous readahead happens when we don't even find a page in the page
     2356                 :            :  * cache at all.  We don't want to perform IO under the mmap_sem, so if we have
     2357                 :            :  * to drop the mmap_sem we return the file that was pinned in order to do so.
     2358                 :            :  * If we didn't pin a file then we return NULL.  The file that is
    2359                 :            :  * returned needs to be fput()'ed when we're done with it.
    2360                 :            :  */
    2361                 :       3406 : static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
    2362                 :            : {
    2363                 :       3406 :         struct file *file = vmf->vma->vm_file;
    2364                 :       3406 :         struct file_ra_state *ra = &file->f_ra;
    2365                 :       3406 :         struct address_space *mapping = file->f_mapping;
    2366                 :       3406 :         struct file *fpin = NULL;
    2367                 :       3406 :         pgoff_t offset = vmf->pgoff;
    2368                 :            : 
    2369                 :            :         /* If we don't want any read-ahead, don't bother */
    2370         [ +  - ]:       3406 :         if (vmf->vma->vm_flags & VM_RAND_READ)
    2371                 :            :                 return fpin;
    2372         [ +  - ]:       3406 :         if (!ra->ra_pages)
    2373                 :            :                 return fpin;
    2374                 :            : 
    2375         [ -  + ]:       3406 :         if (vmf->vma->vm_flags & VM_SEQ_READ) {
    2376                 :          0 :                 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
    2377                 :          0 :                 page_cache_sync_readahead(mapping, ra, file, offset,
    2378                 :          0 :                                           ra->ra_pages);
    2379                 :          0 :                 return fpin;
    2380                 :            :         }
    2381                 :            : 
    2382                 :            :         /* Avoid banging the cache line if not needed */
    2383         [ +  - ]:       3406 :         if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
    2384                 :       3406 :                 ra->mmap_miss++;
    2385                 :            : 
    2386                 :            :         /*
    2387                 :            :          * Do we miss much more than hit in this file? If so,
    2388                 :            :          * stop bothering with read-ahead. It will only hurt.
    2389                 :            :          */
    2390         [ +  - ]:       3406 :         if (ra->mmap_miss > MMAP_LOTSAMISS)
    2391                 :            :                 return fpin;
    2392                 :            : 
    2393                 :            :         /*
    2394                 :            :          * mmap read-around
    2395                 :            :          */
    2396                 :       3406 :         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
    2397                 :       3406 :         ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
    2398                 :       3406 :         ra->size = ra->ra_pages;
    2399                 :       3406 :         ra->async_size = ra->ra_pages / 4;
    2400                 :       3406 :         ra_submit(ra, mapping, file);
    2401                 :       3406 :         return fpin;
    2402                 :            : }
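/*
 * Editorial worked example (not in the original source): with the common
 * default of ra_pages == 32 (128KiB of read-ahead on 4KiB pages) and a fault
 * at pgoff 100, the read-around window set up above is
 *
 *	start      = max(0, 100 - 32/2) = 84
 *	size       = 32			(pages 84..115 are read)
 *	async_size = 32/4 = 8		(async readahead is re-triggered this
 *					 many pages before the window runs out)
 */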
    2403                 :            : 
    2404                 :            : /*
     2405                 :            :  * Asynchronous readahead happens when we find the page in the page cache with
     2406                 :            :  * PG_readahead set, so we may want to extend the readahead further.  We return the file that
    2407                 :            :  * was pinned if we have to drop the mmap_sem in order to do IO.
    2408                 :            :  */
    2409                 :      89487 : static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
    2410                 :            :                                             struct page *page)
    2411                 :            : {
    2412                 :      89487 :         struct file *file = vmf->vma->vm_file;
    2413                 :      89487 :         struct file_ra_state *ra = &file->f_ra;
    2414                 :      89487 :         struct address_space *mapping = file->f_mapping;
    2415                 :      89487 :         struct file *fpin = NULL;
    2416                 :      89487 :         pgoff_t offset = vmf->pgoff;
    2417                 :            : 
    2418                 :            :         /* If we don't want any read-ahead, don't bother */
    2419         [ +  - ]:      89487 :         if (vmf->vma->vm_flags & VM_RAND_READ)
    2420                 :            :                 return fpin;
    2421         [ +  + ]:      89487 :         if (ra->mmap_miss > 0)
    2422                 :         68 :                 ra->mmap_miss--;
    2423         [ +  + ]:      89487 :         if (PageReadahead(page)) {
    2424                 :       2574 :                 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
    2425                 :       2574 :                 page_cache_async_readahead(mapping, ra, file,
    2426                 :       2574 :                                            page, offset, ra->ra_pages);
    2427                 :            :         }
    2428                 :            :         return fpin;
    2429                 :            : }
    2430                 :            : 
    2431                 :            : /**
    2432                 :            :  * filemap_fault - read in file data for page fault handling
    2433                 :            :  * @vmf:        struct vm_fault containing details of the fault
    2434                 :            :  *
    2435                 :            :  * filemap_fault() is invoked via the vma operations vector for a
    2436                 :            :  * mapped memory region to read in file data during a page fault.
    2437                 :            :  *
    2438                 :            :  * The goto's are kind of ugly, but this streamlines the normal case of having
    2439                 :            :  * it in the page cache, and handles the special cases reasonably without
    2440                 :            :  * having a lot of duplicated code.
    2441                 :            :  *
    2442                 :            :  * vma->vm_mm->mmap_sem must be held on entry.
    2443                 :            :  *
    2444                 :            :  * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
    2445                 :            :  * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
    2446                 :            :  *
    2447                 :            :  * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
    2448                 :            :  * has not been released.
    2449                 :            :  *
    2450                 :            :  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
    2451                 :            :  *
    2452                 :            :  * Return: bitwise-OR of %VM_FAULT_ codes.
    2453                 :            :  */
    2454                 :      94311 : vm_fault_t filemap_fault(struct vm_fault *vmf)
    2455                 :            : {
    2456                 :      94311 :         int error;
    2457                 :      94311 :         struct file *file = vmf->vma->vm_file;
    2458                 :      94311 :         struct file *fpin = NULL;
    2459                 :      94311 :         struct address_space *mapping = file->f_mapping;
    2460                 :      94311 :         struct file_ra_state *ra = &file->f_ra;
    2461                 :      94311 :         struct inode *inode = mapping->host;
    2462                 :      94311 :         pgoff_t offset = vmf->pgoff;
    2463                 :      94311 :         pgoff_t max_off;
    2464                 :      94311 :         struct page *page;
    2465                 :      94311 :         vm_fault_t ret = 0;
    2466                 :            : 
    2467         [ +  - ]:      94311 :         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
    2468         [ +  - ]:      94311 :         if (unlikely(offset >= max_off))
    2469                 :            :                 return VM_FAULT_SIGBUS;
    2470                 :            : 
    2471                 :            :         /*
    2472                 :            :          * Do we have something in the page cache already?
    2473                 :            :          */
    2474                 :      94311 :         page = find_get_page(mapping, offset);
    2475   [ +  +  +  + ]:      94311 :         if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
    2476                 :            :                 /*
    2477                 :            :                  * We found the page, so try async readahead before
    2478                 :            :                  * waiting for the lock.
    2479                 :            :                  */
    2480                 :      89487 :                 fpin = do_async_mmap_readahead(vmf, page);
    2481         [ +  + ]:       4824 :         } else if (!page) {
    2482                 :            :                 /* No page in the page cache at all */
    2483                 :       3406 :                 count_vm_event(PGMAJFAULT);
    2484                 :       3406 :                 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
    2485                 :       3406 :                 ret = VM_FAULT_MAJOR;
    2486                 :       3406 :                 fpin = do_sync_mmap_readahead(vmf);
    2487                 :       3406 : retry_find:
    2488                 :       3406 :                 page = pagecache_get_page(mapping, offset,
    2489                 :            :                                           FGP_CREAT|FGP_FOR_MMAP,
    2490                 :            :                                           vmf->gfp_mask);
    2491         [ -  + ]:       3406 :                 if (!page) {
    2492         [ #  # ]:          0 :                         if (fpin)
    2493                 :          0 :                                 goto out_retry;
    2494                 :            :                         return vmf_error(-ENOMEM);
    2495                 :            :                 }
    2496                 :            :         }
    2497                 :            : 
    2498         [ -  + ]:      94311 :         if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
    2499                 :          0 :                 goto out_retry;
    2500                 :            : 
    2501                 :            :         /* Did it get truncated? */
    2502   [ -  +  -  + ]:      94311 :         if (unlikely(compound_head(page)->mapping != mapping)) {
    2503                 :          0 :                 unlock_page(page);
    2504                 :          0 :                 put_page(page);
    2505                 :          0 :                 goto retry_find;
    2506                 :            :         }
    2507                 :      94311 :         VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
    2508                 :            : 
    2509                 :            :         /*
    2510                 :            :          * We have a locked page in the page cache, now we need to check
    2511                 :            :          * that it's up-to-date. If not, it is going to be due to an error.
    2512                 :            :          */
    2513         [ -  + ]:      94311 :         if (unlikely(!PageUptodate(page)))
    2514                 :          0 :                 goto page_not_uptodate;
    2515                 :            : 
    2516                 :            :         /*
    2517                 :            :          * We've made it this far and we had to drop our mmap_sem, now is the
    2518                 :            :          * time to return to the upper layer and have it re-find the vma and
    2519                 :            :          * redo the fault.
    2520                 :            :          */
    2521         [ +  + ]:      94311 :         if (fpin) {
    2522                 :       6235 :                 unlock_page(page);
    2523                 :       6235 :                 goto out_retry;
    2524                 :            :         }
    2525                 :            : 
    2526                 :            :         /*
    2527                 :            :          * Found the page and have a reference on it.
    2528                 :            :          * We must recheck i_size under page lock.
    2529                 :            :          */
    2530         [ -  + ]:      88076 :         max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
    2531         [ -  + ]:      88076 :         if (unlikely(offset >= max_off)) {
    2532                 :          0 :                 unlock_page(page);
    2533                 :          0 :                 put_page(page);
    2534                 :          0 :                 return VM_FAULT_SIGBUS;
    2535                 :            :         }
    2536                 :            : 
    2537                 :      88076 :         vmf->page = page;
    2538                 :      88076 :         return ret | VM_FAULT_LOCKED;
    2539                 :            : 
    2540                 :            : page_not_uptodate:
    2541                 :            :         /*
    2542                 :            :          * Umm, take care of errors if the page isn't up-to-date.
    2543                 :            :          * Try to re-read it _once_. We do this synchronously,
    2544                 :            :          * because there really aren't any performance issues here
    2545                 :            :          * and we need to check for errors.
    2546                 :            :          */
    2547         [ #  # ]:          0 :         ClearPageError(page);
    2548                 :          0 :         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
    2549                 :          0 :         error = mapping->a_ops->readpage(file, page);
    2550         [ #  # ]:          0 :         if (!error) {
    2551                 :          0 :                 wait_on_page_locked(page);
    2552         [ #  # ]:          0 :                 if (!PageUptodate(page))
    2553                 :          0 :                         error = -EIO;
    2554                 :            :         }
    2555         [ #  # ]:          0 :         if (fpin)
    2556                 :          0 :                 goto out_retry;
    2557                 :          0 :         put_page(page);
    2558                 :            : 
    2559         [ #  # ]:          0 :         if (!error || error == AOP_TRUNCATED_PAGE)
    2560                 :          0 :                 goto retry_find;
    2561                 :            : 
    2562                 :            :         /* Things didn't work out. Return zero to tell the mm layer so. */
    2563                 :          0 :         shrink_readahead_size_eio(file, ra);
    2564                 :          0 :         return VM_FAULT_SIGBUS;
    2565                 :            : 
    2566                 :       6235 : out_retry:
    2567                 :            :         /*
    2568                 :            :          * We dropped the mmap_sem, we need to return to the fault handler to
    2569                 :            :          * re-find the vma and come back and find our hopefully still populated
    2570                 :            :          * page.
    2571                 :            :          */
    2572         [ +  - ]:       6235 :         if (page)
    2573                 :       6235 :                 put_page(page);
    2574         [ +  - ]:       6235 :         if (fpin)
    2575                 :       6235 :                 fput(fpin);
    2576                 :       6235 :         return ret | VM_FAULT_RETRY;
    2577                 :            : }
    2578                 :            : EXPORT_SYMBOL(filemap_fault);
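/*
 * Editorial note (not part of filemap.c): filesystems that need their own
 * locking around the fault path usually wrap filemap_fault() rather than
 * reimplement it, relying on the VM_FAULT_RETRY contract documented above.
 * The "examplefs" names below, including the i_mmap_sem field, are
 * hypothetical.
 */
static vm_fault_t examplefs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	/* hold the filesystem-private lock across the generic fault path */
	down_read(&EXAMPLEFS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&EXAMPLEFS_I(inode)->i_mmap_sem);

	return ret;
}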
    2579                 :            : 
    2580                 :     661583 : void filemap_map_pages(struct vm_fault *vmf,
    2581                 :            :                 pgoff_t start_pgoff, pgoff_t end_pgoff)
    2582                 :            : {
    2583                 :     661583 :         struct file *file = vmf->vma->vm_file;
    2584                 :     661583 :         struct address_space *mapping = file->f_mapping;
    2585                 :     661583 :         pgoff_t last_pgoff = start_pgoff;
    2586                 :     661583 :         unsigned long max_idx;
    2587                 :     661583 :         XA_STATE(xas, &mapping->i_pages, start_pgoff);
    2588                 :     661583 :         struct page *page;
    2589                 :            : 
    2590                 :     661583 :         rcu_read_lock();
    2591         [ +  + ]:    7992951 :         xas_for_each(&xas, page, end_pgoff) {
    2592         [ +  - ]:    7331368 :                 if (xas_retry(&xas, page))
    2593                 :          0 :                         continue;
    2594         [ -  + ]:    7331368 :                 if (xa_is_value(page))
    2595                 :          0 :                         goto next;
    2596                 :            : 
    2597                 :            :                 /*
    2598                 :            :                  * Check for a locked page first, as a speculative
    2599                 :            :                  * reference may adversely influence page migration.
    2600                 :            :                  */
    2601   [ -  +  +  + ]:   14662730 :                 if (PageLocked(page))
    2602                 :       4200 :                         goto next;
    2603                 :    7327168 :                 if (!page_cache_get_speculative(page))
    2604                 :          0 :                         goto next;
    2605                 :            : 
    2606                 :            :                 /* Has the page moved or been split? */
    2607   [ +  +  -  + ]:   14654330 :                 if (unlikely(page != xas_reload(&xas)))
    2608                 :          0 :                         goto skip;
    2609                 :    7327168 :                 page = find_subpage(page, xas.xa_index);
    2610                 :            : 
    2611   [ +  +  +  + ]:   14654290 :                 if (!PageUptodate(page) ||
    2612                 :            :                                 PageReadahead(page) ||
    2613                 :            :                                 PageHWPoison(page))
    2614                 :      49937 :                         goto skip;
    2615   [ -  +  -  + ]:   14554470 :                 if (!trylock_page(page))
    2616                 :          0 :                         goto skip;
    2617                 :            : 
    2618   [ +  -  -  + ]:    7277231 :                 if (page->mapping != mapping || !PageUptodate(page))
    2619                 :          0 :                         goto unlock;
    2620                 :            : 
    2621         [ -  + ]:    7277231 :                 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
    2622         [ -  + ]:    7277231 :                 if (page->index >= max_idx)
    2623                 :          0 :                         goto unlock;
    2624                 :            : 
    2625         [ +  + ]:    7277231 :                 if (file->f_ra.mmap_miss > 0)
    2626                 :       3338 :                         file->f_ra.mmap_miss--;
    2627                 :            : 
    2628                 :    7277231 :                 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
    2629         [ +  + ]:    7277231 :                 if (vmf->pte)
    2630                 :    6617321 :                         vmf->pte += xas.xa_index - last_pgoff;
    2631                 :    7277231 :                 last_pgoff = xas.xa_index;
    2632         [ +  + ]:    7277231 :                 if (alloc_set_pte(vmf, NULL, page))
    2633                 :     527214 :                         goto unlock;
    2634                 :    6750017 :                 unlock_page(page);
    2635                 :    6750017 :                 goto next;
    2636                 :     527214 : unlock:
    2637                 :     527214 :                 unlock_page(page);
    2638                 :     577151 : skip:
    2639                 :     577151 :                 put_page(page);
    2640                 :    7331368 : next:
    2641                 :            :                 /* Huge page is mapped? No need to proceed. */
    2642                 :    7331368 :                 if (pmd_trans_huge(*vmf->pmd))
    2643                 :            :                         break;
    2644                 :            :         }
    2645                 :     661583 :         rcu_read_unlock();
    2646                 :     661583 : }
    2647                 :            : EXPORT_SYMBOL(filemap_map_pages);
    2648                 :            : 
    2649                 :          0 : vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
    2650                 :            : {
    2651                 :          0 :         struct page *page = vmf->page;
    2652                 :          0 :         struct inode *inode = file_inode(vmf->vma->vm_file);
    2653                 :          0 :         vm_fault_t ret = VM_FAULT_LOCKED;
    2654                 :            : 
    2655                 :          0 :         sb_start_pagefault(inode->i_sb);
    2656                 :          0 :         file_update_time(vmf->vma->vm_file);
    2657                 :          0 :         lock_page(page);
    2658         [ #  # ]:          0 :         if (page->mapping != inode->i_mapping) {
    2659                 :          0 :                 unlock_page(page);
    2660                 :          0 :                 ret = VM_FAULT_NOPAGE;
    2661                 :          0 :                 goto out;
    2662                 :            :         }
    2663                 :            :         /*
    2664                 :            :          * We mark the page dirty already here so that when freeze is in
    2665                 :            :          * progress, we are guaranteed that writeback during freezing will
    2666                 :            :          * see the dirty page and writeprotect it again.
    2667                 :            :          */
    2668                 :          0 :         set_page_dirty(page);
    2669                 :          0 :         wait_for_stable_page(page);
    2670                 :          0 : out:
    2671                 :          0 :         sb_end_pagefault(inode->i_sb);
    2672                 :          0 :         return ret;
    2673                 :            : }
    2674                 :            : 
    2675                 :            : const struct vm_operations_struct generic_file_vm_ops = {
    2676                 :            :         .fault          = filemap_fault,
    2677                 :            :         .map_pages      = filemap_map_pages,
    2678                 :            :         .page_mkwrite   = filemap_page_mkwrite,
    2679                 :            : };
    2680                 :            : 
    2681                 :            : /* This is used for a general mmap of a disk file */
    2682                 :            : 
    2683                 :          0 : int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
    2684                 :            : {
    2685                 :          0 :         struct address_space *mapping = file->f_mapping;
    2686                 :            : 
    2687         [ #  # ]:          0 :         if (!mapping->a_ops->readpage)
    2688                 :            :                 return -ENOEXEC;
    2689   [ #  #  #  # ]:          0 :         file_accessed(file);
    2690                 :          0 :         vma->vm_ops = &generic_file_vm_ops;
    2691                 :          0 :         return 0;
    2692                 :            : }
    2693                 :            : 
    2694                 :            : /*
    2695                 :            :  * This is for filesystems which do not implement ->writepage.
    2696                 :            :  */
    2697                 :          0 : int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
    2698                 :            : {
    2699         [ #  # ]:          0 :         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
    2700                 :            :                 return -EINVAL;
    2701         [ #  # ]:          0 :         return generic_file_mmap(file, vma);
    2702                 :            : }
    2703                 :            : #else
    2704                 :            : vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
    2705                 :            : {
    2706                 :            :         return VM_FAULT_SIGBUS;
    2707                 :            : }
    2708                 :            : int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
    2709                 :            : {
    2710                 :            :         return -ENOSYS;
    2711                 :            : }
    2712                 :            : int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
    2713                 :            : {
    2714                 :            :         return -ENOSYS;
    2715                 :            : }
    2716                 :            : #endif /* CONFIG_MMU */
    2717                 :            : 
    2718                 :            : EXPORT_SYMBOL(filemap_page_mkwrite);
    2719                 :            : EXPORT_SYMBOL(generic_file_mmap);
    2720                 :            : EXPORT_SYMBOL(generic_file_readonly_mmap);
    2721                 :            : 
    2722                 :         26 : static struct page *wait_on_page_read(struct page *page)
    2723                 :            : {
    2724         [ +  - ]:         26 :         if (!IS_ERR(page)) {
    2725                 :         26 :                 wait_on_page_locked(page);
    2726         [ -  + ]:         26 :                 if (!PageUptodate(page)) {
    2727                 :          0 :                         put_page(page);
    2728                 :          0 :                         page = ERR_PTR(-EIO);
    2729                 :            :                 }
    2730                 :            :         }
    2731                 :         26 :         return page;
    2732                 :            : }
    2733                 :            : 
    2734                 :         52 : static struct page *do_read_cache_page(struct address_space *mapping,
    2735                 :            :                                 pgoff_t index,
    2736                 :            :                                 int (*filler)(void *, struct page *),
    2737                 :            :                                 void *data,
    2738                 :            :                                 gfp_t gfp)
    2739                 :            : {
    2740                 :         52 :         struct page *page;
    2741                 :         52 :         int err;
    2742                 :            : repeat:
    2743                 :         52 :         page = find_get_page(mapping, index);
    2744         [ +  + ]:         52 :         if (!page) {
    2745                 :         26 :                 page = __page_cache_alloc(gfp);
    2746         [ +  - ]:         26 :                 if (!page)
    2747                 :            :                         return ERR_PTR(-ENOMEM);
    2748                 :         26 :                 err = add_to_page_cache_lru(page, mapping, index, gfp);
    2749         [ -  + ]:         26 :                 if (unlikely(err)) {
    2750                 :          0 :                         put_page(page);
    2751         [ #  # ]:          0 :                         if (err == -EEXIST)
    2752                 :          0 :                                 goto repeat;
    2753                 :            :                         /* Presumably ENOMEM for xarray node */
    2754                 :          0 :                         return ERR_PTR(err);
    2755                 :            :                 }
    2756                 :            : 
    2757                 :         26 : filler:
    2758         [ -  + ]:         26 :                 if (filler)
    2759                 :          0 :                         err = filler(data, page);
    2760                 :            :                 else
    2761                 :         26 :                         err = mapping->a_ops->readpage(data, page);
    2762                 :            : 
    2763         [ -  + ]:         26 :                 if (err < 0) {
    2764                 :          0 :                         put_page(page);
    2765                 :          0 :                         return ERR_PTR(err);
    2766                 :            :                 }
    2767                 :            : 
    2768                 :         26 :                 page = wait_on_page_read(page);
    2769         [ +  - ]:         26 :                 if (IS_ERR(page))
    2770                 :            :                         return page;
    2771                 :         26 :                 goto out;
    2772                 :            :         }
    2773         [ +  - ]:         26 :         if (PageUptodate(page))
    2774                 :         26 :                 goto out;
    2775                 :            : 
    2776                 :            :         /*
     2777                 :            :          * Page is not up to date and may be locked due to one of the following
    2778                 :            :          * case a: Page is being filled and the page lock is held
    2779                 :            :          * case b: Read/write error clearing the page uptodate status
    2780                 :            :          * case c: Truncation in progress (page locked)
    2781                 :            :          * case d: Reclaim in progress
    2782                 :            :          *
    2783                 :            :          * Case a, the page will be up to date when the page is unlocked.
    2784                 :            :          *    There is no need to serialise on the page lock here as the page
    2785                 :            :          *    is pinned so the lock gives no additional protection. Even if the
     2786                 :            :          *    page is truncated, the data is still valid if PageUptodate as
     2787                 :            :          *    it's a read vs truncate race.
    2788                 :            :          * Case b, the page will not be up to date
    2789                 :            :          * Case c, the page may be truncated but in itself, the data may still
    2790                 :            :          *    be valid after IO completes as it's a read vs truncate race. The
    2791                 :            :          *    operation must restart if the page is not uptodate on unlock but
    2792                 :            :          *    otherwise serialising on page lock to stabilise the mapping gives
    2793                 :            :          *    no additional guarantees to the caller as the page lock is
    2794                 :            :          *    released before return.
    2795                 :            :          * Case d, similar to truncation. If reclaim holds the page lock, it
    2796                 :            :          *    will be a race with remove_mapping that determines if the mapping
    2797                 :            :          *    is valid on unlock but otherwise the data is valid and there is
    2798                 :            :          *    no need to serialise with page lock.
    2799                 :            :          *
    2800                 :            :          * As the page lock gives no additional guarantee, we optimistically
    2801                 :            :          * wait on the page to be unlocked and check if it's up to date and
    2802                 :            :          * use the page if it is. Otherwise, the page lock is required to
    2803                 :            :          * distinguish between the different cases. The motivation is that we
    2804                 :            :          * avoid spurious serialisations and wakeups when multiple processes
    2805                 :            :          * wait on the same page for IO to complete.
    2806                 :            :          */
    2807                 :          0 :         wait_on_page_locked(page);
    2808         [ #  # ]:          0 :         if (PageUptodate(page))
    2809                 :          0 :                 goto out;
    2810                 :            : 
    2811                 :            :         /* Distinguish between all the cases under the safety of the lock */
    2812                 :          0 :         lock_page(page);
    2813                 :            : 
    2814                 :            :         /* Case c or d, restart the operation */
    2815         [ #  # ]:          0 :         if (!page->mapping) {
    2816                 :          0 :                 unlock_page(page);
    2817                 :          0 :                 put_page(page);
    2818                 :          0 :                 goto repeat;
    2819                 :            :         }
    2820                 :            : 
    2821                 :            :         /* Someone else locked and filled the page in a very small window */
    2822         [ #  # ]:          0 :         if (PageUptodate(page)) {
    2823                 :          0 :                 unlock_page(page);
    2824                 :          0 :                 goto out;
    2825                 :            :         }
    2826                 :          0 :         goto filler;
    2827                 :            : 
    2828                 :         52 : out:
    2829                 :         52 :         mark_page_accessed(page);
    2830                 :         52 :         return page;
    2831                 :            : }
    2832                 :            : 
    2833                 :            : /**
    2834                 :            :  * read_cache_page - read into page cache, fill it if needed
    2835                 :            :  * @mapping:    the page's address_space
    2836                 :            :  * @index:      the page index
    2837                 :            :  * @filler:     function to perform the read
    2838                 :            :  * @data:       first arg to filler(data, page) function, often left as NULL
    2839                 :            :  *
    2840                 :            :  * Read into the page cache. If a page already exists, and PageUptodate() is
    2841                 :            :  * not set, try to fill the page and wait for it to become unlocked.
    2842                 :            :  *
    2843                 :            :  * If the page does not get brought uptodate, return -EIO.
    2844                 :            :  *
    2845                 :            :  * Return: up to date page on success, ERR_PTR() on failure.
    2846                 :            :  */
    2847                 :         52 : struct page *read_cache_page(struct address_space *mapping,
    2848                 :            :                                 pgoff_t index,
    2849                 :            :                                 int (*filler)(void *, struct page *),
    2850                 :            :                                 void *data)
    2851                 :            : {
    2852                 :         52 :         return do_read_cache_page(mapping, index, filler, data,
    2853                 :            :                         mapping_gfp_mask(mapping));
    2854                 :            : }
    2855                 :            : EXPORT_SYMBOL(read_cache_page);
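/*
 * Editorial note (not part of filemap.c): a minimal sketch of a caller of
 * read_cache_page().  The "examplefs" helper is hypothetical.  Passing a
 * NULL filler means the mapping's ->readpage() performs the read, with the
 * @data argument (here NULL) forwarded as its struct file pointer.
 */
static int examplefs_read_one(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	void *kaddr;

	/* returns an up-to-date page or an ERR_PTR(), as documented above */
	page = read_cache_page(mapping, index, NULL, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	kaddr = kmap(page);
	/* ... consume the page contents via kaddr ... */
	kunmap(page);
	put_page(page);
	return 0;
}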
    2856                 :            : 
    2857                 :            : /**
    2858                 :            :  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
    2859                 :            :  * @mapping:    the page's address_space
    2860                 :            :  * @index:      the page index
    2861                 :            :  * @gfp:        the page allocator flags to use if allocating
    2862                 :            :  *
    2863                 :            :  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
    2864                 :            :  * any new page allocations done using the specified allocation flags.
    2865                 :            :  *
    2866                 :            :  * If the page does not get brought uptodate, return -EIO.
    2867                 :            :  *
    2868                 :            :  * Return: up to date page on success, ERR_PTR() on failure.
    2869                 :            :  */
    2870                 :          0 : struct page *read_cache_page_gfp(struct address_space *mapping,
    2871                 :            :                                 pgoff_t index,
    2872                 :            :                                 gfp_t gfp)
    2873                 :            : {
    2874                 :          0 :         return do_read_cache_page(mapping, index, NULL, NULL, gfp);
    2875                 :            : }
    2876                 :            : EXPORT_SYMBOL(read_cache_page_gfp);
    2877                 :            : 
    2878                 :            : /*
    2879                 :            :  * Don't operate on ranges the page cache doesn't support, and don't exceed the
    2880                 :            :  * LFS limits.  If pos is under the limit it becomes a short access.  If it
    2881                 :            :  * exceeds the limit we return -EFBIG.
    2882                 :            :  */
    2883                 :            : static int generic_write_check_limits(struct file *file, loff_t pos,
    2884                 :            :                                       loff_t *count)
    2885                 :            : {
    2886                 :            :         struct inode *inode = file->f_mapping->host;
    2887                 :            :         loff_t max_size = inode->i_sb->s_maxbytes;
    2888                 :            :         loff_t limit = rlimit(RLIMIT_FSIZE);
    2889                 :            : 
    2890                 :            :         if (limit != RLIM_INFINITY) {
    2891                 :            :                 if (pos >= limit) {
    2892                 :            :                         send_sig(SIGXFSZ, current, 0);
    2893                 :            :                         return -EFBIG;
    2894                 :            :                 }
    2895                 :            :                 *count = min(*count, limit - pos);
    2896                 :            :         }
    2897                 :            : 
    2898                 :            :         if (!(file->f_flags & O_LARGEFILE))
    2899                 :            :                 max_size = MAX_NON_LFS;
    2900                 :            : 
    2901                 :            :         if (unlikely(pos >= max_size))
    2902                 :            :                 return -EFBIG;
    2903                 :            : 
    2904                 :            :         *count = min(*count, max_size - pos);
    2905                 :            : 
    2906                 :            :         return 0;
    2907                 :            : }
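/*
 * Editorial worked example (not in the original source): with RLIMIT_FSIZE
 * set to 1MiB, a 64KiB write at pos = 1MiB - 4KiB is clamped to 4KiB by the
 * first min() above, while a write starting at or beyond 1MiB raises SIGXFSZ
 * and fails with -EFBIG.  Without O_LARGEFILE the same clamping is then
 * applied against MAX_NON_LFS (2GiB - 1) instead of s_maxbytes.
 */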
    2908                 :            : 
    2909                 :            : /*
    2910                 :            :  * Performs necessary checks before doing a write
    2911                 :            :  *
     2912                 :            :  * Can adjust the write position or the number of bytes to write.
     2913                 :            :  * Returns an appropriate error code that the caller should return, or
     2914                 :            :  * zero if the write should be allowed.
    2915                 :            :  */
    2916                 :       6184 : inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
    2917                 :            : {
    2918                 :       6184 :         struct file *file = iocb->ki_filp;
    2919                 :       6184 :         struct inode *inode = file->f_mapping->host;
    2920                 :       6184 :         loff_t count;
    2921                 :       6184 :         int ret;
    2922                 :            : 
    2923         [ +  - ]:       6184 :         if (IS_SWAPFILE(inode))
    2924                 :            :                 return -ETXTBSY;
    2925                 :            : 
    2926         [ +  - ]:       6184 :         if (!iov_iter_count(from))
    2927                 :            :                 return 0;
    2928                 :            : 
    2929                 :            :         /* FIXME: this is for backwards compatibility with 2.4 */
    2930         [ +  + ]:       6184 :         if (iocb->ki_flags & IOCB_APPEND)
    2931                 :       1114 :                 iocb->ki_pos = i_size_read(inode);
    2932                 :            : 
    2933         [ +  - ]:       6184 :         if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
    2934                 :            :                 return -EINVAL;
    2935                 :            : 
    2936                 :       6184 :         count = iov_iter_count(from);
    2937                 :       6184 :         ret = generic_write_check_limits(file, iocb->ki_pos, &count);
    2938         [ -  + ]:       6184 :         if (ret)
    2939                 :          0 :                 return ret;
    2940                 :            : 
    2941         [ -  + ]:       6184 :         iov_iter_truncate(from, count);
    2942                 :       6184 :         return iov_iter_count(from);
    2943                 :            : }
    2944                 :            : EXPORT_SYMBOL(generic_write_checks);
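/*
 * Editorial note (not part of filemap.c): the usual calling pattern for
 * generic_write_checks() in a filesystem's ->write_iter(), mirroring what
 * generic_file_write_iter() itself does.  The "examplefs" name is
 * hypothetical; the other helpers are existing kernel interfaces.
 */
static ssize_t examplefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)		/* positive: (possibly shortened) byte count */
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}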
    2945                 :            : 
    2946                 :            : /*
    2947                 :            :  * Performs necessary checks before doing a clone.
    2948                 :            :  *
     2949                 :            :  * Can adjust the number of bytes to clone via the @req_count argument.
     2950                 :            :  * Returns an appropriate error code that the caller should return, or
     2951                 :            :  * zero if the clone should be allowed.
    2952                 :            :  */
    2953                 :          0 : int generic_remap_checks(struct file *file_in, loff_t pos_in,
    2954                 :            :                          struct file *file_out, loff_t pos_out,
    2955                 :            :                          loff_t *req_count, unsigned int remap_flags)
    2956                 :            : {
    2957                 :          0 :         struct inode *inode_in = file_in->f_mapping->host;
    2958                 :          0 :         struct inode *inode_out = file_out->f_mapping->host;
    2959                 :          0 :         uint64_t count = *req_count;
    2960                 :          0 :         uint64_t bcount;
    2961                 :          0 :         loff_t size_in, size_out;
    2962                 :          0 :         loff_t bs = inode_out->i_sb->s_blocksize;
    2963                 :          0 :         int ret;
    2964                 :            : 
    2965                 :            :         /* The start of both ranges must be aligned to an fs block. */
    2966   [ #  #  #  # ]:          0 :         if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
    2967                 :            :                 return -EINVAL;
    2968                 :            : 
    2969                 :            :         /* Ensure offsets don't wrap. */
    2970   [ #  #  #  # ]:          0 :         if (pos_in + count < pos_in || pos_out + count < pos_out)
    2971                 :            :                 return -EINVAL;
    2972                 :            : 
    2973         [ #  # ]:          0 :         size_in = i_size_read(inode_in);
    2974                 :          0 :         size_out = i_size_read(inode_out);
    2975                 :            : 
    2976                 :            :         /* Dedupe requires both ranges to be within EOF. */
    2977   [ #  #  #  # ]:          0 :         if ((remap_flags & REMAP_FILE_DEDUP) &&
    2978   [ #  #  #  # ]:          0 :             (pos_in >= size_in || pos_in + count > size_in ||
    2979         [ #  # ]:          0 :              pos_out >= size_out || pos_out + count > size_out))
    2980                 :            :                 return -EINVAL;
    2981                 :            : 
    2982                 :            :         /* Ensure the infile range is within the infile. */
    2983         [ #  # ]:          0 :         if (pos_in >= size_in)
    2984                 :            :                 return -EINVAL;
    2985                 :          0 :         count = min(count, size_in - (uint64_t)pos_in);
    2986                 :            : 
    2987                 :          0 :         ret = generic_write_check_limits(file_out, pos_out, &count);
    2988         [ #  # ]:          0 :         if (ret)
    2989                 :            :                 return ret;
    2990                 :            : 
    2991                 :            :         /*
    2992                 :            :          * If the user wanted us to link to the infile's EOF, round up to the
    2993                 :            :          * next block boundary for this check.
    2994                 :            :          *
    2995                 :            :          * Otherwise, make sure the count is also block-aligned, having
    2996                 :            :          * already confirmed the starting offsets' block alignment.
    2997                 :            :          */
    2998         [ #  # ]:          0 :         if (pos_in + count == size_in) {
    2999                 :          0 :                 bcount = ALIGN(size_in, bs) - pos_in;
    3000                 :            :         } else {
    3001         [ #  # ]:          0 :                 if (!IS_ALIGNED(count, bs))
    3002                 :          0 :                         count = ALIGN_DOWN(count, bs);
    3003                 :          0 :                 bcount = count;
    3004                 :            :         }
    3005                 :            : 
    3006                 :            :         /* Don't allow overlapped cloning within the same file. */
    3007         [ #  # ]:          0 :         if (inode_in == inode_out &&
    3008         [ #  # ]:          0 :             pos_out + bcount > pos_in &&
    3009         [ #  # ]:          0 :             pos_out < pos_in + bcount)
    3010                 :            :                 return -EINVAL;
    3011                 :            : 
    3012                 :            :         /*
    3013                 :            :          * We shortened the request but the caller can't deal with that, so
    3014                 :            :          * bounce the request back to userspace.
    3015                 :            :          */
    3016   [ #  #  #  # ]:          0 :         if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
    3017                 :            :                 return -EINVAL;
    3018                 :            : 
    3019                 :          0 :         *req_count = count;
    3020                 :          0 :         return 0;
    3021                 :            : }
    3022                 :            : 
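The overlap test applied above is easy to misread, so here it is pulled out as a stand-alone predicate (an illustration only, not a kernel helper): two equal-length byte ranges in the same file collide exactly when each range starts before the other one ends.

#include <stdbool.h>
#include <stdint.h>

/* Illustration of the overlap predicate used by generic_remap_checks():
 * [pos_in, pos_in + bcount) and [pos_out, pos_out + bcount) overlap iff
 * the destination starts before the source ends and vice versa. */
static bool remap_ranges_overlap(int64_t pos_in, int64_t pos_out,
                                 uint64_t bcount)
{
        return pos_out + bcount > pos_in && pos_out < pos_in + bcount;
}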
    3023                 :            : 
    3024                 :            : /*
    3025                 :            :  * Performs common checks before doing a file copy/clone
    3026                 :            :  * from @file_in to @file_out.
    3027                 :            :  */
    3028                 :        156 : int generic_file_rw_checks(struct file *file_in, struct file *file_out)
    3029                 :            : {
    3030         [ +  - ]:        156 :         struct inode *inode_in = file_inode(file_in);
    3031                 :        156 :         struct inode *inode_out = file_inode(file_out);
    3032                 :            : 
    3033                 :            :         /* Don't copy dirs, pipes, sockets... */
    3034   [ +  -  +  - ]:        156 :         if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
    3035                 :            :                 return -EISDIR;
    3036   [ +  -  +  - ]:        156 :         if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
    3037                 :            :                 return -EINVAL;
    3038                 :            : 
    3039         [ +  - ]:        156 :         if (!(file_in->f_mode & FMODE_READ) ||
    3040         [ -  + ]:        156 :             !(file_out->f_mode & FMODE_WRITE) ||
    3041                 :            :             (file_out->f_flags & O_APPEND))
    3042                 :          0 :                 return -EBADF;
    3043                 :            : 
    3044                 :            :         return 0;
    3045                 :            : }
    3046                 :            : 
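These mode and flag checks are visible from user space through copy_file_range(2): for example, a destination opened with O_APPEND is refused with EBADF before any data moves. A minimal user-space sketch that trips that check (hypothetical file names; assumes a glibc and kernel recent enough to use the real copy_file_range() syscall):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int in  = open("src.dat", O_RDONLY);
        int out = open("dst.dat", O_WRONLY | O_CREAT | O_APPEND, 0644);

        if (in < 0 || out < 0) {
                perror("open");
                return 1;
        }

        /* generic_file_rw_checks() rejects O_APPEND destinations, so this
         * is expected to fail with EBADF ("Bad file descriptor"). */
        if (copy_file_range(in, NULL, out, NULL, 4096, 0) < 0)
                perror("copy_file_range");

        close(in);
        close(out);
        return 0;
}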
    3047                 :            : /*
    3048                 :            :  * Performs necessary checks before doing a file copy
    3049                 :            :  *
    3050                 :            :  * Can adjust the number of bytes to copy via the @req_count argument.
    3051                 :            :  * Returns the appropriate error code that the caller should return, or
    3052                 :            :  * zero if the copy should be allowed.
    3053                 :            :  */
    3054                 :        104 : int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
    3055                 :            :                              struct file *file_out, loff_t pos_out,
    3056                 :            :                              size_t *req_count, unsigned int flags)
    3057                 :            : {
    3058         [ +  - ]:        104 :         struct inode *inode_in = file_inode(file_in);
    3059                 :        104 :         struct inode *inode_out = file_inode(file_out);
    3060                 :        104 :         uint64_t count = *req_count;
    3061                 :        104 :         loff_t size_in;
    3062                 :        104 :         int ret;
    3063                 :            : 
    3064                 :        104 :         ret = generic_file_rw_checks(file_in, file_out);
    3065         [ +  - ]:        104 :         if (ret)
    3066                 :            :                 return ret;
    3067                 :            : 
    3068                 :            :         /* Don't touch certain kinds of inodes */
    3069         [ +  - ]:        104 :         if (IS_IMMUTABLE(inode_out))
    3070                 :            :                 return -EPERM;
    3071                 :            : 
    3072   [ +  -  +  - ]:        104 :         if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
    3073                 :            :                 return -ETXTBSY;
    3074                 :            : 
    3075                 :            :         /* Ensure offsets don't wrap. */
    3076   [ +  -  +  - ]:        104 :         if (pos_in + count < pos_in || pos_out + count < pos_out)
    3077                 :            :                 return -EOVERFLOW;
    3078                 :            : 
    3079                 :            :         /* Shorten the copy to EOF */
    3080         [ +  + ]:        104 :         size_in = i_size_read(inode_in);
    3081         [ +  + ]:        104 :         if (pos_in >= size_in)
    3082                 :         52 :                 count = 0;
    3083                 :            :         else
    3084                 :         52 :                 count = min(count, size_in - (uint64_t)pos_in);
    3085                 :            : 
    3086                 :        104 :         ret = generic_write_check_limits(file_out, pos_out, &count);
    3087         [ +  - ]:        104 :         if (ret)
    3088                 :            :                 return ret;
    3089                 :            : 
    3090                 :            :         /* Don't allow overlapped copying within the same file. */
    3091         [ -  + ]:        104 :         if (inode_in == inode_out &&
    3092         [ #  # ]:          0 :             pos_out + count > pos_in &&
    3093         [ #  # ]:          0 :             pos_out < pos_in + count)
    3094                 :            :                 return -EINVAL;
    3095                 :            : 
    3096                 :        104 :         *req_count = count;
    3097                 :        104 :         return 0;
    3098                 :            : }
    3099                 :            : 
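In-kernel callers use this helper to clamp the request before dispatching the actual copy. A condensed sketch of that call pattern (hypothetical function, loosely modelled on the VFS copy_file_range path, with error handling trimmed):

/* Sketch: clamp @len with generic_copy_file_checks() and only then hand
 * the (possibly shortened) request to whatever engine performs the copy. */
static ssize_t sketch_copy_file_range(struct file *file_in, loff_t pos_in,
                                      struct file *file_out, loff_t pos_out,
                                      size_t len, unsigned int flags)
{
        ssize_t ret;

        ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out,
                                       &len, flags);
        if (ret)
                return ret;
        if (len == 0)           /* e.g. pos_in was at or beyond EOF */
                return 0;

        /* ... copy @len bytes here (clone, splice, or a plain data copy) ... */
        return len;
}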
    3100                 :          0 : int pagecache_write_begin(struct file *file, struct address_space *mapping,
    3101                 :            :                                 loff_t pos, unsigned len, unsigned flags,
    3102                 :            :                                 struct page **pagep, void **fsdata)
    3103                 :            : {
    3104                 :          0 :         const struct address_space_operations *aops = mapping->a_ops;
    3105                 :            : 
    3106                 :          0 :         return aops->write_begin(file, mapping, pos, len, flags,
    3107                 :            :                                                         pagep, fsdata);
    3108                 :            : }
    3109                 :            : EXPORT_SYMBOL(pagecache_write_begin);
    3110                 :            : 
    3111                 :          0 : int pagecache_write_end(struct file *file, struct address_space *mapping,
    3112                 :            :                                 loff_t pos, unsigned len, unsigned copied,
    3113                 :            :                                 struct page *page, void *fsdata)
    3114                 :            : {
    3115                 :          0 :         const struct address_space_operations *aops = mapping->a_ops;
    3116                 :            : 
    3117                 :          0 :         return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
    3118                 :            : }
    3119                 :            : EXPORT_SYMBOL(pagecache_write_end);
    3120                 :            : 
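Taken together, the two wrappers above bracket a copy into the page cache: write_begin hands back a locked page (plus opaque fsdata), the caller fills it, and write_end publishes the result and drops the page. A minimal sketch of that sequence for one chunk that fits inside a single page (hypothetical helper; a short return from write_end is simply treated as success here):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Hypothetical helper: write @len bytes from @buf into @file's page cache
 * at @pos via the address_space write_begin/write_end hooks.  Assumes the
 * range [pos, pos + len) stays within a single page. */
static int sketch_write_chunk(struct file *file, loff_t pos,
                              const void *buf, unsigned int len)
{
        struct address_space *mapping = file->f_mapping;
        struct page *page;
        void *fsdata;
        void *kaddr;
        int ret;

        ret = pagecache_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
        if (ret)
                return ret;

        kaddr = kmap_atomic(page);
        memcpy(kaddr + offset_in_page(pos), buf, len);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);

        ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
        return ret < 0 ? ret : 0;
}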
    3121                 :            : /*
    3122                 :            :  * Warn about a page cache invalidation failure during a direct I/O write.
    3123                 :            :  */
    3124                 :          0 : void dio_warn_stale_pagecache(struct file *filp)
    3125                 :            : {
    3126                 :          0 :         static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
    3127                 :          0 :         char pathname[128];
    3128                 :          0 :         struct inode *inode = file_inode(filp);
    3129                 :          0 :         char *path;
    3130                 :            : 
    3131                 :          0 :         errseq_set(&inode->i_mapping->wb_err, -EIO);
    3132         [ #  # ]:          0 :         if (__ratelimit(&_rs)) {
    3133                 :          0 :                 path = file_path(filp, pathname, sizeof(pathname));
    3134         [ #  # ]:          0 :                 if (IS_ERR(path))
    3135                 :          0 :                         path = "(unknown)";
    3136                 :          0 :                 pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
    3137                 :          0 :                 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
    3138                 :            :                         current->comm);
    3139                 :            :         }
    3140                 :          0 : }
    3141                 :            : 
    3142                 :            : ssize_t
    3143                 :          0 : generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
    3144                 :            : {
    3145                 :          0 :         struct file     *file = iocb->ki_filp;
    3146                 :          0 :         struct address_space *mapping = file->f_mapping;
    3147                 :          0 :         struct inode    *inode = mapping->host;
    3148                 :          0 :         loff_t          pos = iocb->ki_pos;
    3149                 :          0 :         ssize_t         written;
    3150                 :          0 :         size_t          write_len;
    3151                 :          0 :         pgoff_t         end;
    3152                 :            : 
    3153         [ #  # ]:          0 :         write_len = iov_iter_count(from);
    3154                 :          0 :         end = (pos + write_len - 1) >> PAGE_SHIFT;
    3155                 :            : 
    3156         [ #  # ]:          0 :         if (iocb->ki_flags & IOCB_NOWAIT) {
    3157                 :            :                 /* If there are pages to write back, return */
    3158         [ #  # ]:          0 :                 if (filemap_range_has_page(inode->i_mapping, pos,
    3159                 :            :                                            pos + write_len - 1))
    3160                 :            :                         return -EAGAIN;
    3161                 :            :         } else {
    3162                 :          0 :                 written = filemap_write_and_wait_range(mapping, pos,
    3163                 :            :                                                         pos + write_len - 1);
    3164         [ #  # ]:          0 :                 if (written)
    3165                 :          0 :                         goto out;
    3166                 :            :         }
    3167                 :            : 
    3168                 :            :         /*
    3169                 :            :          * After a write we want buffered reads to be sure to go to disk to get
    3170                 :            :          * the new data.  We invalidate clean cached pages from the region we're
    3171                 :            :          * about to write.  We do this *before* the write so that we can return
    3172                 :            :          * without clobbering -EIOCBQUEUED from ->direct_IO().
    3173                 :            :          */
    3174                 :          0 :         written = invalidate_inode_pages2_range(mapping,
    3175                 :          0 :                                         pos >> PAGE_SHIFT, end);
    3176                 :            :         /*
    3177                 :            :          * If a page cannot be invalidated, return 0 to fall back
    3178                 :            :          * to a buffered write.
    3179                 :            :          */
    3180         [ #  # ]:          0 :         if (written) {
    3181         [ #  # ]:          0 :                 if (written == -EBUSY)
    3182                 :            :                         return 0;
    3183                 :          0 :                 goto out;
    3184                 :            :         }
    3185                 :            : 
    3186                 :          0 :         written = mapping->a_ops->direct_IO(iocb, from);
    3187                 :            : 
    3188                 :            :         /*
    3189                 :            :          * Finally, try again to invalidate clean pages which might have been
    3190                 :            :          * cached by non-direct readahead, or faulted in by get_user_pages()
    3191                 :            :          * if the source of the write was an mmap'ed region of the file
    3192                 :            :          * we're writing.  Either one is a pretty crazy thing to do,
    3193                 :            :          * so we don't support it 100%.  If this invalidation
    3194                 :            :          * fails, tough, the write still worked...
    3195                 :            :          *
    3196                 :            :          * Most of the time we do not need this since dio_complete() will do
    3197                 :            :          * the invalidation for us. However there are some file systems that
    3198                 :            :          * do not end up with dio_complete() being called, so let's not break
    3199                 :            :          * them by removing it completely.
    3200                 :            :          *
    3201                 :            :          * A notable example is blkdev_direct_IO().
    3202                 :            :          *
    3203                 :            :          * Skip invalidation for async writes or if mapping has no pages.
    3204                 :            :          */
    3205   [ #  #  #  #  :          0 :         if (written > 0 && mapping->nrpages &&
                   #  # ]
    3206                 :          0 :             invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
    3207                 :          0 :                 dio_warn_stale_pagecache(file);
    3208                 :            : 
    3209         [ #  # ]:          0 :         if (written > 0) {
    3210                 :          0 :                 pos += written;
    3211                 :          0 :                 write_len -= written;
    3212   [ #  #  #  # ]:          0 :                 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
    3213                 :          0 :                         i_size_write(inode, pos);
    3214                 :          0 :                         mark_inode_dirty(inode);
    3215                 :            :                 }
    3216                 :          0 :                 iocb->ki_pos = pos;
    3217                 :            :         }
    3218                 :          0 :         iov_iter_revert(from, write_len - iov_iter_count(from));
    3219                 :            : out:
    3220                 :            :         return written;
    3221                 :            : }
    3222                 :            : EXPORT_SYMBOL(generic_file_direct_write);
    3223                 :            : 
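A filesystem that wants only the direct path can drive the helper above straight from its ->write_iter. A minimal sketch (hypothetical "myfs"; assumes ->direct_IO is implemented and that every write is meant to be direct):

static ssize_t myfs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret > 0)
                ret = generic_file_direct_write(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);    /* honour O_SYNC/O_DSYNC */
        return ret;
}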
    3224                 :            : /*
    3225                 :            :  * Find or create a page at the given pagecache position. Return the locked
    3226                 :            :  * page. This function is specifically for buffered writes.
    3227                 :            :  */
    3228                 :       5853 : struct page *grab_cache_page_write_begin(struct address_space *mapping,
    3229                 :            :                                         pgoff_t index, unsigned flags)
    3230                 :            : {
    3231                 :       5853 :         struct page *page;
    3232                 :       5853 :         int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;
    3233                 :            : 
    3234         [ -  + ]:       5853 :         if (flags & AOP_FLAG_NOFS)
    3235                 :          0 :                 fgp_flags |= FGP_NOFS;
    3236                 :            : 
    3237                 :       5853 :         page = pagecache_get_page(mapping, index, fgp_flags,
    3238                 :            :                         mapping_gfp_mask(mapping));
    3239         [ +  - ]:       5853 :         if (page)
    3240                 :       5853 :                 wait_for_stable_page(page);
    3241                 :            : 
    3242                 :       5853 :         return page;
    3243                 :            : }
    3244                 :            : EXPORT_SYMBOL(grab_cache_page_write_begin);
    3245                 :            : 
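This helper is the building block most ->write_begin implementations start from. A bare-bones sketch (hypothetical; a real implementation, like simple_write_begin(), would also zero the parts of a freshly created page that the write will not cover):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct page *page;

        page = grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;  /* returned locked; ->write_end unlocks and releases it */
        return 0;
}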
    3246                 :       6184 : ssize_t generic_perform_write(struct file *file,
    3247                 :            :                                 struct iov_iter *i, loff_t pos)
    3248                 :            : {
    3249                 :       6184 :         struct address_space *mapping = file->f_mapping;
    3250                 :       6184 :         const struct address_space_operations *a_ops = mapping->a_ops;
    3251                 :       6184 :         long status = 0;
    3252                 :       6184 :         ssize_t written = 0;
    3253                 :       6184 :         unsigned int flags = 0;
    3254                 :            : 
    3255                 :       7296 :         do {
    3256                 :       7296 :                 struct page *page;
    3257                 :       7296 :                 unsigned long offset;   /* Offset into pagecache page */
    3258                 :       7296 :                 unsigned long bytes;    /* Bytes to write to page */
    3259                 :       7296 :                 size_t copied;          /* Bytes copied from user */
    3260                 :       7296 :                 void *fsdata;
    3261                 :            : 
    3262                 :       7296 :                 offset = (pos & (PAGE_SIZE - 1));
    3263                 :       7296 :                 bytes = min_t(unsigned long, PAGE_SIZE - offset,
    3264                 :            :                                                 iov_iter_count(i));
    3265                 :            : 
    3266                 :       7296 : again:
    3267                 :            :                 /*
    3268                 :            :                  * Bring in the user page that we will copy from _first_.
    3269                 :            :                  * Otherwise there's a nasty deadlock on copying from the
    3270                 :            :                  * same page as we're writing to, without it being marked
    3271                 :            :                  * up-to-date.
    3272                 :            :                  *
    3273                 :            :                  * Not only is this an optimisation, but it is also required
    3274                 :            :                  * to check that the address is actually valid, when atomic
    3275                 :            :                  * usercopies are used, below.
    3276                 :            :                  */
    3277         [ +  - ]:       7296 :                 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
    3278                 :            :                         status = -EFAULT;
    3279                 :          0 :                         break;
    3280                 :            :                 }
    3281                 :            : 
    3282         [ +  - ]:       7296 :                 if (fatal_signal_pending(current)) {
    3283                 :            :                         status = -EINTR;
    3284                 :            :                         break;
    3285                 :            :                 }
    3286                 :            : 
    3287                 :       7296 :                 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
    3288                 :            :                                                 &page, &fsdata);
    3289         [ +  - ]:       7296 :                 if (unlikely(status < 0))
    3290                 :            :                         break;
    3291                 :            : 
    3292                 :       7296 :                 if (mapping_writably_mapped(mapping))
    3293                 :            :                         flush_dcache_page(page);
    3294                 :            : 
    3295                 :       7296 :                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
    3296                 :       7296 :                 flush_dcache_page(page);
    3297                 :            : 
    3298                 :       7296 :                 status = a_ops->write_end(file, mapping, pos, bytes, copied,
    3299                 :            :                                                 page, fsdata);
    3300         [ +  - ]:       7296 :                 if (unlikely(status < 0))
    3301                 :            :                         break;
    3302                 :       7296 :                 copied = status;
    3303                 :            : 
    3304                 :       7296 :                 cond_resched();
    3305                 :            : 
    3306                 :       7296 :                 iov_iter_advance(i, copied);
    3307         [ -  + ]:       7296 :                 if (unlikely(copied == 0)) {
    3308                 :            :                         /*
    3309                 :            :                          * If we were unable to copy any data at all, we must
    3310                 :            :                          * fall back to a single segment length write.
    3311                 :            :                          *
    3312                 :            :                          * If we didn't fallback here, we could livelock
    3313                 :            :                          * because not all segments in the iov can be copied at
    3314                 :            :                          * once without a pagefault.
    3315                 :            :                          */
    3316                 :          0 :                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
    3317                 :            :                                                 iov_iter_single_seg_count(i));
    3318                 :          0 :                         goto again;
    3319                 :            :                 }
    3320                 :       7296 :                 pos += copied;
    3321                 :       7296 :                 written += copied;
    3322                 :            : 
    3323                 :       7296 :                 balance_dirty_pages_ratelimited(mapping);
    3324         [ +  + ]:       7296 :         } while (iov_iter_count(i));
    3325                 :            : 
    3326         [ -  + ]:       6184 :         return written ? written : status;
    3327                 :            : }
    3328                 :            : EXPORT_SYMBOL(generic_perform_write);
    3329                 :            : 
    3330                 :            : /**
    3331                 :            :  * __generic_file_write_iter - write data to a file
    3332                 :            :  * @iocb:       IO state structure (file, offset, etc.)
    3333                 :            :  * @from:       iov_iter with data to write
    3334                 :            :  *
    3335                 :            :  * This function does all the work needed for actually writing data to a
    3336                 :            :  * file. It does all basic checks, removes SUID from the file, updates
    3337                 :            :  * modification times and calls the proper subroutines depending on whether we
    3338                 :            :  * do direct IO or a standard buffered write.
    3339                 :            :  *
    3340                 :            :  * It expects i_mutex to be grabbed unless we work on a block device or similar
    3341                 :            :  * object which does not need locking at all.
    3342                 :            :  *
    3343                 :            :  * This function does *not* take care of syncing data in case of O_SYNC write.
    3344                 :            :  * A caller has to handle it. This is mainly due to the fact that we want to
    3345                 :            :  * avoid syncing under i_mutex.
    3346                 :            :  *
    3347                 :            :  * Return:
    3348                 :            :  * * number of bytes written, even for truncated writes
    3349                 :            :  * * negative error code if no data has been written at all
    3350                 :            :  */
    3351                 :       1443 : ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
    3352                 :            : {
    3353                 :       1443 :         struct file *file = iocb->ki_filp;
    3354                 :       1443 :         struct address_space * mapping = file->f_mapping;
    3355                 :       1443 :         struct inode    *inode = mapping->host;
    3356                 :       1443 :         ssize_t         written = 0;
    3357                 :       1443 :         ssize_t         err;
    3358                 :       1443 :         ssize_t         status;
    3359                 :            : 
    3360                 :            :         /* We can write back this queue in page reclaim */
    3361                 :       1443 :         current->backing_dev_info = inode_to_bdi(inode);
    3362                 :       1443 :         err = file_remove_privs(file);
    3363         [ -  + ]:       1443 :         if (err)
    3364                 :          0 :                 goto out;
    3365                 :            : 
    3366                 :       1443 :         err = file_update_time(file);
    3367         [ -  + ]:       1443 :         if (err)
    3368                 :          0 :                 goto out;
    3369                 :            : 
    3370         [ -  + ]:       1443 :         if (iocb->ki_flags & IOCB_DIRECT) {
    3371                 :          0 :                 loff_t pos, endbyte;
    3372                 :            : 
    3373                 :          0 :                 written = generic_file_direct_write(iocb, from);
    3374                 :            :                 /*
    3375                 :            :                  * If the write stopped short of completing, fall back to
    3376                 :            :                  * buffered writes.  Some filesystems do this for writes to
    3377                 :            :                  * holes, for example.  For DAX files, a buffered write will
    3378                 :            :                  * not succeed (even if it did, DAX does not handle dirty
    3379                 :            :                  * page-cache pages correctly).
    3380                 :            :                  */
    3381   [ #  #  #  # ]:          0 :                 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
    3382                 :          0 :                         goto out;
    3383                 :            : 
    3384                 :          0 :                 status = generic_perform_write(file, from, pos = iocb->ki_pos);
    3385                 :            :                 /*
    3386                 :            :                  * If generic_perform_write() returned a synchronous error
    3387                 :            :                  * then we want to return the number of bytes which were
    3388                 :            :                  * direct-written, or the error code if that was zero.  Note
    3389                 :            :                  * that this differs from normal direct-io semantics, which
    3390                 :            :                  * will return -EFOO even if some bytes were written.
    3391                 :            :                  */
    3392         [ #  # ]:          0 :                 if (unlikely(status < 0)) {
    3393                 :          0 :                         err = status;
    3394                 :          0 :                         goto out;
    3395                 :            :                 }
    3396                 :            :                 /*
    3397                 :            :                  * We need to ensure that the page cache pages are written to
    3398                 :            :                  * disk and invalidated to preserve the expected O_DIRECT
    3399                 :            :                  * semantics.
    3400                 :            :                  */
    3401                 :          0 :                 endbyte = pos + status - 1;
    3402                 :          0 :                 err = filemap_write_and_wait_range(mapping, pos, endbyte);
    3403         [ #  # ]:          0 :                 if (err == 0) {
    3404                 :          0 :                         iocb->ki_pos = endbyte + 1;
    3405                 :          0 :                         written += status;
    3406                 :          0 :                         invalidate_mapping_pages(mapping,
    3407                 :          0 :                                                  pos >> PAGE_SHIFT,
    3408                 :          0 :                                                  endbyte >> PAGE_SHIFT);
    3409                 :            :                 } else {
    3410                 :            :                         /*
    3411                 :            :                          * We don't know how much we wrote, so just return
    3412                 :            :                          * the number of bytes which were direct-written
    3413                 :            :                          */
    3414                 :            :                 }
    3415                 :            :         } else {
    3416                 :       1443 :                 written = generic_perform_write(file, from, iocb->ki_pos);
    3417         [ -  + ]:       1443 :                 if (likely(written > 0))
    3418                 :       1443 :                         iocb->ki_pos += written;
    3419                 :            :         }
    3420                 :          0 : out:
    3421         [ -  + ]:       1443 :         current->backing_dev_info = NULL;
    3422         [ -  + ]:       1443 :         return written ? written : err;
    3423                 :            : }
    3424                 :            : EXPORT_SYMBOL(__generic_file_write_iter);
    3425                 :            : 
    3426                 :            : /**
    3427                 :            :  * generic_file_write_iter - write data to a file
    3428                 :            :  * @iocb:       IO state structure
    3429                 :            :  * @from:       iov_iter with data to write
    3430                 :            :  *
    3431                 :            :  * This is a wrapper around __generic_file_write_iter() to be used by most
    3432                 :            :  * filesystems. It takes care of syncing the file in case of an O_SYNC write
    3433                 :            :  * and acquires i_mutex as needed.
    3434                 :            :  * Return:
    3435                 :            :  * * negative error code if no data has been written at all or
    3436                 :            :  *   vfs_fsync_range() failed for a synchronous write
    3437                 :            :  * * number of bytes written, even for truncated writes
    3438                 :            :  */
    3439                 :       1443 : ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
    3440                 :            : {
    3441                 :       1443 :         struct file *file = iocb->ki_filp;
    3442                 :       1443 :         struct inode *inode = file->f_mapping->host;
    3443                 :       1443 :         ssize_t ret;
    3444                 :            : 
    3445                 :       1443 :         inode_lock(inode);
    3446                 :       1443 :         ret = generic_write_checks(iocb, from);
    3447         [ +  - ]:       1443 :         if (ret > 0)
    3448                 :       1443 :                 ret = __generic_file_write_iter(iocb, from);
    3449                 :       1443 :         inode_unlock(inode);
    3450                 :            : 
    3451         [ +  - ]:       1443 :         if (ret > 0)
    3452                 :       1443 :                 ret = generic_write_sync(iocb, ret);
    3453                 :       1443 :         return ret;
    3454                 :            : }
    3455                 :            : EXPORT_SYMBOL(generic_file_write_iter);
    3456                 :            : 
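For most pagecache-backed filesystems this wrapper is the entire write path, wired into file_operations next to its read-side counterpart. A sketch (hypothetical "myfs"; every helper named here is a stock VFS/filemap export):

const struct file_operations myfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
};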
    3457                 :            : /**
    3458                 :            :  * try_to_release_page() - release old fs-specific metadata on a page
    3459                 :            :  *
    3460                 :            :  * @page: the page which the kernel is trying to free
    3461                 :            :  * @gfp_mask: memory allocation flags (and I/O mode)
    3462                 :            :  *
    3463                 :            :  * The address_space is to try to release any data against the page
    3464                 :            :  * (presumably at page->private).
    3465                 :            :  *
    3466                 :            :  * This may also be called if PG_fscache is set on a page, indicating that the
    3467                 :            :  * page is known to the local caching routines.
    3468                 :            :  *
    3469                 :            :  * The @gfp_mask argument specifies whether I/O may be performed to release
    3470                 :            :  * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
    3471                 :            :  *
    3472                 :            :  * Return: %1 if the release was successful, otherwise return zero.
    3473                 :            :  */
    3474                 :         78 : int try_to_release_page(struct page *page, gfp_t gfp_mask)
    3475                 :            : {
    3476                 :         78 :         struct address_space * const mapping = page->mapping;
    3477                 :            : 
    3478   [ -  +  -  + ]:        156 :         BUG_ON(!PageLocked(page));
    3479   [ -  +  +  - ]:        156 :         if (PageWriteback(page))
    3480                 :            :                 return 0;
    3481                 :            : 
    3482   [ +  -  +  - ]:         78 :         if (mapping && mapping->a_ops->releasepage)
    3483                 :         78 :                 return mapping->a_ops->releasepage(page, gfp_mask);
    3484                 :          0 :         return try_to_free_buffers(page);
    3485                 :            : }
    3486                 :            : 
    3487                 :            : EXPORT_SYMBOL(try_to_release_page);

Generated by: LCOV version 1.14