Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0-only
2 : : /*
3 : : * mm/truncate.c - code for taking down pages from address_spaces
4 : : *
5 : : * Copyright (C) 2002, Linus Torvalds
6 : : *
7 : : * 10Sep2002 Andrew Morton
8 : : * Initial version.
9 : : */
10 : :
11 : : #include <linux/kernel.h>
12 : : #include <linux/backing-dev.h>
13 : : #include <linux/dax.h>
14 : : #include <linux/gfp.h>
15 : : #include <linux/mm.h>
16 : : #include <linux/swap.h>
17 : : #include <linux/export.h>
18 : : #include <linux/pagemap.h>
19 : : #include <linux/highmem.h>
20 : : #include <linux/pagevec.h>
21 : : #include <linux/task_io_accounting_ops.h>
22 : : #include <linux/buffer_head.h> /* grr. try_to_release_page,
23 : : do_invalidatepage */
24 : : #include <linux/shmem_fs.h>
25 : : #include <linux/cleancache.h>
26 : : #include <linux/rmap.h>
27 : : #include "internal.h"
28 : :
29 : : /*
30 : : * Regular page slots are stabilized by the page lock even without the tree
31 : : * itself locked. These unlocked entries need verification under the tree
32 : : * lock.
33 : : */
34 : 0 : static inline void __clear_shadow_entry(struct address_space *mapping,
35 : : pgoff_t index, void *entry)
36 : : {
37 : 0 : XA_STATE(xas, &mapping->i_pages, index);
38 : :
39 : : xas_set_update(&xas, workingset_update_node);
40 [ # # ]: 0 : if (xas_load(&xas) != entry)
41 : 0 : return;
42 : 0 : xas_store(&xas, NULL);
43 : 0 : mapping->nrexceptional--;
44 : : }
45 : :
46 : 0 : static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
47 : : void *entry)
48 : : {
49 : : xa_lock_irq(&mapping->i_pages);
50 : 0 : __clear_shadow_entry(mapping, index, entry);
51 : : xa_unlock_irq(&mapping->i_pages);
52 : 0 : }
53 : :
54 : : /*
55 : : * Unconditionally remove exceptional entries. Usually called from truncate
56 : : * path. Note that the pagevec may be altered by this function by removing
57 : : * exceptional entries similar to what pagevec_remove_exceptionals does.
58 : : */
59 : 86648 : static void truncate_exceptional_pvec_entries(struct address_space *mapping,
60 : : struct pagevec *pvec, pgoff_t *indices,
61 : : pgoff_t end)
62 : : {
63 : : int i, j;
64 : : bool dax, lock;
65 : :
66 : : /* Handled by shmem itself */
67 [ + - ]: 86648 : if (shmem_mapping(mapping))
68 : : return;
69 : :
70 [ + + ]: 2361704 : for (j = 0; j < pagevec_count(pvec); j++)
71 [ + - ]: 2275056 : if (xa_is_value(pvec->pages[j]))
72 : : break;
73 : :
74 [ - + ]: 173296 : if (j == pagevec_count(pvec))
75 : : return;
76 : :
77 : : dax = dax_mapping(mapping);
78 [ # # ]: 0 : lock = !dax && indices[j] < end;
79 [ # # ]: 0 : if (lock)
80 : : xa_lock_irq(&mapping->i_pages);
81 : :
82 [ # # ]: 0 : for (i = j; i < pagevec_count(pvec); i++) {
83 : 0 : struct page *page = pvec->pages[i];
84 : 0 : pgoff_t index = indices[i];
85 : :
86 [ # # ]: 0 : if (!xa_is_value(page)) {
87 : 0 : pvec->pages[j++] = page;
88 : 0 : continue;
89 : : }
90 : :
91 [ # # ]: 0 : if (index >= end)
92 : 0 : continue;
93 : :
94 : : if (unlikely(dax)) {
95 : : dax_delete_mapping_entry(mapping, index);
96 : : continue;
97 : : }
98 : :
99 : 0 : __clear_shadow_entry(mapping, index, page);
100 : : }
101 : :
102 [ # # ]: 0 : if (lock)
103 : : xa_unlock_irq(&mapping->i_pages);
104 : 0 : pvec->nr = j;
105 : : }
106 : :
107 : : /*
108 : : * Invalidate exceptional entry if easily possible. This handles exceptional
109 : : * entries for invalidate_inode_pages().
110 : : */
111 : 0 : static int invalidate_exceptional_entry(struct address_space *mapping,
112 : : pgoff_t index, void *entry)
113 : : {
114 : : /* Handled by shmem itself, or for DAX we do nothing. */
115 [ # # ]: 0 : if (shmem_mapping(mapping) || dax_mapping(mapping))
116 : : return 1;
117 : 0 : clear_shadow_entry(mapping, index, entry);
118 : 0 : return 1;
119 : : }
120 : :
121 : : /*
122 : : * Invalidate exceptional entry if clean. This handles exceptional entries for
123 : : * invalidate_inode_pages2() so for DAX it evicts only clean entries.
124 : : */
125 : 0 : static int invalidate_exceptional_entry2(struct address_space *mapping,
126 : : pgoff_t index, void *entry)
127 : : {
128 : : /* Handled by shmem itself */
129 [ # # ]: 0 : if (shmem_mapping(mapping))
130 : : return 1;
131 : : if (dax_mapping(mapping))
132 : : return dax_invalidate_mapping_entry_sync(mapping, index);
133 : 0 : clear_shadow_entry(mapping, index, entry);
134 : 0 : return 1;
135 : : }
136 : :
137 : : /**
138 : : * do_invalidatepage - invalidate part or all of a page
139 : : * @page: the page which is affected
140 : : * @offset: start of the range to invalidate
141 : : * @length: length of the range to invalidate
142 : : *
143 : : * do_invalidatepage() is called when all or part of the page has become
144 : : * invalidated by a truncate operation.
145 : : *
146 : : * do_invalidatepage() does not have to release all buffers, but it must
147 : : * ensure that no dirty buffer is left outside @offset and that no I/O
148 : : * is underway against any of the blocks which are outside the truncation
149 : : * point, because the caller is about to free (and possibly reuse) those
150 : : * blocks on-disk.
151 : : */
152 : 0 : void do_invalidatepage(struct page *page, unsigned int offset,
153 : : unsigned int length)
154 : : {
155 : : void (*invalidatepage)(struct page *, unsigned int, unsigned int);
156 : :
157 : 9914 : invalidatepage = page->mapping->a_ops->invalidatepage;
158 : : #ifdef CONFIG_BLOCK
159 [ # # # # + + # # ]: 9914 : if (!invalidatepage)
160 : : invalidatepage = block_invalidatepage;
161 : : #endif
162 [ # # # # + - # # ]: 9914 : if (invalidatepage)
163 : 9914 : (*invalidatepage)(page, offset, length);
164 : 0 : }
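do_invalidatepage() above simply dispatches to the mapping's ->invalidatepage hook, falling back to block_invalidatepage(). A minimal sketch of how a buffer_head-based filesystem might wire up that hook; the myfs names are hypothetical and not part of this file:

#include <linux/buffer_head.h>
#include <linux/fs.h>

/*
 * Hypothetical ->invalidatepage: the truncate path reaches this through
 * do_invalidatepage() for any page that still has buffer heads attached.
 */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        /* Drop the buffers that fall inside the invalidated byte range. */
        block_invalidatepage(page, offset, length);
}

static const struct address_space_operations myfs_aops = {
        .invalidatepage = myfs_invalidatepage,
        /* ... readpage, writepage, etc. ... */
};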
165 : :
166 : : /*
167 : : * If truncate cannot remove the fs-private metadata from the page, the page
168 : : * becomes orphaned. It will be left on the LRU and may even be mapped into
169 : : * user pagetables if we're racing with filemap_fault().
170 : : *
171 : : * We need to bail out if page->mapping is no longer equal to the original
172 : : * mapping. This happens a) when the VM reclaimed the page while we waited on
173 : : * its lock, b) when a concurrent invalidate_mapping_pages got there first and
174 : : * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
175 : : */
176 : : static void
177 : 1171222 : truncate_cleanup_page(struct address_space *mapping, struct page *page)
178 : : {
179 [ - + ]: 1171222 : if (page_mapped(page)) {
180 : : pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
181 : 0 : unmap_mapping_pages(mapping, page->index, nr, false);
182 : : }
183 : :
184 [ + + ]: 1171222 : if (page_has_private(page))
185 : : do_invalidatepage(page, 0, PAGE_SIZE);
186 : :
187 : : /*
188 : : * Some filesystems seem to re-dirty the page even after
189 : : * the VM has canceled the dirty bit (eg ext3 journaling).
190 : : * Hence dirty accounting check is placed after invalidation.
191 : : */
192 : 1171222 : cancel_dirty_page(page);
193 : : ClearPageMappedToDisk(page);
194 : 1171222 : }
195 : :
196 : : /*
197 : : * This is for invalidate_mapping_pages(). That function can be called at
198 : : * any time, and is not supposed to throw away dirty pages. But pages can
199 : : * be marked dirty at any time too, so use remove_mapping which safely
200 : : * discards clean, unused pages.
201 : : *
202 : : * Returns non-zero if the page was successfully invalidated.
203 : : */
204 : : static int
205 : 0 : invalidate_complete_page(struct address_space *mapping, struct page *page)
206 : : {
207 : : int ret;
208 : :
209 [ # # ]: 0 : if (page->mapping != mapping)
210 : : return 0;
211 : :
212 [ # # # # ]: 0 : if (page_has_private(page) && !try_to_release_page(page, 0))
213 : : return 0;
214 : :
215 : 0 : ret = remove_mapping(mapping, page);
216 : :
217 : 0 : return ret;
218 : : }
219 : :
220 : 33694 : int truncate_inode_page(struct address_space *mapping, struct page *page)
221 : : {
222 : : VM_BUG_ON_PAGE(PageTail(page), page);
223 : :
224 [ + - ]: 33694 : if (page->mapping != mapping)
225 : : return -EIO;
226 : :
227 : 33694 : truncate_cleanup_page(mapping, page);
228 : 33694 : delete_from_page_cache(page);
229 : 33694 : return 0;
230 : : }
231 : :
232 : : /*
233 : : * Used to get rid of pages on hardware memory corruption.
234 : : */
235 : 0 : int generic_error_remove_page(struct address_space *mapping, struct page *page)
236 : : {
237 [ # # ]: 0 : if (!mapping)
238 : : return -EINVAL;
239 : : /*
240 : : * Only punch for normal data pages for now.
241 : : * Handling other types like directories would need more auditing.
242 : : */
243 [ # # ]: 0 : if (!S_ISREG(mapping->host->i_mode))
244 : : return -EIO;
245 : 0 : return truncate_inode_page(mapping, page);
246 : : }
247 : : EXPORT_SYMBOL(generic_error_remove_page);
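generic_error_remove_page() is intended to be plugged in as the ->error_remove_page callback, which the memory-failure code calls to drop a hardware-poisoned data page. A sketch of that wiring, with a hypothetical aops table:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct address_space_operations example_aops = {
        .error_remove_page      = generic_error_remove_page,
        /* ... the filesystem's other aops ... */
};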
248 : :
249 : : /*
250 : : * Safely invalidate one page from its pagecache mapping.
251 : : * It only drops clean, unused pages. The page must be locked.
252 : : *
253 : : * Returns 1 if the page is successfully invalidated, otherwise 0.
254 : : */
255 : 0 : int invalidate_inode_page(struct page *page)
256 : : {
257 : 0 : struct address_space *mapping = page_mapping(page);
258 [ # # ]: 0 : if (!mapping)
259 : : return 0;
260 [ # # # # ]: 0 : if (PageDirty(page) || PageWriteback(page))
261 : : return 0;
262 [ # # ]: 0 : if (page_mapped(page))
263 : : return 0;
264 : 0 : return invalidate_complete_page(mapping, page);
265 : : }
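A sketch of the calling convention documented above: the caller holds the page lock and treats a return value of 1 as success. The helper name is hypothetical:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Try to drop one clean, unused, unmapped page cache page. */
static bool example_try_drop_page(struct page *page)
{
        int ret;

        if (!trylock_page(page))
                return false;
        ret = invalidate_inode_page(page);      /* 1 on success, 0 otherwise */
        unlock_page(page);
        return ret != 0;
}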
266 : :
267 : : /**
268 : : * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
269 : : * @mapping: mapping to truncate
270 : : * @lstart: offset from which to truncate
271 : : * @lend: offset to which to truncate (inclusive)
272 : : *
273 : : * Truncate the page cache, removing the pages that are between
274 : : * specified offsets (and zeroing out partial pages
275 : : * if lstart or lend + 1 is not page aligned).
276 : : *
277 : : * Truncate takes two passes - the first pass is nonblocking. It will not
278 : : * block on page locks and it will not block on writeback. The second pass
279 : : * will wait. This is to prevent as much IO as possible in the affected region.
280 : : * The first pass will remove most pages, so the search cost of the second pass
281 : : * is low.
282 : : *
283 : : * We pass down the cache-hot hint to the page freeing code. Even if the
284 : : * mapping is large, it is probably the case that the final pages are the most
285 : : * recently touched, and freeing happens in ascending file offset order.
286 : : *
287 : : * Note that since ->invalidatepage() accepts a range to invalidate,
288 : : * truncate_inode_pages_range is able to handle cases where lend + 1 is not
289 : : * properly page aligned.
290 : : */
291 : 3036962 : void truncate_inode_pages_range(struct address_space *mapping,
292 : : loff_t lstart, loff_t lend)
293 : : {
294 : : pgoff_t start; /* inclusive */
295 : : pgoff_t end; /* exclusive */
296 : : unsigned int partial_start; /* inclusive */
297 : : unsigned int partial_end; /* exclusive */
298 : : struct pagevec pvec;
299 : : pgoff_t indices[PAGEVEC_SIZE];
300 : : pgoff_t index;
301 : : int i;
302 : :
303 [ + + - + ]: 3036962 : if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
304 : : goto out;
305 : :
306 : : /* Offsets within partial pages */
307 : 18776 : partial_start = lstart & (PAGE_SIZE - 1);
308 : 18776 : partial_end = (lend + 1) & (PAGE_SIZE - 1);
309 : :
310 : : /*
311 : : * 'start' and 'end' always covers the range of pages to be fully
312 : : * truncated. Partial pages are covered with 'partial_start' at the
313 : : * start of the range and 'partial_end' at the end of the range.
314 : : * Note that 'end' is exclusive while 'lend' is inclusive.
315 : : */
316 : 18776 : start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
317 [ - + ]: 18776 : if (lend == -1)
318 : : /*
319 : : * lend == -1 indicates end-of-file so we have to set 'end'
320 : : * to the highest possible pgoff_t and since the type is
321 : : * unsigned we're using -1.
322 : : */
323 : : end = -1;
324 : : else
325 : 0 : end = (lend + 1) >> PAGE_SHIFT;
326 : :
327 : : pagevec_init(&pvec);
328 : : index = start;
329 [ + - + + ]: 229624 : while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
330 : 105424 : min(end - index, (pgoff_t)PAGEVEC_SIZE),
331 : : indices)) {
332 : : /*
333 : : * Pagevec array has exceptional entries and we may also fail
334 : : * to lock some pages. So we store pages that can be deleted
335 : : * in a new pagevec.
336 : : */
337 : : struct pagevec locked_pvec;
338 : :
339 : : pagevec_init(&locked_pvec);
340 [ + + ]: 2448352 : for (i = 0; i < pagevec_count(&pvec); i++) {
341 : 1137528 : struct page *page = pvec.pages[i];
342 : :
343 : : /* We rely upon deletion not changing page->index */
344 : 1137528 : index = indices[i];
345 [ + - ]: 1137528 : if (index >= end)
346 : : break;
347 : :
348 [ - + ]: 1137528 : if (xa_is_value(page))
349 : 0 : continue;
350 : :
351 [ - + ]: 1137528 : if (!trylock_page(page))
352 : 0 : continue;
353 [ - + ]: 1137528 : WARN_ON(page_to_index(page) != index);
354 [ - + ]: 1137528 : if (PageWriteback(page)) {
355 : 0 : unlock_page(page);
356 : 0 : continue;
357 : : }
358 [ - + ]: 1137528 : if (page->mapping != mapping) {
359 : 0 : unlock_page(page);
360 : 0 : continue;
361 : : }
362 : : pagevec_add(&locked_pvec, page);
363 : : }
364 [ + + ]: 2448352 : for (i = 0; i < pagevec_count(&locked_pvec); i++)
365 : 1137528 : truncate_cleanup_page(mapping, locked_pvec.pages[i]);
366 : 86648 : delete_from_page_cache_batch(mapping, &locked_pvec);
367 [ + + ]: 2448352 : for (i = 0; i < pagevec_count(&locked_pvec); i++)
368 : 1137528 : unlock_page(locked_pvec.pages[i]);
369 : 86648 : truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
370 : : pagevec_release(&pvec);
371 : 86648 : cond_resched();
372 : 86648 : index++;
373 : : }
374 [ - + ]: 18776 : if (partial_start) {
375 : 0 : struct page *page = find_lock_page(mapping, start - 1);
376 [ # # ]: 0 : if (page) {
377 : : unsigned int top = PAGE_SIZE;
378 [ # # ]: 0 : if (start > end) {
379 : : /* Truncation within a single page */
380 : : top = partial_end;
381 : : partial_end = 0;
382 : : }
383 : 0 : wait_on_page_writeback(page);
384 : : zero_user_segment(page, partial_start, top);
385 : : cleancache_invalidate_page(mapping, page);
386 [ # # ]: 0 : if (page_has_private(page))
387 : 0 : do_invalidatepage(page, partial_start,
388 : : top - partial_start);
389 : 0 : unlock_page(page);
390 : 0 : put_page(page);
391 : : }
392 : : }
393 [ - + ]: 18768 : if (partial_end) {
394 : : struct page *page = find_lock_page(mapping, end);
395 [ # # ]: 0 : if (page) {
396 : 0 : wait_on_page_writeback(page);
397 : : zero_user_segment(page, 0, partial_end);
398 : : cleancache_invalidate_page(mapping, page);
399 [ # # ]: 0 : if (page_has_private(page))
400 : : do_invalidatepage(page, 0,
401 : : partial_end);
402 : 0 : unlock_page(page);
403 : 0 : put_page(page);
404 : : }
405 : : }
406 : : /*
407 : : * If the truncation happened within a single page no pages
408 : : * will be released, just zeroed, so we can bail out now.
409 : : */
410 [ + + ]: 18768 : if (start >= end)
411 : : goto out;
412 : :
413 : : index = start;
414 : : for ( ; ; ) {
415 : 18776 : cond_resched();
416 [ + - ]: 18776 : if (!pagevec_lookup_entries(&pvec, mapping, index,
417 : 18776 : min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
418 : : /* If all gone from start onwards, we're done */
419 [ - + ]: 18776 : if (index == start)
420 : : break;
421 : : /* Otherwise restart to make sure all gone */
422 : : index = start;
423 : 0 : continue;
424 : : }
425 [ # # # # ]: 0 : if (index == start && indices[0] >= end) {
426 : : /* All gone out of hole to be punched, we're done */
427 : 0 : pagevec_remove_exceptionals(&pvec);
428 : : pagevec_release(&pvec);
429 : : break;
430 : : }
431 : :
432 [ # # ]: 0 : for (i = 0; i < pagevec_count(&pvec); i++) {
433 : 0 : struct page *page = pvec.pages[i];
434 : :
435 : : /* We rely upon deletion not changing page->index */
436 : 0 : index = indices[i];
437 [ # # ]: 0 : if (index >= end) {
438 : : /* Restart punch to make sure all gone */
439 : 0 : index = start - 1;
440 : 0 : break;
441 : : }
442 : :
443 [ # # ]: 0 : if (xa_is_value(page))
444 : 0 : continue;
445 : :
446 : 0 : lock_page(page);
447 [ # # ]: 0 : WARN_ON(page_to_index(page) != index);
448 : 0 : wait_on_page_writeback(page);
449 : 0 : truncate_inode_page(mapping, page);
450 : 0 : unlock_page(page);
451 : : }
452 : 0 : truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
453 : : pagevec_release(&pvec);
454 : 0 : index++;
455 : : }
456 : :
457 : : out:
458 : : cleancache_invalidate_inode(mapping);
459 : 3036954 : }
460 : : EXPORT_SYMBOL(truncate_inode_pages_range);
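A sketch of a caller that works in byte offsets; the helper name is hypothetical. The point to note is that @lend is inclusive, so a range of len bytes starting at offset ends at offset + len - 1, and any partial first or last page is zeroed rather than removed:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_drop_range(struct address_space *mapping,
                               loff_t offset, loff_t len)
{
        truncate_inode_pages_range(mapping, offset, offset + len - 1);
}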
461 : :
462 : : /**
463 : : * truncate_inode_pages - truncate *all* the pages from an offset
464 : : * @mapping: mapping to truncate
465 : : * @lstart: offset from which to truncate
466 : : *
467 : : * Called under (and serialised by) inode->i_mutex.
468 : : *
469 : : * Note: When this function returns, there can be a page in the process of
470 : : * deletion (inside __delete_from_page_cache()) in the specified range. Thus
471 : : * mapping->nrpages can be non-zero when this function returns even after
472 : : * truncation of the whole mapping.
473 : : */
474 : 16564 : void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
475 : : {
476 : 3036952 : truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
477 : 16564 : }
478 : : EXPORT_SYMBOL(truncate_inode_pages);
479 : :
480 : : /**
481 : : * truncate_inode_pages_final - truncate *all* pages before inode dies
482 : : * @mapping: mapping to truncate
483 : : *
484 : : * Called under (and serialized by) inode->i_mutex.
485 : : *
486 : : * Filesystems have to use this in the .evict_inode path to inform the
487 : : * VM that this is the final truncate and the inode is going away.
488 : : */
489 : 3019128 : void truncate_inode_pages_final(struct address_space *mapping)
490 : : {
491 : : unsigned long nrexceptional;
492 : : unsigned long nrpages;
493 : :
494 : : /*
495 : : * Page reclaim can not participate in regular inode lifetime
496 : : * management (can't call iput()) and thus can race with the
497 : : * inode teardown. Tell it when the address space is exiting,
498 : : * so that it does not install eviction information after the
499 : : * final truncate has begun.
500 : : */
501 : : mapping_set_exiting(mapping);
502 : :
503 : : /*
504 : : * When reclaim installs eviction entries, it increases
505 : : * nrexceptional first, then decreases nrpages. Make sure we see
506 : : * this in the right order or we might miss an entry.
507 : : */
508 : 3019122 : nrpages = mapping->nrpages;
509 : 3019122 : smp_rmb();
510 : 3019118 : nrexceptional = mapping->nrexceptional;
511 : :
512 [ + + ]: 3019118 : if (nrpages || nrexceptional) {
513 : : /*
514 : : * As truncation uses a lockless tree lookup, cycle
515 : : * the tree lock to make sure any ongoing tree
516 : : * modification that does not see AS_EXITING is
517 : : * completed before starting the final truncate.
518 : : */
519 : : xa_lock_irq(&mapping->i_pages);
520 : : xa_unlock_irq(&mapping->i_pages);
521 : : }
522 : :
523 : : /*
524 : : * Cleancache needs notification even if there are no pages or shadow
525 : : * entries.
526 : : */
527 : : truncate_inode_pages(mapping, 0);
528 : 3019108 : }
529 : : EXPORT_SYMBOL(truncate_inode_pages_final);
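A sketch of the intended call site: a filesystem's ->evict_inode performs the final truncate through truncate_inode_pages_final() before clearing the inode. The myfs names are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
}

static const struct super_operations myfs_super_ops = {
        .evict_inode    = myfs_evict_inode,
        /* ... */
};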
530 : :
531 : : /**
532 : : * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
533 : : * @mapping: the address_space which holds the pages to invalidate
534 : : * @start: the offset 'from' which to invalidate
535 : : * @end: the offset 'to' which to invalidate (inclusive)
536 : : *
537 : : * This function only removes the unlocked pages; if you want to
538 : : * remove all the pages of one inode, you must call truncate_inode_pages.
539 : : *
540 : : * invalidate_mapping_pages() will not block on IO activity. It will not
541 : : * invalidate pages which are dirty, locked, under writeback or mapped into
542 : : * pagetables.
543 : : *
544 : : * Return: the number of pages that were invalidated
545 : : */
546 : 0 : unsigned long invalidate_mapping_pages(struct address_space *mapping,
547 : : pgoff_t start, pgoff_t end)
548 : : {
549 : : pgoff_t indices[PAGEVEC_SIZE];
550 : : struct pagevec pvec;
551 : : pgoff_t index = start;
552 : : unsigned long ret;
553 : : unsigned long count = 0;
554 : : int i;
555 : :
556 : : pagevec_init(&pvec);
557 [ # # # # ]: 0 : while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
558 : 0 : min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
559 : : indices)) {
560 [ # # ]: 0 : for (i = 0; i < pagevec_count(&pvec); i++) {
561 : 0 : struct page *page = pvec.pages[i];
562 : :
563 : : /* We rely upon deletion not changing page->index */
564 : 0 : index = indices[i];
565 [ # # ]: 0 : if (index > end)
566 : : break;
567 : :
568 [ # # ]: 0 : if (xa_is_value(page)) {
569 : 0 : invalidate_exceptional_entry(mapping, index,
570 : : page);
571 : 0 : continue;
572 : : }
573 : :
574 [ # # ]: 0 : if (!trylock_page(page))
575 : 0 : continue;
576 : :
577 [ # # ]: 0 : WARN_ON(page_to_index(page) != index);
578 : :
579 : : /* Middle of THP: skip */
580 : : if (PageTransTail(page)) {
581 : : unlock_page(page);
582 : : continue;
583 : : } else if (PageTransHuge(page)) {
584 : : index += HPAGE_PMD_NR - 1;
585 : : i += HPAGE_PMD_NR - 1;
586 : : /*
587 : : * 'end' is in the middle of THP. Don't
588 : : * invalidate the page as the part outside of
589 : : * 'end' could be still useful.
590 : : */
591 : : if (index > end) {
592 : : unlock_page(page);
593 : : continue;
594 : : }
595 : :
596 : : /* Take a pin outside pagevec */
597 : : get_page(page);
598 : :
599 : : /*
600 : : * Drop extra pins before trying to invalidate
601 : : * the huge page.
602 : : */
603 : : pagevec_remove_exceptionals(&pvec);
604 : : pagevec_release(&pvec);
605 : : }
606 : :
607 : 0 : ret = invalidate_inode_page(page);
608 : 0 : unlock_page(page);
609 : : /*
610 : : * Invalidation is a hint that the page is no longer
611 : : * of interest and try to speed up its reclaim.
612 : : */
613 [ # # ]: 0 : if (!ret)
614 : 0 : deactivate_file_page(page);
615 : : if (PageTransHuge(page))
616 : : put_page(page);
617 : 0 : count += ret;
618 : : }
619 : 0 : pagevec_remove_exceptionals(&pvec);
620 : : pagevec_release(&pvec);
621 : 0 : cond_resched();
622 : 0 : index++;
623 : : }
624 : 0 : return count;
625 : : }
626 : : EXPORT_SYMBOL(invalidate_mapping_pages);
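A sketch of a typical caller, which converts byte offsets to page indices and treats the call purely as a hint; only clean, unlocked, unmapped pages are dropped. The helper name is hypothetical:

#include <linux/fs.h>
#include <linux/pagemap.h>

static unsigned long example_drop_clean_pages(struct address_space *mapping,
                                              loff_t start, loff_t end)
{
        return invalidate_mapping_pages(mapping, start >> PAGE_SHIFT,
                                        end >> PAGE_SHIFT);
}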
627 : :
628 : : /*
629 : : * This is like invalidate_complete_page(), except it ignores the page's
630 : : * refcount. We do this because invalidate_inode_pages2() needs stronger
631 : : * invalidation guarantees, and cannot afford to leave pages behind because
632 : : * shrink_page_list() has a temp ref on them, or because they're transiently
633 : : * sitting in the lru_cache_add() pagevecs.
634 : : */
635 : : static int
636 : 0 : invalidate_complete_page2(struct address_space *mapping, struct page *page)
637 : : {
638 : : unsigned long flags;
639 : :
640 [ # # ]: 0 : if (page->mapping != mapping)
641 : : return 0;
642 : :
643 [ # # # # ]: 0 : if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
644 : : return 0;
645 : :
646 : 0 : xa_lock_irqsave(&mapping->i_pages, flags);
647 [ # # ]: 0 : if (PageDirty(page))
648 : : goto failed;
649 : :
650 [ # # ]: 0 : BUG_ON(page_has_private(page));
651 : 0 : __delete_from_page_cache(page, NULL);
652 : : xa_unlock_irqrestore(&mapping->i_pages, flags);
653 : :
654 [ # # ]: 0 : if (mapping->a_ops->freepage)
655 : 0 : mapping->a_ops->freepage(page);
656 : :
657 : 0 : put_page(page); /* pagecache ref */
658 : 0 : return 1;
659 : : failed:
660 : : xa_unlock_irqrestore(&mapping->i_pages, flags);
661 : 0 : return 0;
662 : : }
663 : :
664 : 0 : static int do_launder_page(struct address_space *mapping, struct page *page)
665 : : {
666 [ # # ]: 0 : if (!PageDirty(page))
667 : : return 0;
668 [ # # # # ]: 0 : if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
669 : : return 0;
670 : 0 : return mapping->a_ops->launder_page(page);
671 : : }
672 : :
673 : : /**
674 : : * invalidate_inode_pages2_range - remove range of pages from an address_space
675 : : * @mapping: the address_space
676 : : * @start: the page offset 'from' which to invalidate
677 : : * @end: the page offset 'to' which to invalidate (inclusive)
678 : : *
679 : : * Any pages which are found to be mapped into pagetables are unmapped prior to
680 : : * invalidation.
681 : : *
682 : : * Return: -EBUSY if any pages could not be invalidated.
683 : : */
684 : 0 : int invalidate_inode_pages2_range(struct address_space *mapping,
685 : : pgoff_t start, pgoff_t end)
686 : : {
687 : : pgoff_t indices[PAGEVEC_SIZE];
688 : : struct pagevec pvec;
689 : : pgoff_t index;
690 : : int i;
691 : : int ret = 0;
692 : : int ret2 = 0;
693 : : int did_range_unmap = 0;
694 : :
695 [ # # # # ]: 0 : if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
696 : : goto out;
697 : :
698 : : pagevec_init(&pvec);
699 : : index = start;
700 [ # # # # ]: 0 : while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
701 : 0 : min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
702 : : indices)) {
703 [ # # ]: 0 : for (i = 0; i < pagevec_count(&pvec); i++) {
704 : 0 : struct page *page = pvec.pages[i];
705 : :
706 : : /* We rely upon deletion not changing page->index */
707 : 0 : index = indices[i];
708 [ # # ]: 0 : if (index > end)
709 : : break;
710 : :
711 [ # # ]: 0 : if (xa_is_value(page)) {
712 [ # # ]: 0 : if (!invalidate_exceptional_entry2(mapping,
713 : : index, page))
714 : : ret = -EBUSY;
715 : 0 : continue;
716 : : }
717 : :
718 : 0 : lock_page(page);
719 [ # # ]: 0 : WARN_ON(page_to_index(page) != index);
720 [ # # ]: 0 : if (page->mapping != mapping) {
721 : 0 : unlock_page(page);
722 : 0 : continue;
723 : : }
724 : 0 : wait_on_page_writeback(page);
725 [ # # ]: 0 : if (page_mapped(page)) {
726 [ # # ]: 0 : if (!did_range_unmap) {
727 : : /*
728 : : * Zap the rest of the file in one hit.
729 : : */
730 : 0 : unmap_mapping_pages(mapping, index,
731 : 0 : (1 + end - index), false);
732 : : did_range_unmap = 1;
733 : : } else {
734 : : /*
735 : : * Just zap this page
736 : : */
737 : 0 : unmap_mapping_pages(mapping, index,
738 : : 1, false);
739 : : }
740 : : }
741 [ # # ]: 0 : BUG_ON(page_mapped(page));
742 : 0 : ret2 = do_launder_page(mapping, page);
743 [ # # ]: 0 : if (ret2 == 0) {
744 [ # # ]: 0 : if (!invalidate_complete_page2(mapping, page))
745 : : ret2 = -EBUSY;
746 : : }
747 [ # # ]: 0 : if (ret2 < 0)
748 : : ret = ret2;
749 : 0 : unlock_page(page);
750 : : }
751 : 0 : pagevec_remove_exceptionals(&pvec);
752 : : pagevec_release(&pvec);
753 : 0 : cond_resched();
754 : 0 : index++;
755 : : }
756 : : /*
757 : : * For DAX we invalidate page tables after invalidating page cache. We
758 : : * could invalidate page tables while invalidating each entry however
759 : : * that would be expensive. And doing range unmapping before doesn't
760 : : * work as we have no cheap way to find whether page cache entry didn't
761 : : * get remapped later.
762 : : */
763 : : if (dax_mapping(mapping)) {
764 : : unmap_mapping_pages(mapping, start, end - start + 1, false);
765 : : }
766 : : out:
767 : : cleancache_invalidate_inode(mapping);
768 : 0 : return ret;
769 : : }
770 : : EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
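A sketch of the classic pattern around direct I/O: write back the affected range first, then invalidate it so that later buffered reads refetch from disk. The helper name is hypothetical:

#include <linux/fs.h>
#include <linux/pagemap.h>

static int example_sync_and_invalidate(struct address_space *mapping,
                                       loff_t pos, loff_t count)
{
        loff_t end = pos + count - 1;
        int ret;

        ret = filemap_write_and_wait_range(mapping, pos, end);
        if (ret)
                return ret;
        return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                             end >> PAGE_SHIFT);
}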
771 : :
772 : : /**
773 : : * invalidate_inode_pages2 - remove all pages from an address_space
774 : : * @mapping: the address_space
775 : : *
776 : : * Any pages which are found to be mapped into pagetables are unmapped prior to
777 : : * invalidation.
778 : : *
779 : : * Return: -EBUSY if any pages could not be invalidated.
780 : : */
781 : 0 : int invalidate_inode_pages2(struct address_space *mapping)
782 : : {
783 : 0 : return invalidate_inode_pages2_range(mapping, 0, -1);
784 : : }
785 : : EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
786 : :
787 : : /**
788 : : * truncate_pagecache - unmap and remove pagecache that has been truncated
789 : : * @inode: inode
790 : : * @newsize: new file size
791 : : *
792 : : * inode's new i_size must already be written before truncate_pagecache
793 : : * is called.
794 : : *
795 : : * This function should typically be called before the filesystem
796 : : * releases resources associated with the freed range (eg. deallocates
797 : : * blocks). This way, pagecache will always stay logically coherent
798 : : * with on-disk format, and the filesystem would not have to deal with
799 : : * situations such as writepage being called for a page that has already
800 : : * had its underlying blocks deallocated.
801 : : */
802 : 1270 : void truncate_pagecache(struct inode *inode, loff_t newsize)
803 : : {
804 : 1270 : struct address_space *mapping = inode->i_mapping;
805 : 1270 : loff_t holebegin = round_up(newsize, PAGE_SIZE);
806 : :
807 : : /*
808 : : * unmap_mapping_range is called twice, first simply for
809 : : * efficiency so that truncate_inode_pages does fewer
810 : : * single-page unmaps. However after this first call, and
811 : : * before truncate_inode_pages finishes, it is possible for
812 : : * private pages to be COWed, which remain after
813 : : * truncate_inode_pages finishes, hence the second
814 : : * unmap_mapping_range call must be made for correctness.
815 : : */
816 : 1270 : unmap_mapping_range(mapping, holebegin, 0, 1);
817 : : truncate_inode_pages(mapping, newsize);
818 : 1270 : unmap_mapping_range(mapping, holebegin, 0, 1);
819 : 1270 : }
820 : : EXPORT_SYMBOL(truncate_pagecache);
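A sketch of the ordering the comment above requires: the new i_size must be visible before truncate_pagecache() runs. The helper name is hypothetical and the caller is assumed to hold the inode lock:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_shrink_file(struct inode *inode, loff_t newsize)
{
        i_size_write(inode, newsize);           /* publish the new size first */
        truncate_pagecache(inode, newsize);     /* then unmap and drop the cache */
}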
821 : :
822 : : /**
823 : : * truncate_setsize - update inode and pagecache for a new file size
824 : : * @inode: inode
825 : : * @newsize: new file size
826 : : *
827 : : * truncate_setsize updates i_size and performs pagecache truncation (if
828 : : * necessary) to @newsize. It will typically be called from the filesystem's
829 : : * setattr function when ATTR_SIZE is passed in.
830 : : *
831 : : * Must be called with a lock serializing truncates and writes (generally
832 : : * i_mutex but e.g. xfs uses a different lock) and before all filesystem
833 : : * specific block truncation has been performed.
834 : : */
835 : 0 : void truncate_setsize(struct inode *inode, loff_t newsize)
836 : : {
837 : 0 : loff_t oldsize = inode->i_size;
838 : :
839 : : i_size_write(inode, newsize);
840 [ # # ]: 0 : if (newsize > oldsize)
841 : 0 : pagecache_isize_extended(inode, oldsize, newsize);
842 : 0 : truncate_pagecache(inode, newsize);
843 : 0 : }
844 : : EXPORT_SYMBOL(truncate_setsize);
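A sketch of a minimal ->setattr that routes size changes through truncate_setsize(), as described above; myfs is hypothetical and on-disk block truncation is omitted. The VFS calls this with the inode lock held:

#include <linux/fs.h>
#include <linux/mm.h>

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size)
                truncate_setsize(inode, attr->ia_size);

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}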
845 : :
846 : : /**
847 : : * pagecache_isize_extended - update pagecache after extension of i_size
848 : : * @inode: inode for which i_size was extended
849 : : * @from: original inode size
850 : : * @to: new inode size
851 : : *
852 : : * Handle extension of inode size either caused by extending truncate or by
853 : : * write starting after current i_size. We mark the page straddling current
854 : : * i_size RO so that page_mkwrite() is called on the nearest write access to
855 : : * the page. This way filesystem can be sure that page_mkwrite() is called on
856 : : * the page before user writes to the page via mmap after the i_size has been
857 : : * changed.
858 : : *
859 : : * The function must be called after i_size is updated so that a page fault
860 : : * coming after we unlock the page will already see the new i_size.
861 : : * The function must be called while we still hold i_mutex - this not only
862 : : * makes sure i_size is stable but also that userspace cannot observe new
863 : : * i_size value before we are prepared to store mmap writes at new inode size.
864 : : */
865 : 0 : void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
866 : : {
867 : : int bsize = i_blocksize(inode);
868 : : loff_t rounded_from;
869 : : struct page *page;
870 : : pgoff_t index;
871 : :
872 [ # # ]: 0 : WARN_ON(to > inode->i_size);
873 : :
874 [ # # ]: 0 : if (from >= to || bsize == PAGE_SIZE)
875 : : return;
876 : : /* Page straddling @from will not have any hole block created? */
877 : 0 : rounded_from = round_up(from, bsize);
878 [ # # # # ]: 0 : if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
879 : : return;
880 : :
881 : 0 : index = from >> PAGE_SHIFT;
882 : 0 : page = find_lock_page(inode->i_mapping, index);
883 : : /* Page not cached? Nothing to do */
884 [ # # ]: 0 : if (!page)
885 : : return;
886 : : /*
887 : : * See clear_page_dirty_for_io() for details why set_page_dirty()
888 : : * is needed.
889 : : */
890 [ # # ]: 0 : if (page_mkclean(page))
891 : 0 : set_page_dirty(page);
892 : 0 : unlock_page(page);
893 : 0 : put_page(page);
894 : : }
895 : : EXPORT_SYMBOL(pagecache_isize_extended);
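A sketch of an extending write on a filesystem whose block size is smaller than PAGE_SIZE: publish the new i_size, then let pagecache_isize_extended() write-protect the page straddling the old size so ->page_mkwrite() runs before any mmap store past the old EOF. The helper name is hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_extend_isize(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = inode->i_size;

        i_size_write(inode, newsize);
        pagecache_isize_extended(inode, oldsize, newsize);
}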
896 : :
897 : : /**
898 : : * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
899 : : * @inode: inode
900 : : * @lstart: offset of beginning of hole
901 : : * @lend: offset of last byte of hole
902 : : *
903 : : * This function should typically be called before the filesystem
904 : : * releases resources associated with the freed range (eg. deallocates
905 : : * blocks). This way, pagecache will always stay logically coherent
906 : : * with on-disk format, and the filesystem would not have to deal with
907 : : * situations such as writepage being called for a page that has already
908 : : * had its underlying blocks deallocated.
909 : : */
910 : 0 : void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
911 : : {
912 : 0 : struct address_space *mapping = inode->i_mapping;
913 : 0 : loff_t unmap_start = round_up(lstart, PAGE_SIZE);
914 : 0 : loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
915 : : /*
916 : : * This rounding is currently just for example: unmap_mapping_range
917 : : * expands its hole outwards, whereas we want it to contract the hole
918 : : * inwards. However, existing callers of truncate_pagecache_range are
919 : : * doing their own page rounding first. Note that unmap_mapping_range
920 : : * allows holelen 0 for all, and we allow lend -1 for end of file.
921 : : */
922 : :
923 : : /*
924 : : * Unlike in truncate_pagecache, unmap_mapping_range is called only
925 : : * once (before truncating pagecache), and without "even_cows" flag:
926 : : * hole-punching should not remove private COWed pages from the hole.
927 : : */
928 [ # # ]: 0 : if ((u64)unmap_end > (u64)unmap_start)
929 : 0 : unmap_mapping_range(mapping, unmap_start,
930 : : 1 + unmap_end - unmap_start, 0);
931 : 0 : truncate_inode_pages_range(mapping, lstart, lend);
932 : 0 : }
933 : : EXPORT_SYMBOL(truncate_pagecache_range);
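A sketch of a hole-punch path calling truncate_pagecache_range() with an inclusive end offset, before the filesystem frees the underlying blocks; the function name is hypothetical and locking/writeback are omitted:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_punch_hole_pagecache(struct inode *inode, loff_t offset,
                                         loff_t len)
{
        truncate_pagecache_range(inode, offset, offset + len - 1);
}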
|