Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0
2 : : /*
3 : : * Copyright (C) 2010 Red Hat, Inc.
4 : : * Copyright (c) 2016-2018 Christoph Hellwig.
5 : : */
6 : : #include <linux/module.h>
7 : : #include <linux/compiler.h>
8 : : #include <linux/fs.h>
9 : : #include <linux/iomap.h>
10 : : #include "trace.h"
11 : :
12 : : /*
13 : :  * Execute an iomap write on a segment of the mapping that spans a
14 : : * contiguous range of pages that have identical block mapping state.
15 : : *
16 : : * This avoids the need to map pages individually, do individual allocations
17 : : * for each page and most importantly avoid the need for filesystem specific
18 : : * locking per page. Instead, all the operations are amortised over the entire
19 : : * range of pages. It is assumed that the filesystems will lock whatever
20 : : * resources they require in the iomap_begin call, and release them in the
21 : : * iomap_end call.
22 : : */
23 : : loff_t
24 : 0 : iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
25 : : const struct iomap_ops *ops, void *data, iomap_actor_t actor)
26 : : {
27 : 0 : struct iomap iomap = { .type = IOMAP_HOLE };
28 : 0 : struct iomap srcmap = { .type = IOMAP_HOLE };
29 : 0 : loff_t written = 0, ret;
30 : 0 : u64 end;
31 : :
32 : 0 : trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
33 : :
34 : : /*
35 : : * Need to map a range from start position for length bytes. This can
36 : : * span multiple pages - it is only guaranteed to return a range of a
37 : : * single type of pages (e.g. all into a hole, all mapped or all
38 : : * unwritten). Failure at this point has nothing to undo.
39 : : *
40 : : * If allocation is required for this range, reserve the space now so
41 : : * that the allocation is guaranteed to succeed later on. Once we copy
42 : : * the data into the page cache pages, then we cannot fail otherwise we
43 : : * expose transient stale data. If the reserve fails, we can safely
44 : : * back out at this point as there is nothing to undo.
45 : : */
46 : 0 : ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
47 [ # # ]: 0 : if (ret)
48 : : return ret;
49 [ # # # # ]: 0 : if (WARN_ON(iomap.offset > pos))
50 : : return -EIO;
51 [ # # # # ]: 0 : if (WARN_ON(iomap.length == 0))
52 : : return -EIO;
53 : :
54 : 0 : trace_iomap_apply_dstmap(inode, &iomap);
55 [ # # ]: 0 : if (srcmap.type != IOMAP_HOLE)
56 : 0 : trace_iomap_apply_srcmap(inode, &srcmap);
57 : :
58 : : /*
59 : : * Cut down the length to the one actually provided by the filesystem,
60 : : * as it might not be able to give us the whole size that we requested.
61 : : */
62 : 0 : end = iomap.offset + iomap.length;
63 [ # # ]: 0 : if (srcmap.type != IOMAP_HOLE)
64 : 0 : end = min(end, srcmap.offset + srcmap.length);
65 [ # # ]: 0 : if (pos + length > end)
66 : 0 : length = end - pos;
67 : :
68 : : /*
69 : : * Now that we have guaranteed that the space allocation will succeed,
70 : : * we can do the copy-in page by page without having to worry about
71 : : * failures exposing transient data.
72 : : *
73 : : * To support COW operations, we read in data for partially blocks from
74 : : * the srcmap if the file system filled it in. In that case we the
75 : : * length needs to be limited to the earlier of the ends of the iomaps.
76 : : * If the file system did not provide a srcmap we pass in the normal
77 : : * iomap into the actors so that they don't need to have special
78 : : * handling for the two cases.
79 : : */
80 [ # # ]: 0 : written = actor(inode, pos, length, data, &iomap,
81 : : srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
82 : :
83 : : /*
84 : : * Now the data has been copied, commit the range we've copied. This
85 : : * should not fail unless the filesystem has had a fatal error.
86 : : */
87 [ # # ]: 0 : if (ops->iomap_end) {
88 : 0 : ret = ops->iomap_end(inode, pos, length,
89 : 0 : written > 0 ? written : 0,
90 : : flags, &iomap);
91 : : }
92 : :
93 [ # # ]: 0 : return written ? written : ret;
94 : : }
|