Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0+
2 : : /*
3 : : * linux/fs/jbd2/commit.c
4 : : *
5 : : * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6 : : *
7 : : * Copyright 1998 Red Hat corp --- All Rights Reserved
8 : : *
9 : : * Journal commit routines for the generic filesystem journaling code;
10 : : * part of the ext2fs journaling system.
11 : : */
12 : :
13 : : #include <linux/time.h>
14 : : #include <linux/fs.h>
15 : : #include <linux/jbd2.h>
16 : : #include <linux/errno.h>
17 : : #include <linux/slab.h>
18 : : #include <linux/mm.h>
19 : : #include <linux/pagemap.h>
20 : : #include <linux/jiffies.h>
21 : : #include <linux/crc32.h>
22 : : #include <linux/writeback.h>
23 : : #include <linux/backing-dev.h>
24 : : #include <linux/bio.h>
25 : : #include <linux/blkdev.h>
26 : : #include <linux/bitops.h>
27 : : #include <trace/events/jbd2.h>
28 : :
29 : : /*
30 : : * IO end handler for temporary buffer_heads handling writes to the journal.
31 : : */
32 : 2810 : static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
33 : : {
34 : 2810 : struct buffer_head *orig_bh = bh->b_private;
35 : :
36 : 2810 : BUFFER_TRACE(bh, "");
37 [ + - ]: 2810 : if (uptodate)
38 : 2810 : set_buffer_uptodate(bh);
39 : : else
40 : 0 : clear_buffer_uptodate(bh);
41 [ + + ]: 2810 : if (orig_bh) {
42 : 2408 : clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
43 : 2408 : smp_mb__after_atomic();
44 : 2408 : wake_up_bit(&orig_bh->b_state, BH_Shadow);
45 : : }
46 : 2810 : unlock_buffer(bh);
47 : 2810 : }
48 : :
49 : : /*
50 : : * When an ext4 file is truncated, it is possible that some pages are not
51 : : * successfully freed, because they are attached to a committing transaction.
52 : : * After the transaction commits, these pages are left on the LRU, with no
53 : : * ->mapping, and with attached buffers. These pages are trivially reclaimable
54 : : * by the VM, but their apparent absence upsets the VM accounting, and it makes
55 : : * the numbers in /proc/meminfo look odd.
56 : : *
57 : : * So here, we have a buffer which has just come off the forget list. Look to
58 : : * see if we can strip all buffers from the backing page.
59 : : *
60 : : * Called under lock_journal(), and possibly under journal_datalist_lock. The
61 : : * caller provided us with a ref against the buffer, and we drop that here.
62 : : */
63 : 0 : static void release_buffer_page(struct buffer_head *bh)
64 : : {
65 : 0 : struct page *page;
66 : :
67 [ # # ]: 0 : if (buffer_dirty(bh))
68 : 0 : goto nope;
69 [ # # ]: 0 : if (atomic_read(&bh->b_count) != 1)
70 : 0 : goto nope;
71 : 0 : page = bh->b_page;
72 [ # # ]: 0 : if (!page)
73 : 0 : goto nope;
74 [ # # ]: 0 : if (page->mapping)
75 : 0 : goto nope;
76 : :
77 : : /* OK, it's a truncated page */
78 [ # # # # ]: 0 : if (!trylock_page(page))
79 : 0 : goto nope;
80 : :
81 [ # # ]: 0 : get_page(page);
82 : 0 : __brelse(bh);
83 : 0 : try_to_free_buffers(page);
84 : 0 : unlock_page(page);
85 : 0 : put_page(page);
86 : 0 : return;
87 : :
88 : 0 : nope:
89 : 0 : __brelse(bh);
90 : : }
91 : :
92 : : static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
93 : : {
94 : : struct commit_header *h;
95 : : __u32 csum;
96 : :
97 : : if (!jbd2_journal_has_csum_v2or3(j))
98 : : return;
99 : :
100 : : h = (struct commit_header *)(bh->b_data);
101 : : h->h_chksum_type = 0;
102 : : h->h_chksum_size = 0;
103 : : h->h_chksum[0] = 0;
104 : : csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
105 : : h->h_chksum[0] = cpu_to_be32(csum);
106 : : }
107 : :
108 : : /*
109 : : * Done it all: now submit the commit record. We should have
110 : : * cleaned up our previous buffers by now, so if we are in abort
111 : : * mode we can now just skip the rest of the journal write
112 : : * entirely.
113 : : *
114 : : * Returns 1 if the journal needs to be aborted or 0 on success
115 : : */
116 : 201 : static int journal_submit_commit_record(journal_t *journal,
117 : : transaction_t *commit_transaction,
118 : : struct buffer_head **cbh,
119 : : __u32 crc32_sum)
120 : : {
121 : 201 : struct commit_header *tmp;
122 : 201 : struct buffer_head *bh;
123 : 201 : int ret;
124 : 201 : struct timespec64 now;
125 : :
126 : 201 : *cbh = NULL;
127 : :
128 [ + - ]: 201 : if (is_journal_aborted(journal))
129 : : return 0;
130 : :
131 : 201 : bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
132 : : JBD2_COMMIT_BLOCK);
133 [ + - ]: 201 : if (!bh)
134 : : return 1;
135 : :
136 : 201 : tmp = (struct commit_header *)bh->b_data;
137 : 201 : ktime_get_coarse_real_ts64(&now);
138 : 201 : tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
139 : 201 : tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
140 : :
141 [ + - - + ]: 402 : if (jbd2_has_feature_checksum(journal)) {
142 : 0 : tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
143 : 0 : tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
144 : 0 : tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
145 : : }
146 : 201 : jbd2_commit_block_csum_set(journal, bh);
147 : :
148 : 201 : BUFFER_TRACE(bh, "submit commit block");
149 : 201 : lock_buffer(bh);
150 : 201 : clear_buffer_dirty(bh);
151 : 201 : set_buffer_uptodate(bh);
152 : 201 : bh->b_end_io = journal_end_buffer_io_sync;
153 : :
154 [ + - + - ]: 402 : if (journal->j_flags & JBD2_BARRIER &&
155 [ + - ]: 201 : !jbd2_has_feature_async_commit(journal))
156 : 201 : ret = submit_bh(REQ_OP_WRITE,
157 : : REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
158 : : else
159 : 0 : ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
160 : :
161 : 201 : *cbh = bh;
162 : 201 : return ret;
163 : : }
164 : :
165 : : /*
166 : : * This function along with journal_submit_commit_record
167 : : * allows to write the commit record asynchronously.
168 : : */
169 : : static int journal_wait_on_commit_record(journal_t *journal,
170 : : struct buffer_head *bh)
171 : : {
172 : : int ret = 0;
173 : :
174 : : clear_buffer_dirty(bh);
175 : : wait_on_buffer(bh);
176 : :
177 : : if (unlikely(!buffer_uptodate(bh)))
178 : : ret = -EIO;
179 : : put_bh(bh); /* One for getblk() */
180 : :
181 : : return ret;
182 : : }
183 : :
184 : : /*
185 : : * write the filemap data using writepage() address_space_operations.
186 : : * We don't do block allocation here even for delalloc. We don't
187 : : * use writepages() because with delayed allocation we may be doing
188 : : * block allocation in writepages().
189 : : */
190 : 0 : static int journal_submit_inode_data_buffers(struct address_space *mapping,
191 : : loff_t dirty_start, loff_t dirty_end)
192 : : {
193 : 0 : int ret;
194 : 0 : struct writeback_control wbc = {
195 : : .sync_mode = WB_SYNC_ALL,
196 : 0 : .nr_to_write = mapping->nrpages * 2,
197 : : .range_start = dirty_start,
198 : : .range_end = dirty_end,
199 : : };
200 : :
201 : 0 : ret = generic_writepages(mapping, &wbc);
202 : 0 : return ret;
203 : : }
204 : :
205 : : /*
206 : : * Submit all the data buffers of inode associated with the transaction to
207 : : * disk.
208 : : *
209 : : * We are in a committing transaction. Therefore no new inode can be added to
210 : : * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
211 : : * operate on from being released while we write out pages.
212 : : */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		/* Skip inodes whose data does not need journal-driven writeout. */
		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		mapping = jinode->i_vfs_inode->i_mapping;
		/*
		 * JI_COMMIT_RUNNING keeps this jinode alive and on the list
		 * while we drop j_list_lock to do the (sleeping) writeout,
		 * so it is safe to continue the list walk afterwards.
		 */
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * submit the inode data buffers. We use writepage
		 * instead of writepages. Because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping, dirty_start,
				dirty_end);
		/* Remember the first error but keep submitting the rest. */
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		/* Pair the flag clear with the waiters' wait_on_bit(). */
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
250 : :
251 : : /*
252 : : * Wait for data submitted for writeout, refile inodes to proper
253 : : * transaction if needed.
254 : : *
255 : : */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		/* Only wait on inodes that requested data waiting. */
		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		/* Pin the jinode on the list while we sleep without the lock. */
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * keep_errors: don't clear the mapping's error state here so
		 * a later fsync() still sees the failure.
		 */
		err = filemap_fdatawait_range_keep_errors(
				jinode->i_vfs_inode->i_mapping, dirty_start,
				dirty_end);
		/* Remember the first error but keep waiting on the rest. */
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		/* Pair the flag clear with the waiters' wait_on_bit(). */
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			/* Inode was redirtied: hand it to the next transaction. */
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			/* Done with this inode: detach and reset dirty range. */
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
302 : :
303 : 0 : static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
304 : : {
305 : 0 : struct page *page = bh->b_page;
306 : 0 : char *addr;
307 : 0 : __u32 checksum;
308 : :
309 : 0 : addr = kmap_atomic(page);
310 : 0 : checksum = crc32_be(crc32_sum,
311 : 0 : (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
312 : 0 : kunmap_atomic(addr);
313 : :
314 : 0 : return checksum;
315 : : }
316 : :
/* Store @block's number into a descriptor tag, splitting high/low words. */
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	/* The high 32 bits are only present with the 64bit feature. */
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
324 : :
325 : 2408 : static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
326 : : struct buffer_head *bh, __u32 sequence)
327 : : {
328 : 2408 : journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
329 : 2408 : struct page *page = bh->b_page;
330 : 2408 : __u8 *addr;
331 : 2408 : __u32 csum32;
332 : 2408 : __be32 seq;
333 : :
334 [ - + ]: 2408 : if (!jbd2_journal_has_csum_v2or3(j))
335 : 0 : return;
336 : :
337 : 2408 : seq = cpu_to_be32(sequence);
338 : 2408 : addr = kmap_atomic(page);
339 : 2408 : csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
340 : 2408 : csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
341 : 2408 : bh->b_size);
342 : 2408 : kunmap_atomic(addr);
343 : :
344 [ + - + - ]: 4816 : if (jbd2_has_feature_csum3(j))
345 : 2408 : tag3->t_checksum = cpu_to_be32(csum32);
346 : : else
347 : 0 : tag->t_checksum = cpu_to_be16(csum32);
348 : : }
349 : : /*
350 : : * jbd2_journal_commit_transaction
351 : : *
352 : : * The primary function for committing a transaction to the log. This
353 : : * function is called by the journal thread to begin a complete commit.
354 : : */
355 : 201 : void jbd2_journal_commit_transaction(journal_t *journal)
356 : : {
357 : 201 : struct transaction_stats_s stats;
358 : 201 : transaction_t *commit_transaction;
359 : 201 : struct journal_head *jh;
360 : 201 : struct buffer_head *descriptor;
361 : 201 : struct buffer_head **wbuf = journal->j_wbuf;
362 : 201 : int bufs;
363 : 201 : int flags;
364 : 201 : int err;
365 : 201 : unsigned long long blocknr;
366 : 201 : ktime_t start_time;
367 : 201 : u64 commit_time;
368 : 201 : char *tagp = NULL;
369 : 201 : journal_block_tag_t *tag = NULL;
370 : 201 : int space_left = 0;
371 : 201 : int first_tag = 0;
372 : 201 : int tag_flag;
373 : 201 : int i;
374 : 201 : int tag_bytes = journal_tag_bytes(journal);
375 : 201 : struct buffer_head *cbh = NULL; /* For transactional checksums */
376 : 201 : __u32 crc32_sum = ~0;
377 : 201 : struct blk_plug plug;
378 : : /* Tail of the journal */
379 : 201 : unsigned long first_block;
380 : 201 : tid_t first_tid;
381 : 201 : int update_tail;
382 : 201 : int csum_size = 0;
383 : 201 : LIST_HEAD(io_bufs);
384 : 201 : LIST_HEAD(log_bufs);
385 : :
386 [ + - ]: 201 : if (jbd2_journal_has_csum_v2or3(journal))
387 : 201 : csum_size = sizeof(struct jbd2_journal_block_tail);
388 : :
389 : : /*
390 : : * First job: lock down the current transaction and wait for
391 : : * all outstanding updates to complete.
392 : : */
393 : :
394 : : /* Do we need to erase the effects of a prior jbd2_journal_flush? */
395 [ + + ]: 201 : if (journal->j_flags & JBD2_FLUSHED) {
396 : 28 : jbd_debug(3, "super block updated\n");
397 : 28 : mutex_lock_io(&journal->j_checkpoint_mutex);
398 : : /*
399 : : * We hold j_checkpoint_mutex so tail cannot change under us.
400 : : * We don't need any special data guarantees for writing sb
401 : : * since journal is empty and it is ok for write to be
402 : : * flushed only with transaction commit.
403 : : */
404 : 28 : jbd2_journal_update_sb_log_tail(journal,
405 : : journal->j_tail_sequence,
406 : : journal->j_tail,
407 : : REQ_SYNC);
408 : 28 : mutex_unlock(&journal->j_checkpoint_mutex);
409 : : } else {
410 : 201 : jbd_debug(3, "superblock not updated\n");
411 : : }
412 : :
413 [ - + ]: 201 : J_ASSERT(journal->j_running_transaction != NULL);
414 [ - + ]: 201 : J_ASSERT(journal->j_committing_transaction == NULL);
415 : :
416 : 201 : commit_transaction = journal->j_running_transaction;
417 : :
418 : 201 : trace_jbd2_start_commit(journal, commit_transaction);
419 : : jbd_debug(1, "JBD2: starting commit of transaction %d\n",
420 : 201 : commit_transaction->t_tid);
421 : :
422 : 201 : write_lock(&journal->j_state_lock);
423 [ - + ]: 201 : J_ASSERT(commit_transaction->t_state == T_RUNNING);
424 : 201 : commit_transaction->t_state = T_LOCKED;
425 : :
426 : 201 : trace_jbd2_commit_locking(journal, commit_transaction);
427 : 201 : stats.run.rs_wait = commit_transaction->t_max_wait;
428 : 201 : stats.run.rs_request_delay = 0;
429 : 201 : stats.run.rs_locked = jiffies;
430 [ + - ]: 201 : if (commit_transaction->t_requested)
431 [ + - ]: 402 : stats.run.rs_request_delay =
432 : : jbd2_time_diff(commit_transaction->t_requested,
433 : : stats.run.rs_locked);
434 [ + - ]: 201 : stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
435 : : stats.run.rs_locked);
436 : :
437 : 201 : spin_lock(&commit_transaction->t_handle_lock);
438 [ - + ]: 201 : while (atomic_read(&commit_transaction->t_updates)) {
439 : 0 : DEFINE_WAIT(wait);
440 : :
441 : 0 : prepare_to_wait(&journal->j_wait_updates, &wait,
442 : : TASK_UNINTERRUPTIBLE);
443 [ # # ]: 0 : if (atomic_read(&commit_transaction->t_updates)) {
444 : 0 : spin_unlock(&commit_transaction->t_handle_lock);
445 : 0 : write_unlock(&journal->j_state_lock);
446 : 0 : schedule();
447 : 0 : write_lock(&journal->j_state_lock);
448 : 0 : spin_lock(&commit_transaction->t_handle_lock);
449 : : }
450 : 0 : finish_wait(&journal->j_wait_updates, &wait);
451 : : }
452 : 201 : spin_unlock(&commit_transaction->t_handle_lock);
453 : 201 : commit_transaction->t_state = T_SWITCH;
454 : 201 : write_unlock(&journal->j_state_lock);
455 : :
456 [ - + ]: 201 : J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
457 : : journal->j_max_transaction_buffers);
458 : :
459 : : /*
460 : : * First thing we are allowed to do is to discard any remaining
461 : : * BJ_Reserved buffers. Note, it is _not_ permissible to assume
462 : : * that there are no such buffers: if a large filesystem
463 : : * operation like a truncate needs to split itself over multiple
464 : : * transactions, then it may try to do a jbd2_journal_restart() while
465 : : * there are still BJ_Reserved buffers outstanding. These must
466 : : * be released cleanly from the current transaction.
467 : : *
468 : : * In this case, the filesystem must still reserve write access
469 : : * again before modifying the buffer in the new transaction, but
470 : : * we do not require it to remember exactly which old buffers it
471 : : * has reserved. This is consistent with the existing behaviour
472 : : * that multiple jbd2_journal_get_write_access() calls to the same
473 : : * buffer are perfectly permissible.
474 : : */
475 [ - + ]: 201 : while (commit_transaction->t_reserved_list) {
476 : 0 : jh = commit_transaction->t_reserved_list;
477 : 0 : JBUFFER_TRACE(jh, "reserved, unused: refile");
478 : : /*
479 : : * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
480 : : * leave undo-committed data.
481 : : */
482 [ # # ]: 0 : if (jh->b_committed_data) {
483 : 0 : struct buffer_head *bh = jh2bh(jh);
484 : :
485 : 0 : spin_lock(&jh->b_state_lock);
486 : 0 : jbd2_free(jh->b_committed_data, bh->b_size);
487 : 0 : jh->b_committed_data = NULL;
488 : 0 : spin_unlock(&jh->b_state_lock);
489 : : }
490 : 0 : jbd2_journal_refile_buffer(journal, jh);
491 : : }
492 : :
493 : : /*
494 : : * Now try to drop any written-back buffers from the journal's
495 : : * checkpoint lists. We do this *before* commit because it potentially
496 : : * frees some memory
497 : : */
498 : 201 : spin_lock(&journal->j_list_lock);
499 : 201 : __jbd2_journal_clean_checkpoint_list(journal, false);
500 : 201 : spin_unlock(&journal->j_list_lock);
501 : :
502 : 201 : jbd_debug(3, "JBD2: commit phase 1\n");
503 : :
504 : : /*
505 : : * Clear revoked flag to reflect there is no revoked buffers
506 : : * in the next transaction which is going to be started.
507 : : */
508 : 201 : jbd2_clear_buffer_revoked_flags(journal);
509 : :
510 : : /*
511 : : * Switch to a new revoke table.
512 : : */
513 : 201 : jbd2_journal_switch_revoke_table(journal);
514 : :
515 : : /*
516 : : * Reserved credits cannot be claimed anymore, free them
517 : : */
518 : 201 : atomic_sub(atomic_read(&journal->j_reserved_credits),
519 : : &commit_transaction->t_outstanding_credits);
520 : :
521 : 201 : write_lock(&journal->j_state_lock);
522 : 201 : trace_jbd2_commit_flushing(journal, commit_transaction);
523 : 201 : stats.run.rs_flushing = jiffies;
524 [ + - ]: 201 : stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
525 : : stats.run.rs_flushing);
526 : :
527 : 201 : commit_transaction->t_state = T_FLUSH;
528 : 201 : journal->j_committing_transaction = commit_transaction;
529 : 201 : journal->j_running_transaction = NULL;
530 : 201 : start_time = ktime_get();
531 : 201 : commit_transaction->t_log_start = journal->j_head;
532 : 201 : wake_up(&journal->j_wait_transaction_locked);
533 : 201 : write_unlock(&journal->j_state_lock);
534 : :
535 : 201 : jbd_debug(3, "JBD2: commit phase 2a\n");
536 : :
537 : : /*
538 : : * Now start flushing things to disk, in the order they appear
539 : : * on the transaction lists. Data blocks go first.
540 : : */
541 : 201 : err = journal_submit_data_buffers(journal, commit_transaction);
542 [ - + ]: 201 : if (err)
543 : 0 : jbd2_journal_abort(journal, err);
544 : :
545 : 201 : blk_start_plug(&plug);
546 : 201 : jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
547 : :
548 : 201 : jbd_debug(3, "JBD2: commit phase 2b\n");
549 : :
550 : : /*
551 : : * Way to go: we have now written out all of the data for a
552 : : * transaction! Now comes the tricky part: we need to write out
553 : : * metadata. Loop over the transaction's entire buffer list:
554 : : */
555 : 201 : write_lock(&journal->j_state_lock);
556 : 201 : commit_transaction->t_state = T_COMMIT;
557 : 201 : write_unlock(&journal->j_state_lock);
558 : :
559 : 201 : trace_jbd2_commit_logging(journal, commit_transaction);
560 : 201 : stats.run.rs_logging = jiffies;
561 [ + - ]: 201 : stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
562 : : stats.run.rs_logging);
563 : 201 : stats.run.rs_blocks = commit_transaction->t_nr_buffers;
564 : 201 : stats.run.rs_blocks_logged = 0;
565 : :
566 [ - + ]: 201 : J_ASSERT(commit_transaction->t_nr_buffers <=
567 : : atomic_read(&commit_transaction->t_outstanding_credits));
568 : :
569 : : err = 0;
570 : : bufs = 0;
571 : : descriptor = NULL;
572 [ + + ]: 2609 : while (commit_transaction->t_buffers) {
573 : :
574 : : /* Find the next buffer to be journaled... */
575 : :
576 : 2408 : jh = commit_transaction->t_buffers;
577 : :
578 : : /* If we're in abort mode, we just un-journal the buffer and
579 : : release it. */
580 : :
581 [ - + ]: 2408 : if (is_journal_aborted(journal)) {
582 : 0 : clear_buffer_jbddirty(jh2bh(jh));
583 : 0 : JBUFFER_TRACE(jh, "journal is aborting: refile");
584 : 0 : jbd2_buffer_abort_trigger(jh,
585 [ # # ]: 0 : jh->b_frozen_data ?
586 : : jh->b_frozen_triggers :
587 : : jh->b_triggers);
588 : 0 : jbd2_journal_refile_buffer(journal, jh);
589 : : /* If that was the last one, we need to clean up
590 : : * any descriptor buffers which may have been
591 : : * already allocated, even if we are now
592 : : * aborting. */
593 [ # # ]: 0 : if (!commit_transaction->t_buffers)
594 : 0 : goto start_journal_io;
595 : 0 : continue;
596 : : }
597 : :
598 : : /* Make sure we have a descriptor block in which to
599 : : record the metadata buffer. */
600 : :
601 [ + + ]: 2408 : if (!descriptor) {
602 [ - + ]: 201 : J_ASSERT (bufs == 0);
603 : :
604 : 201 : jbd_debug(4, "JBD2: get descriptor\n");
605 : :
606 : 201 : descriptor = jbd2_journal_get_descriptor_buffer(
607 : : commit_transaction,
608 : : JBD2_DESCRIPTOR_BLOCK);
609 [ - + ]: 201 : if (!descriptor) {
610 : 0 : jbd2_journal_abort(journal, -EIO);
611 : 0 : continue;
612 : : }
613 : :
614 : : jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
615 : : (unsigned long long)descriptor->b_blocknr,
616 : 201 : descriptor->b_data);
617 : 201 : tagp = &descriptor->b_data[sizeof(journal_header_t)];
618 : 201 : space_left = descriptor->b_size -
619 : : sizeof(journal_header_t);
620 : 201 : first_tag = 1;
621 : 201 : set_buffer_jwrite(descriptor);
622 : 201 : set_buffer_dirty(descriptor);
623 : 201 : wbuf[bufs++] = descriptor;
624 : :
625 : : /* Record it so that we can wait for IO
626 : : completion later */
627 : 201 : BUFFER_TRACE(descriptor, "ph3: file as descriptor");
628 : 201 : jbd2_file_log_bh(&log_bufs, descriptor);
629 : : }
630 : :
631 : : /* Where is the buffer to be written? */
632 : :
633 : 2408 : err = jbd2_journal_next_log_block(journal, &blocknr);
634 : : /* If the block mapping failed, just abandon the buffer
635 : : and repeat this loop: we'll fall into the
636 : : refile-on-abort condition above. */
637 [ - + ]: 2408 : if (err) {
638 : 0 : jbd2_journal_abort(journal, err);
639 : 0 : continue;
640 : : }
641 : :
642 : : /*
643 : : * start_this_handle() uses t_outstanding_credits to determine
644 : : * the free space in the log.
645 : : */
646 : 2408 : atomic_dec(&commit_transaction->t_outstanding_credits);
647 : :
648 : : /* Bump b_count to prevent truncate from stumbling over
649 : : the shadowed buffer! @@@ This can go if we ever get
650 : : rid of the shadow pairing of buffers. */
651 : 2408 : atomic_inc(&jh2bh(jh)->b_count);
652 : :
653 : : /*
654 : : * Make a temporary IO buffer with which to write it out
655 : : * (this will requeue the metadata buffer to BJ_Shadow).
656 : : */
657 : 2408 : set_bit(BH_JWrite, &jh2bh(jh)->b_state);
658 : 2408 : JBUFFER_TRACE(jh, "ph3: write metadata");
659 : 2408 : flags = jbd2_journal_write_metadata_buffer(commit_transaction,
660 : 2408 : jh, &wbuf[bufs], blocknr);
661 [ - + ]: 2408 : if (flags < 0) {
662 : 0 : jbd2_journal_abort(journal, flags);
663 : 0 : continue;
664 : : }
665 [ + + ]: 2408 : jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
666 : :
667 : : /* Record the new block's tag in the current descriptor
668 : : buffer */
669 : :
670 : 2408 : tag_flag = 0;
671 : 2408 : if (flags & 1)
672 : : tag_flag |= JBD2_FLAG_ESCAPE;
673 [ + + ]: 2408 : if (!first_tag)
674 : 2207 : tag_flag |= JBD2_FLAG_SAME_UUID;
675 : :
676 : 2408 : tag = (journal_block_tag_t *) tagp;
677 [ + - ]: 2408 : write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
678 : 2408 : tag->t_flags = cpu_to_be16(tag_flag);
679 : 2408 : jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
680 : : commit_transaction->t_tid);
681 : 2408 : tagp += tag_bytes;
682 : 2408 : space_left -= tag_bytes;
683 : 2408 : bufs++;
684 : :
685 [ + + ]: 2408 : if (first_tag) {
686 : 201 : memcpy (tagp, journal->j_uuid, 16);
687 : 201 : tagp += 16;
688 : 201 : space_left -= 16;
689 : 201 : first_tag = 0;
690 : : }
691 : :
692 : : /* If there's no more to do, or if the descriptor is full,
693 : : let the IO rip! */
694 : :
695 [ + - ]: 2408 : if (bufs == journal->j_wbufsize ||
696 [ + + ]: 2408 : commit_transaction->t_buffers == NULL ||
697 [ - + ]: 2207 : space_left < tag_bytes + 16 + csum_size) {
698 : :
699 : 201 : jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
700 : :
701 : : /* Write an end-of-descriptor marker before
702 : : submitting the IOs. "tag" still points to
703 : : the last tag we set up. */
704 : :
705 : 201 : tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
706 : 201 : start_journal_io:
707 [ + - ]: 201 : if (descriptor)
708 : 201 : jbd2_descriptor_block_csum_set(journal,
709 : : descriptor);
710 : :
711 [ + + ]: 2810 : for (i = 0; i < bufs; i++) {
712 : 2609 : struct buffer_head *bh = wbuf[i];
713 : : /*
714 : : * Compute checksum.
715 : : */
716 [ + - - + ]: 5218 : if (jbd2_has_feature_checksum(journal)) {
717 : 0 : crc32_sum =
718 : 0 : jbd2_checksum_data(crc32_sum, bh);
719 : : }
720 : :
721 : 2609 : lock_buffer(bh);
722 : 2609 : clear_buffer_dirty(bh);
723 : 2609 : set_buffer_uptodate(bh);
724 : 2609 : bh->b_end_io = journal_end_buffer_io_sync;
725 : 2609 : submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
726 : : }
727 : 201 : cond_resched();
728 : :
729 : : /* Force a new descriptor to be generated next
730 : : time round the loop. */
731 : 201 : descriptor = NULL;
732 : 201 : bufs = 0;
733 : : }
734 : : }
735 : :
736 : 201 : err = journal_finish_inode_data_buffers(journal, commit_transaction);
737 [ - + ]: 201 : if (err) {
738 : 0 : printk(KERN_WARNING
739 : : "JBD2: Detected IO errors while flushing file data "
740 : 0 : "on %s\n", journal->j_devname);
741 [ # # ]: 0 : if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
742 : 0 : jbd2_journal_abort(journal, err);
743 : : err = 0;
744 : : }
745 : :
746 : : /*
747 : : * Get current oldest transaction in the log before we issue flush
748 : : * to the filesystem device. After the flush we can be sure that
749 : : * blocks of all older transactions are checkpointed to persistent
750 : : * storage and we will be safe to update journal start in the
751 : : * superblock with the numbers we get here.
752 : : */
753 : 201 : update_tail =
754 : 201 : jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
755 : :
756 : 201 : write_lock(&journal->j_state_lock);
757 [ - + ]: 201 : if (update_tail) {
758 : 0 : long freed = first_block - journal->j_tail;
759 : :
760 [ # # ]: 0 : if (first_block < journal->j_tail)
761 : 0 : freed += journal->j_last - journal->j_first;
762 : : /* Update tail only if we free significant amount of space */
763 [ # # ]: 0 : if (freed < journal->j_maxlen / 4)
764 : 0 : update_tail = 0;
765 : : }
766 [ - + ]: 201 : J_ASSERT(commit_transaction->t_state == T_COMMIT);
767 : 201 : commit_transaction->t_state = T_COMMIT_DFLUSH;
768 : 201 : write_unlock(&journal->j_state_lock);
769 : :
770 : : /*
771 : : * If the journal is not located on the file system device,
772 : : * then we must flush the file system device before we issue
773 : : * the commit record
774 : : */
775 [ - + ]: 201 : if (commit_transaction->t_need_data_flush &&
776 [ # # ]: 0 : (journal->j_fs_dev != journal->j_dev) &&
777 [ # # ]: 0 : (journal->j_flags & JBD2_BARRIER))
778 : 0 : blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
779 : :
780 : : /* Done it all: now write the commit record asynchronously. */
781 [ + - - + ]: 402 : if (jbd2_has_feature_async_commit(journal)) {
782 : 0 : err = journal_submit_commit_record(journal, commit_transaction,
783 : : &cbh, crc32_sum);
784 [ # # ]: 0 : if (err)
785 : 0 : jbd2_journal_abort(journal, err);
786 : : }
787 : :
788 : 201 : blk_finish_plug(&plug);
789 : :
790 : : /* Lo and behold: we have just managed to send a transaction to
791 : : the log. Before we can commit it, wait for the IO so far to
792 : : complete. Control buffers being written are on the
793 : : transaction's t_log_list queue, and metadata buffers are on
794 : : the io_bufs list.
795 : :
796 : : Wait for the buffers in reverse order. That way we are
797 : : less likely to be woken up until all IOs have completed, and
798 : : so we incur less scheduling load.
799 : : */
800 : :
801 : 201 : jbd_debug(3, "JBD2: commit phase 3\n");
802 : :
803 [ + + ]: 2609 : while (!list_empty(&io_bufs)) {
804 : 2408 : struct buffer_head *bh = list_entry(io_bufs.prev,
805 : : struct buffer_head,
806 : : b_assoc_buffers);
807 : :
808 : 2408 : wait_on_buffer(bh);
809 : 2408 : cond_resched();
810 : :
811 [ - + ]: 2408 : if (unlikely(!buffer_uptodate(bh)))
812 : 0 : err = -EIO;
813 : 2408 : jbd2_unfile_log_bh(bh);
814 : 2408 : stats.run.rs_blocks_logged++;
815 : :
816 : : /*
817 : : * The list contains temporary buffer heads created by
818 : : * jbd2_journal_write_metadata_buffer().
819 : : */
820 : 2408 : BUFFER_TRACE(bh, "dumping temporary bh");
821 : 2408 : __brelse(bh);
822 [ - + ]: 2408 : J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
823 : 2408 : free_buffer_head(bh);
824 : :
825 : : /* We also have to refile the corresponding shadowed buffer */
826 : 2408 : jh = commit_transaction->t_shadow_list->b_tprev;
827 : 2408 : bh = jh2bh(jh);
828 : 2408 : clear_buffer_jwrite(bh);
829 [ - + ]: 2408 : J_ASSERT_BH(bh, buffer_jbddirty(bh));
830 [ - + ]: 2408 : J_ASSERT_BH(bh, !buffer_shadow(bh));
831 : :
832 : : /* The metadata is now released for reuse, but we need
833 : : to remember it against this transaction so that when
834 : : we finally commit, we can do any checkpointing
835 : : required. */
836 : 2408 : JBUFFER_TRACE(jh, "file as BJ_Forget");
837 : 2408 : jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
838 : 2408 : JBUFFER_TRACE(jh, "brelse shadowed buffer");
839 : 2408 : __brelse(bh);
840 : : }
841 : :
842 [ - + ]: 201 : J_ASSERT (commit_transaction->t_shadow_list == NULL);
843 : :
844 : : jbd_debug(3, "JBD2: commit phase 4\n");
845 : :
846 : : /* Here we wait for the revoke record and descriptor record buffers */
847 [ + + ]: 402 : while (!list_empty(&log_bufs)) {
848 : 201 : struct buffer_head *bh;
849 : :
850 : 201 : bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
851 : 201 : wait_on_buffer(bh);
852 : 201 : cond_resched();
853 : :
854 [ - + ]: 201 : if (unlikely(!buffer_uptodate(bh)))
855 : 0 : err = -EIO;
856 : :
857 : 201 : BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
858 : 201 : clear_buffer_jwrite(bh);
859 : 201 : jbd2_unfile_log_bh(bh);
860 : 201 : stats.run.rs_blocks_logged++;
861 : 201 : __brelse(bh); /* One for getblk */
862 : : /* AKPM: bforget here */
863 : : }
864 : :
865 [ - + ]: 201 : if (err)
866 : 0 : jbd2_journal_abort(journal, err);
867 : :
868 : 201 : jbd_debug(3, "JBD2: commit phase 5\n");
869 : 201 : write_lock(&journal->j_state_lock);
870 [ - + ]: 201 : J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
871 : 201 : commit_transaction->t_state = T_COMMIT_JFLUSH;
872 : 201 : write_unlock(&journal->j_state_lock);
873 : :
874 [ + - + - ]: 402 : if (!jbd2_has_feature_async_commit(journal)) {
875 : 201 : err = journal_submit_commit_record(journal, commit_transaction,
876 : : &cbh, crc32_sum);
877 [ - + ]: 201 : if (err)
878 : 0 : jbd2_journal_abort(journal, err);
879 : : }
880 [ + - ]: 201 : if (cbh)
881 : 201 : err = journal_wait_on_commit_record(journal, cbh);
882 : 201 : stats.run.rs_blocks_logged++;
883 [ + - - + ]: 402 : if (jbd2_has_feature_async_commit(journal) &&
884 [ # # ]: 0 : journal->j_flags & JBD2_BARRIER) {
885 : 0 : blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
886 : : }
887 : :
888 [ - + ]: 201 : if (err)
889 : 0 : jbd2_journal_abort(journal, err);
890 : :
891 [ - + ]: 201 : WARN_ON_ONCE(
892 : : atomic_read(&commit_transaction->t_outstanding_credits) < 0);
893 : :
894 : : /*
895 : : * Now disk caches for filesystem device are flushed so we are safe to
896 : : * erase checkpointed transactions from the log by updating journal
897 : : * superblock.
898 : : */
899 [ - + ]: 201 : if (update_tail)
900 : 0 : jbd2_update_log_tail(journal, first_tid, first_block);
901 : :
902 : : /* End of a transaction! Finally, we can do checkpoint
903 : : processing: any buffers committed as a result of this
904 : : transaction can be removed from any checkpoint list it was on
905 : : before. */
906 : :
907 : 201 : jbd_debug(3, "JBD2: commit phase 6\n");
908 : :
909 [ - + ]: 201 : J_ASSERT(list_empty(&commit_transaction->t_inode_list));
910 [ - + ]: 201 : J_ASSERT(commit_transaction->t_buffers == NULL);
911 [ - + ]: 201 : J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
912 [ - + ]: 201 : J_ASSERT(commit_transaction->t_shadow_list == NULL);
913 : :
914 : 201 : restart_loop:
915 : : /*
916 : : * As there are other places (journal_unmap_buffer()) adding buffers
917 : : * to this list we have to be careful and hold the j_list_lock.
918 : : */
919 : 201 : spin_lock(&journal->j_list_lock);
920 [ + + ]: 2609 : while (commit_transaction->t_forget) {
921 : 2408 : transaction_t *cp_transaction;
922 : 2408 : struct buffer_head *bh;
923 : 2408 : int try_to_free = 0;
924 : 2408 : bool drop_ref;
925 : :
926 : 2408 : jh = commit_transaction->t_forget;
927 : 2408 : spin_unlock(&journal->j_list_lock);
928 : 2408 : bh = jh2bh(jh);
929 : : /*
930 : : * Get a reference so that bh cannot be freed before we are
931 : : * done with it.
932 : : */
933 : 2408 : get_bh(bh);
934 : 2408 : spin_lock(&jh->b_state_lock);
935 [ - + ]: 2408 : J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
936 : :
937 : : /*
938 : : * If there is undo-protected committed data against
939 : : * this buffer, then we can remove it now. If it is a
940 : : * buffer needing such protection, the old frozen_data
941 : : * field now points to a committed version of the
942 : : * buffer, so rotate that field to the new committed
943 : : * data.
944 : : *
945 : : * Otherwise, we can just throw away the frozen data now.
946 : : *
947 : : * We also know that the frozen data has already fired
948 : : * its triggers if they exist, so we can clear that too.
949 : : */
950 [ - + ]: 2408 : if (jh->b_committed_data) {
951 : 0 : jbd2_free(jh->b_committed_data, bh->b_size);
952 : 0 : jh->b_committed_data = NULL;
953 [ # # ]: 0 : if (jh->b_frozen_data) {
954 : 0 : jh->b_committed_data = jh->b_frozen_data;
955 : 0 : jh->b_frozen_data = NULL;
956 : 0 : jh->b_frozen_triggers = NULL;
957 : : }
958 [ - + ]: 2408 : } else if (jh->b_frozen_data) {
959 : 0 : jbd2_free(jh->b_frozen_data, bh->b_size);
960 : 0 : jh->b_frozen_data = NULL;
961 : 0 : jh->b_frozen_triggers = NULL;
962 : : }
963 : :
964 : 2408 : spin_lock(&journal->j_list_lock);
965 : 2408 : cp_transaction = jh->b_cp_transaction;
966 [ + + ]: 2408 : if (cp_transaction) {
967 : 1278 : JBUFFER_TRACE(jh, "remove from old cp transaction");
968 : 1278 : cp_transaction->t_chp_stats.cs_dropped++;
969 : 1278 : __jbd2_journal_remove_checkpoint(jh);
970 : : }
971 : :
972 : : /* Only re-checkpoint the buffer_head if it is marked
973 : : * dirty. If the buffer was added to the BJ_Forget list
974 : : * by jbd2_journal_forget, it may no longer be dirty and
975 : : * there's no point in keeping a checkpoint record for
976 : : * it. */
977 : :
978 : : /*
979 : : * A buffer which has been freed while still being journaled
980 : : * by a previous transaction, refile the buffer to BJ_Forget of
981 : : * the running transaction. If the just committed transaction
982 : : * contains "add to orphan" operation, we can completely
983 : : * invalidate the buffer now. We are rather thorough in that
984 : : * since the buffer may be still accessible when blocksize <
985 : : * pagesize and it is attached to the last partial page.
986 : : */
987 [ - + - - ]: 2408 : if (buffer_freed(bh) && !jh->b_next_transaction) {
988 : 0 : struct address_space *mapping;
989 : :
990 : 0 : clear_buffer_freed(bh);
991 : 0 : clear_buffer_jbddirty(bh);
992 : :
993 : : /*
994 : : * Block device buffers need to stay mapped all the
995 : : * time, so it is enough to clear buffer_jbddirty and
996 : : * buffer_freed bits. For the file mapping buffers (i.e.
997 : : * journalled data) we need to unmap buffer and clear
998 : : * more bits. We also need to be careful about the check
999 : : * because the data page mapping can get cleared under
1000 : : * our hands, in which case we also need not clear more bits
1001 : : * because the page and buffers will be freed and can
1002 : : * never be reused once we are done with them.
1003 : : */
1004 [ # # ]: 0 : mapping = READ_ONCE(bh->b_page->mapping);
1005 [ # # # # ]: 0 : if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1006 : 0 : clear_buffer_mapped(bh);
1007 : 0 : clear_buffer_new(bh);
1008 : 0 : clear_buffer_req(bh);
1009 : 0 : bh->b_bdev = NULL;
1010 : : }
1011 : : }
1012 : :
1013 [ + - ]: 2408 : if (buffer_jbddirty(bh)) {
1014 : 2408 : JBUFFER_TRACE(jh, "add to new checkpointing trans");
1015 : 2408 : __jbd2_journal_insert_checkpoint(jh, commit_transaction);
1016 [ - + ]: 2408 : if (is_journal_aborted(journal))
1017 : 0 : clear_buffer_jbddirty(bh);
1018 : : } else {
1019 [ # # ]: 0 : J_ASSERT_BH(bh, !buffer_dirty(bh));
1020 : : /*
1021 : : * The buffer on BJ_Forget list and not jbddirty means
1022 : : * it has been freed by this transaction and hence it
1023 : : * could not have been reallocated until this
1024 : : * transaction has committed. *BUT* it could be
1025 : : * reallocated once we have written all the data to
1026 : : * disk and before we process the buffer on BJ_Forget
1027 : : * list.
1028 : : */
1029 [ # # ]: 0 : if (!jh->b_next_transaction)
1030 : 0 : try_to_free = 1;
1031 : : }
1032 : 2408 : JBUFFER_TRACE(jh, "refile or unfile buffer");
1033 : 2408 : drop_ref = __jbd2_journal_refile_buffer(jh);
1034 : 2408 : spin_unlock(&jh->b_state_lock);
1035 [ + + ]: 2408 : if (drop_ref)
1036 : 2407 : jbd2_journal_put_journal_head(jh);
1037 [ - + ]: 2408 : if (try_to_free)
1038 : 0 : release_buffer_page(bh); /* Drops bh reference */
1039 : : else
1040 : 2408 : __brelse(bh);
1041 : 2408 : cond_resched_lock(&journal->j_list_lock);
1042 : : }
1043 : 201 : spin_unlock(&journal->j_list_lock);
1044 : : /*
1045 : : * This is a bit sleazy. We use j_list_lock to protect transition
1046 : : * of a transaction into T_FINISHED state and calling
1047 : : * __jbd2_journal_drop_transaction(). Otherwise we could race with
1048 : : * other checkpointing code processing the transaction...
1049 : : */
1050 : 201 : write_lock(&journal->j_state_lock);
1051 : 201 : spin_lock(&journal->j_list_lock);
1052 : : /*
1053 : : * Now recheck if some buffers did not get attached to the transaction
1054 : : * while the lock was dropped...
1055 : : */
1056 [ - + ]: 201 : if (commit_transaction->t_forget) {
1057 : 0 : spin_unlock(&journal->j_list_lock);
1058 : 0 : write_unlock(&journal->j_state_lock);
1059 : 0 : goto restart_loop;
1060 : : }
1061 : :
1062 : : /* Add the transaction to the checkpoint list
1063 : : * __journal_remove_checkpoint() can not destroy transaction
1064 : : * under us because it is not marked as T_FINISHED yet */
1065 [ + + ]: 201 : if (journal->j_checkpoint_transactions == NULL) {
1066 : 28 : journal->j_checkpoint_transactions = commit_transaction;
1067 : 28 : commit_transaction->t_cpnext = commit_transaction;
1068 : 28 : commit_transaction->t_cpprev = commit_transaction;
1069 : : } else {
1070 : 173 : commit_transaction->t_cpnext =
1071 : : journal->j_checkpoint_transactions;
1072 : 173 : commit_transaction->t_cpprev =
1073 : 173 : commit_transaction->t_cpnext->t_cpprev;
1074 : 173 : commit_transaction->t_cpnext->t_cpprev =
1075 : : commit_transaction;
1076 : 173 : commit_transaction->t_cpprev->t_cpnext =
1077 : : commit_transaction;
1078 : : }
1079 : 201 : spin_unlock(&journal->j_list_lock);
1080 : :
1081 : : /* Done with this transaction! */
1082 : :
1083 : 201 : jbd_debug(3, "JBD2: commit phase 7\n");
1084 : :
1085 [ - + ]: 201 : J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
1086 : :
1087 : 201 : commit_transaction->t_start = jiffies;
1088 [ + - ]: 201 : stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
1089 : : commit_transaction->t_start);
1090 : :
1091 : : /*
1092 : : * File the transaction statistics
1093 : : */
1094 : 201 : stats.ts_tid = commit_transaction->t_tid;
1095 : 402 : stats.run.rs_handle_count =
1096 : 201 : atomic_read(&commit_transaction->t_handle_count);
1097 : 201 : trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
1098 : 201 : commit_transaction->t_tid, &stats.run);
1099 : 201 : stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
1100 : :
1101 : 201 : commit_transaction->t_state = T_COMMIT_CALLBACK;
1102 [ - + ]: 201 : J_ASSERT(commit_transaction == journal->j_committing_transaction);
1103 : 201 : journal->j_commit_sequence = commit_transaction->t_tid;
1104 : 201 : journal->j_committing_transaction = NULL;
1105 [ + + ]: 201 : commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1106 : :
1107 : : /*
1108 : : * weight the commit time higher than the average time so we don't
1109 : : * react too strongly to vast changes in the commit time
1110 : : */
1111 [ + + ]: 201 : if (likely(journal->j_average_commit_time))
1112 : 173 : journal->j_average_commit_time = (commit_time +
1113 : 173 : journal->j_average_commit_time*3) / 4;
1114 : : else
1115 : 28 : journal->j_average_commit_time = commit_time;
1116 : :
1117 : 201 : write_unlock(&journal->j_state_lock);
1118 : :
1119 [ + - ]: 201 : if (journal->j_commit_callback)
1120 : 201 : journal->j_commit_callback(journal, commit_transaction);
1121 : :
1122 : 201 : trace_jbd2_end_commit(journal, commit_transaction);
1123 : : jbd_debug(1, "JBD2: commit %d complete, head %d\n",
1124 : 201 : journal->j_commit_sequence, journal->j_tail_sequence);
1125 : :
1126 : 201 : write_lock(&journal->j_state_lock);
1127 : 201 : spin_lock(&journal->j_list_lock);
1128 : 201 : commit_transaction->t_state = T_FINISHED;
1129 : : /* Check if the transaction can be dropped now that we are finished */
1130 [ - + ]: 201 : if (commit_transaction->t_checkpoint_list == NULL &&
1131 [ # # ]: 0 : commit_transaction->t_checkpoint_io_list == NULL) {
1132 : 0 : __jbd2_journal_drop_transaction(journal, commit_transaction);
1133 : 0 : jbd2_journal_free_transaction(commit_transaction);
1134 : : }
1135 : 201 : spin_unlock(&journal->j_list_lock);
1136 : 201 : write_unlock(&journal->j_state_lock);
1137 : 201 : wake_up(&journal->j_wait_done_commit);
1138 : :
1139 : : /*
1140 : : * Calculate overall stats
1141 : : */
1142 : 201 : spin_lock(&journal->j_history_lock);
1143 : 201 : journal->j_stats.ts_tid++;
1144 : 201 : journal->j_stats.ts_requested += stats.ts_requested;
1145 : 201 : journal->j_stats.run.rs_wait += stats.run.rs_wait;
1146 : 201 : journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
1147 : 201 : journal->j_stats.run.rs_running += stats.run.rs_running;
1148 : 201 : journal->j_stats.run.rs_locked += stats.run.rs_locked;
1149 : 201 : journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
1150 : 201 : journal->j_stats.run.rs_logging += stats.run.rs_logging;
1151 : 201 : journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
1152 : 201 : journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
1153 : 201 : journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
1154 : 201 : spin_unlock(&journal->j_history_lock);
1155 : 201 : }
|