LCOV - code coverage report

Current view:  top level - block - blk-core.c (source / functions)
Test:          combined.info
Date:          2022-03-28 15:32:58

                     Hit      Total    Coverage
       Lines:        319      627      50.9 %
       Functions:     32       57      56.1 %
       Branches:     135      443      30.5 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  * Copyright (C) 1991, 1992 Linus Torvalds
       4                 :            :  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
       5                 :            :  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
       6                 :            :  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
       7                 :            :  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
       8                 :            :  *      -  July2000
       9                 :            :  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
      10                 :            :  */
      11                 :            : 
      12                 :            : /*
      13                 :            :  * This handles all read/write requests to block devices
      14                 :            :  */
      15                 :            : #include <linux/kernel.h>
      16                 :            : #include <linux/module.h>
      17                 :            : #include <linux/backing-dev.h>
      18                 :            : #include <linux/bio.h>
      19                 :            : #include <linux/blkdev.h>
      20                 :            : #include <linux/blk-mq.h>
      21                 :            : #include <linux/highmem.h>
      22                 :            : #include <linux/mm.h>
      23                 :            : #include <linux/kernel_stat.h>
      24                 :            : #include <linux/string.h>
      25                 :            : #include <linux/init.h>
      26                 :            : #include <linux/completion.h>
      27                 :            : #include <linux/slab.h>
      28                 :            : #include <linux/swap.h>
      29                 :            : #include <linux/writeback.h>
      30                 :            : #include <linux/task_io_accounting_ops.h>
      31                 :            : #include <linux/fault-inject.h>
      32                 :            : #include <linux/list_sort.h>
      33                 :            : #include <linux/delay.h>
      34                 :            : #include <linux/ratelimit.h>
      35                 :            : #include <linux/pm_runtime.h>
      36                 :            : #include <linux/blk-cgroup.h>
      37                 :            : #include <linux/t10-pi.h>
      38                 :            : #include <linux/debugfs.h>
      39                 :            : #include <linux/bpf.h>
      40                 :            : #include <linux/psi.h>
      41                 :            : 
      42                 :            : #define CREATE_TRACE_POINTS
      43                 :            : #include <trace/events/block.h>
      44                 :            : 
      45                 :            : #include "blk.h"
      46                 :            : #include "blk-mq.h"
      47                 :            : #include "blk-mq-sched.h"
      48                 :            : #include "blk-pm.h"
      49                 :            : #include "blk-rq-qos.h"
      50                 :            : 
      51                 :            : #ifdef CONFIG_DEBUG_FS
      52                 :            : struct dentry *blk_debugfs_root;
      53                 :            : #endif
      54                 :            : 
      55                 :            : EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
      56                 :            : EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
      57                 :            : EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
      58                 :            : EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
      59                 :            : EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
      60                 :            : 
      61                 :            : DEFINE_IDA(blk_queue_ida);
      62                 :            : 
      63                 :            : /*
      64                 :            :  * For queue allocation
      65                 :            :  */
      66                 :            : struct kmem_cache *blk_requestq_cachep;
      67                 :            : 
      68                 :            : /*
      69                 :            :  * Controlling structure to kblockd
      70                 :            :  */
      71                 :            : static struct workqueue_struct *kblockd_workqueue;
      72                 :            : 
      73                 :            : /**
      74                 :            :  * blk_queue_flag_set - atomically set a queue flag
      75                 :            :  * @flag: flag to be set
      76                 :            :  * @q: request queue
      77                 :            :  */
      78                 :       2016 : void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
      79                 :            : {
      80                 :       2016 :         set_bit(flag, &q->queue_flags);
      81                 :       2016 : }
      82                 :            : EXPORT_SYMBOL(blk_queue_flag_set);
      83                 :            : 
      84                 :            : /**
      85                 :            :  * blk_queue_flag_clear - atomically clear a queue flag
      86                 :            :  * @flag: flag to be cleared
      87                 :            :  * @q: request queue
      88                 :            :  */
      89                 :        644 : void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
      90                 :            : {
      91                 :        644 :         clear_bit(flag, &q->queue_flags);
      92                 :        644 : }
      93                 :            : EXPORT_SYMBOL(blk_queue_flag_clear);
      94                 :            : 
      95                 :            : /**
      96                 :            :  * blk_queue_flag_test_and_set - atomically test and set a queue flag
      97                 :            :  * @flag: flag to be set
      98                 :            :  * @q: request queue
      99                 :            :  *
     100                 :            :  * Returns the previous value of @flag - 0 if the flag was not set and 1 if
     101                 :            :  * the flag was already set.
     102                 :            :  */
     103                 :          0 : bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
     104                 :            : {
     105                 :          0 :         return test_and_set_bit(flag, &q->queue_flags);
     106                 :            : }
     107                 :            : EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
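
/*
 * Illustrative usage sketch (hypothetical caller, not part of the measured
 * blk-core.c source; my_enable_stats() is an invented helper): the three
 * helpers above give callers an atomic way to flip queue flags, e.g.:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STATS, q))
 *		my_enable_stats(q);
 */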
     108                 :            : 
     109                 :        458 : void blk_rq_init(struct request_queue *q, struct request *rq)
     110                 :            : {
     111                 :        458 :         memset(rq, 0, sizeof(*rq));
     112                 :            : 
     113                 :        458 :         INIT_LIST_HEAD(&rq->queuelist);
     114                 :        458 :         rq->q = q;
     115                 :        458 :         rq->__sector = (sector_t) -1;
     116                 :        458 :         INIT_HLIST_NODE(&rq->hash);
     117                 :        458 :         RB_CLEAR_NODE(&rq->rb_node);
     118                 :        458 :         rq->tag = -1;
     119                 :        458 :         rq->internal_tag = -1;
     120                 :        458 :         rq->start_time_ns = ktime_get_ns();
     121                 :        458 :         rq->part = NULL;
     122                 :        458 :         refcount_set(&rq->ref, 1);
     123                 :        458 : }
     124                 :            : EXPORT_SYMBOL(blk_rq_init);
     125                 :            : 
     126                 :            : #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
     127                 :            : static const char *const blk_op_name[] = {
     128                 :            :         REQ_OP_NAME(READ),
     129                 :            :         REQ_OP_NAME(WRITE),
     130                 :            :         REQ_OP_NAME(FLUSH),
     131                 :            :         REQ_OP_NAME(DISCARD),
     132                 :            :         REQ_OP_NAME(SECURE_ERASE),
     133                 :            :         REQ_OP_NAME(ZONE_RESET),
     134                 :            :         REQ_OP_NAME(ZONE_RESET_ALL),
     135                 :            :         REQ_OP_NAME(ZONE_OPEN),
     136                 :            :         REQ_OP_NAME(ZONE_CLOSE),
     137                 :            :         REQ_OP_NAME(ZONE_FINISH),
     138                 :            :         REQ_OP_NAME(WRITE_SAME),
     139                 :            :         REQ_OP_NAME(WRITE_ZEROES),
     140                 :            :         REQ_OP_NAME(SCSI_IN),
     141                 :            :         REQ_OP_NAME(SCSI_OUT),
     142                 :            :         REQ_OP_NAME(DRV_IN),
     143                 :            :         REQ_OP_NAME(DRV_OUT),
     144                 :            : };
     145                 :            : #undef REQ_OP_NAME
     146                 :            : 
     147                 :            : /**
     148                 :            :  * blk_op_str - Return string XXX in the REQ_OP_XXX.
     149                 :            :  * @op: REQ_OP_XXX.
     150                 :            :  *
     151                 :            :  * Description: Centralize block layer function to convert REQ_OP_XXX into
     152                 :            :  * string format. Useful in the debugging and tracing bio or request. For
     153                 :            :  * invalid REQ_OP_XXX it returns string "UNKNOWN".
     154                 :            :  */
     155                 :          0 : inline const char *blk_op_str(unsigned int op)
     156                 :            : {
     157                 :          0 :         const char *op_str = "UNKNOWN";
     158                 :            : 
      159   [ #  #  #  #  #  # ]:          0 :         if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
     160                 :          0 :                 op_str = blk_op_name[op];
     161                 :            : 
     162                 :          0 :         return op_str;
     163                 :            : }
     164                 :            : EXPORT_SYMBOL_GPL(blk_op_str);
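
/*
 * Illustrative sketch (hypothetical debug snippet, not part of the measured
 * source): blk_op_str() is meant for tracing and debug output, e.g. logging
 * the operation and position of a request:
 *
 *	pr_debug("completing %s request at sector %llu\n",
 *		 blk_op_str(req_op(rq)),
 *		 (unsigned long long)blk_rq_pos(rq));
 */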
     165                 :            : 
     166                 :            : static const struct {
     167                 :            :         int             errno;
     168                 :            :         const char      *name;
     169                 :            : } blk_errors[] = {
     170                 :            :         [BLK_STS_OK]            = { 0,          "" },
     171                 :            :         [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
     172                 :            :         [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
     173                 :            :         [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
     174                 :            :         [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
     175                 :            :         [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
     176                 :            :         [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
     177                 :            :         [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
     178                 :            :         [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
     179                 :            :         [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
     180                 :            :         [BLK_STS_DEV_RESOURCE]  = { -EBUSY,     "device resource" },
     181                 :            :         [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
     182                 :            : 
     183                 :            :         /* device mapper special case, should not leak out: */
     184                 :            :         [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
     185                 :            : 
     186                 :            :         /* everything else not covered above: */
     187                 :            :         [BLK_STS_IOERR]         = { -EIO,       "I/O" },
     188                 :            : };
     189                 :            : 
     190                 :          0 : blk_status_t errno_to_blk_status(int errno)
     191                 :            : {
     192                 :          0 :         int i;
     193                 :            : 
     194         [ #  # ]:          0 :         for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
     195         [ #  # ]:          0 :                 if (blk_errors[i].errno == errno)
     196                 :          0 :                         return (__force blk_status_t)i;
     197                 :            :         }
     198                 :            : 
     199                 :            :         return BLK_STS_IOERR;
     200                 :            : }
     201                 :            : EXPORT_SYMBOL_GPL(errno_to_blk_status);
     202                 :            : 
     203                 :      54140 : int blk_status_to_errno(blk_status_t status)
     204                 :            : {
     205                 :      54140 :         int idx = (__force int)status;
     206                 :            : 
      207   [ +  -  -  +  +  - ]:      54140 :         if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
     208                 :            :                 return -EIO;
     209                 :      54140 :         return blk_errors[idx].errno;
     210                 :            : }
     211                 :            : EXPORT_SYMBOL_GPL(blk_status_to_errno);
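
/*
 * Illustrative sketch (hypothetical driver snippet; my_do_transfer() is an
 * invented helper): errno_to_blk_status() and blk_status_to_errno() translate
 * between driver-internal errno values and blk_status_t at the block-layer
 * boundary, e.g. when completing a request:
 *
 *	int err = my_do_transfer(rq);
 *	blk_mq_end_request(rq, errno_to_blk_status(err));
 */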
     212                 :            : 
     213                 :          0 : static void print_req_error(struct request *req, blk_status_t status,
     214                 :            :                 const char *caller)
     215                 :            : {
     216                 :          0 :         int idx = (__force int)status;
     217                 :            : 
     218   [ #  #  #  # ]:          0 :         if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
     219                 :            :                 return;
     220                 :            : 
      221   [ #  #  #  #  #  # ]:          0 :         printk_ratelimited(KERN_ERR
     222                 :            :                 "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
     223                 :            :                 "phys_seg %u prio class %u\n",
     224                 :            :                 caller, blk_errors[idx].name,
     225                 :            :                 req->rq_disk ? req->rq_disk->disk_name : "?",
     226                 :            :                 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
     227                 :            :                 req->cmd_flags & ~REQ_OP_MASK,
     228                 :            :                 req->nr_phys_segments,
     229                 :            :                 IOPRIO_PRIO_CLASS(req->ioprio));
     230                 :            : }
     231                 :            : 
     232                 :      46916 : static void req_bio_endio(struct request *rq, struct bio *bio,
     233                 :            :                           unsigned int nbytes, blk_status_t error)
     234                 :            : {
     235         [ -  + ]:      46916 :         if (error)
     236                 :          0 :                 bio->bi_status = error;
     237                 :            : 
     238         [ +  + ]:      46916 :         if (unlikely(rq->rq_flags & RQF_QUIET))
     239                 :       3052 :                 bio_set_flag(bio, BIO_QUIET);
     240                 :            : 
     241                 :      46916 :         bio_advance(bio, nbytes);
     242                 :            : 
     243                 :            :         /* don't actually finish bio if it's part of flush sequence */
     244   [ +  -  +  + ]:      46916 :         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
     245                 :      46659 :                 bio_endio(bio);
     246                 :      46916 : }
     247                 :            : 
     248                 :          0 : void blk_dump_rq_flags(struct request *rq, char *msg)
     249                 :            : {
     250                 :          0 :         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
     251                 :          0 :                 rq->rq_disk ? rq->rq_disk->disk_name : "?",
     252         [ #  # ]:          0 :                 (unsigned long long) rq->cmd_flags);
     253                 :            : 
     254         [ #  # ]:          0 :         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
     255                 :          0 :                (unsigned long long)blk_rq_pos(rq),
     256                 :            :                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
     257                 :          0 :         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
     258                 :            :                rq->bio, rq->biotail, blk_rq_bytes(rq));
     259                 :          0 : }
     260                 :            : EXPORT_SYMBOL(blk_dump_rq_flags);
     261                 :            : 
     262                 :            : /**
     263                 :            :  * blk_sync_queue - cancel any pending callbacks on a queue
     264                 :            :  * @q: the queue
     265                 :            :  *
     266                 :            :  * Description:
     267                 :            :  *     The block layer may perform asynchronous callback activity
     268                 :            :  *     on a queue, such as calling the unplug function after a timeout.
     269                 :            :  *     A block device may call blk_sync_queue to ensure that any
     270                 :            :  *     such activity is cancelled, thus allowing it to release resources
     271                 :            :  *     that the callbacks might use. The caller must already have made sure
     272                 :            :  *     that its ->make_request_fn will not re-add plugging prior to calling
     273                 :            :  *     this function.
     274                 :            :  *
     275                 :            :  *     This function does not cancel any asynchronous activity arising
     276                 :            :  *     out of elevator or throttling code. That would require elevator_exit()
     277                 :            :  *     and blkcg_exit_queue() to be called with queue lock initialized.
     278                 :            :  *
     279                 :            :  */
     280                 :          0 : void blk_sync_queue(struct request_queue *q)
     281                 :            : {
     282                 :          0 :         del_timer_sync(&q->timeout);
     283                 :          0 :         cancel_work_sync(&q->timeout_work);
     284                 :          0 : }
     285                 :            : EXPORT_SYMBOL(blk_sync_queue);
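
/*
 * Illustrative sketch (hypothetical teardown path; mydev->poll_work and
 * mydev->queue are invented): a driver typically quiesces its own deferred
 * work first and then calls blk_sync_queue() so no timeout or unplug
 * callbacks remain in flight before it frees resources:
 *
 *	cancel_delayed_work_sync(&mydev->poll_work);
 *	blk_sync_queue(mydev->queue);
 */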
     286                 :            : 
     287                 :            : /**
     288                 :            :  * blk_set_pm_only - increment pm_only counter
     289                 :            :  * @q: request queue pointer
     290                 :            :  */
     291                 :          0 : void blk_set_pm_only(struct request_queue *q)
     292                 :            : {
     293                 :          0 :         atomic_inc(&q->pm_only);
     294                 :          0 : }
     295                 :            : EXPORT_SYMBOL_GPL(blk_set_pm_only);
     296                 :            : 
     297                 :          0 : void blk_clear_pm_only(struct request_queue *q)
     298                 :            : {
     299                 :          0 :         int pm_only;
     300                 :            : 
     301                 :          0 :         pm_only = atomic_dec_return(&q->pm_only);
     302         [ #  # ]:          0 :         WARN_ON_ONCE(pm_only < 0);
     303         [ #  # ]:          0 :         if (pm_only == 0)
     304                 :          0 :                 wake_up_all(&q->mq_freeze_wq);
     305                 :          0 : }
     306                 :            : EXPORT_SYMBOL_GPL(blk_clear_pm_only);
     307                 :            : 
     308                 :          0 : void blk_put_queue(struct request_queue *q)
     309                 :            : {
     310                 :          0 :         kobject_put(&q->kobj);
     311                 :          0 : }
     312                 :            : EXPORT_SYMBOL(blk_put_queue);
     313                 :            : 
     314                 :          0 : void blk_set_queue_dying(struct request_queue *q)
     315                 :            : {
     316                 :          0 :         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
     317                 :            : 
     318                 :            :         /*
     319                 :            :          * When queue DYING flag is set, we need to block new req
     320                 :            :          * entering queue, so we call blk_freeze_queue_start() to
     321                 :            :          * prevent I/O from crossing blk_queue_enter().
     322                 :            :          */
     323                 :          0 :         blk_freeze_queue_start(q);
     324                 :            : 
     325         [ #  # ]:          0 :         if (queue_is_mq(q))
     326                 :          0 :                 blk_mq_wake_waiters(q);
     327                 :            : 
     328                 :            :         /* Make blk_queue_enter() reexamine the DYING flag. */
     329                 :          0 :         wake_up_all(&q->mq_freeze_wq);
     330                 :          0 : }
     331                 :            : EXPORT_SYMBOL_GPL(blk_set_queue_dying);
     332                 :            : 
     333                 :            : /**
     334                 :            :  * blk_cleanup_queue - shutdown a request queue
     335                 :            :  * @q: request queue to shutdown
     336                 :            :  *
     337                 :            :  * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
     338                 :            :  * put it.  All future requests will be failed immediately with -ENODEV.
     339                 :            :  */
     340                 :          0 : void blk_cleanup_queue(struct request_queue *q)
     341                 :            : {
     342         [ #  # ]:          0 :         WARN_ON_ONCE(blk_queue_registered(q));
     343                 :            : 
     344                 :            :         /* mark @q DYING, no new request or merges will be allowed afterwards */
     345                 :          0 :         blk_set_queue_dying(q);
     346                 :            : 
     347                 :          0 :         blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
     348                 :          0 :         blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
     349                 :          0 :         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
     350                 :            : 
     351                 :            :         /*
     352                 :            :          * Drain all requests queued before DYING marking. Set DEAD flag to
     353                 :            :          * prevent that blk_mq_run_hw_queues() accesses the hardware queues
     354                 :            :          * after draining finished.
     355                 :            :          */
     356                 :          0 :         blk_freeze_queue(q);
     357                 :            : 
     358                 :          0 :         rq_qos_exit(q);
     359                 :            : 
     360                 :          0 :         blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
     361                 :            : 
     362                 :            :         /* for synchronous bio-based driver finish in-flight integrity i/o */
     363                 :          0 :         blk_flush_integrity();
     364                 :            : 
     365                 :            :         /* @q won't process any more request, flush async actions */
     366                 :          0 :         del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
     367                 :          0 :         blk_sync_queue(q);
     368                 :            : 
     369         [ #  # ]:          0 :         if (queue_is_mq(q))
     370                 :          0 :                 blk_mq_exit_queue(q);
     371                 :            : 
     372                 :            :         /*
     373                 :            :          * In theory, request pool of sched_tags belongs to request queue.
     374                 :            :          * However, the current implementation requires tag_set for freeing
     375                 :            :          * requests, so free the pool now.
     376                 :            :          *
     377                 :            :          * Queue has become frozen, there can't be any in-queue requests, so
     378                 :            :          * it is safe to free requests now.
     379                 :            :          */
     380                 :          0 :         mutex_lock(&q->sysfs_lock);
     381         [ #  # ]:          0 :         if (q->elevator)
     382                 :          0 :                 blk_mq_sched_free_requests(q);
     383                 :          0 :         mutex_unlock(&q->sysfs_lock);
     384                 :            : 
     385                 :          0 :         percpu_ref_exit(&q->q_usage_counter);
     386                 :            : 
     387                 :            :         /* @q is and will stay empty, shutdown and put */
     388                 :          0 :         blk_put_queue(q);
     389                 :          0 : }
     390                 :            : EXPORT_SYMBOL(blk_cleanup_queue);
     391                 :            : 
     392                 :         28 : struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
     393                 :            : {
     394                 :         28 :         return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
     395                 :            : }
     396                 :            : EXPORT_SYMBOL(blk_alloc_queue);
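
/*
 * Illustrative sketch (hypothetical bio-based driver, not part of the
 * measured source): a queue obtained here is eventually torn down with
 * blk_cleanup_queue() above, usually from the driver's remove path:
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(q);
 */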
     397                 :            : 
     398                 :            : /**
     399                 :            :  * blk_queue_enter() - try to increase q->q_usage_counter
     400                 :            :  * @q: request queue pointer
     401                 :            :  * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
     402                 :            :  */
     403                 :      47135 : int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
     404                 :            : {
     405                 :      47135 :         const bool pm = flags & BLK_MQ_REQ_PREEMPT;
     406                 :            : 
     407                 :      47163 :         while (true) {
     408                 :      47163 :                 bool success = false;
     409                 :            : 
     410                 :      47163 :                 rcu_read_lock();
     411         [ +  + ]:      47163 :                 if (percpu_ref_tryget_live(&q->q_usage_counter)) {
     412                 :            :                         /*
     413                 :            :                          * The code that increments the pm_only counter is
     414                 :            :                          * responsible for ensuring that that counter is
     415                 :            :                          * globally visible before the queue is unfrozen.
     416                 :            :                          */
     417   [ +  +  +  - ]:      47135 :                         if (pm || !blk_queue_pm_only(q)) {
     418                 :      47135 :                                 success = true;
     419                 :            :                         } else {
     420                 :          0 :                                 percpu_ref_put(&q->q_usage_counter);
     421                 :            :                         }
     422                 :            :                 }
     423                 :      47163 :                 rcu_read_unlock();
     424                 :            : 
     425                 :         28 :                 if (success)
     426                 :      47135 :                         return 0;
     427                 :            : 
     428         [ +  - ]:         28 :                 if (flags & BLK_MQ_REQ_NOWAIT)
     429                 :            :                         return -EBUSY;
     430                 :            : 
     431                 :            :                 /*
     432                 :            :                  * read pair of barrier in blk_freeze_queue_start(),
     433                 :            :                  * we need to order reading __PERCPU_REF_DEAD flag of
     434                 :            :                  * .q_usage_counter and reading .mq_freeze_depth or
     435                 :            :                  * queue dying flag, otherwise the following wait may
     436                 :            :                  * never return if the two reads are reordered.
     437                 :            :                  */
     438                 :         28 :                 smp_rmb();
     439                 :            : 
      440   [ -  +  -  -  -  -  -  -  +  -  +  +  -  +  -  -  -  -  +  - ]:         56 :                 wait_event(q->mq_freeze_wq,
     441                 :            :                            (!q->mq_freeze_depth &&
     442                 :            :                             (pm || (blk_pm_request_resume(q),
     443                 :            :                                     !blk_queue_pm_only(q)))) ||
     444                 :            :                            blk_queue_dying(q));
     445         [ +  - ]:         28 :                 if (blk_queue_dying(q))
     446                 :            :                         return -ENODEV;
     447                 :            :         }
     448                 :            : }
     449                 :            : 
     450                 :      79616 : void blk_queue_exit(struct request_queue *q)
     451                 :            : {
     452                 :      36261 :         percpu_ref_put(&q->q_usage_counter);
     453                 :      36261 : }
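
/*
 * Illustrative sketch (hypothetical caller; my_do_work() is invented):
 * blk_queue_enter()/blk_queue_exit() bracket work that must not race with a
 * queue freeze.  With BLK_MQ_REQ_NOWAIT the enter attempt fails instead of
 * sleeping when the queue is frozen, pm_only or dying:
 *
 *	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
 *	if (ret)
 *		return ret;
 *	my_do_work(q);
 *	blk_queue_exit(q);
 */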
     454                 :            : 
     455                 :        336 : static void blk_queue_usage_counter_release(struct percpu_ref *ref)
     456                 :            : {
     457                 :        336 :         struct request_queue *q =
     458                 :        336 :                 container_of(ref, struct request_queue, q_usage_counter);
     459                 :            : 
     460                 :        336 :         wake_up_all(&q->mq_freeze_wq);
     461                 :        336 : }
     462                 :            : 
     463                 :          0 : static void blk_rq_timed_out_timer(struct timer_list *t)
     464                 :            : {
     465                 :          0 :         struct request_queue *q = from_timer(q, t, timeout);
     466                 :            : 
     467                 :          0 :         kblockd_schedule_work(&q->timeout_work);
     468                 :          0 : }
     469                 :            : 
     470                 :          0 : static void blk_timeout_work(struct work_struct *work)
     471                 :            : {
     472                 :          0 : }
     473                 :            : 
     474                 :            : /**
     475                 :            :  * blk_alloc_queue_node - allocate a request queue
     476                 :            :  * @gfp_mask: memory allocation flags
     477                 :            :  * @node_id: NUMA node to allocate memory from
     478                 :            :  */
     479                 :        336 : struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
     480                 :            : {
     481                 :        336 :         struct request_queue *q;
     482                 :        336 :         int ret;
     483                 :            : 
     484                 :        336 :         q = kmem_cache_alloc_node(blk_requestq_cachep,
     485                 :            :                                 gfp_mask | __GFP_ZERO, node_id);
     486         [ +  - ]:        336 :         if (!q)
     487                 :            :                 return NULL;
     488                 :            : 
     489                 :        336 :         q->last_merge = NULL;
     490                 :            : 
     491                 :        336 :         q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
     492         [ -  + ]:        336 :         if (q->id < 0)
     493                 :          0 :                 goto fail_q;
     494                 :            : 
     495                 :        336 :         ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
     496         [ -  + ]:        336 :         if (ret)
     497                 :          0 :                 goto fail_id;
     498                 :            : 
     499                 :        336 :         q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
     500         [ -  + ]:        336 :         if (!q->backing_dev_info)
     501                 :          0 :                 goto fail_split;
     502                 :            : 
     503                 :        336 :         q->stats = blk_alloc_queue_stats();
     504         [ -  + ]:        336 :         if (!q->stats)
     505                 :          0 :                 goto fail_stats;
     506                 :            : 
     507                 :        336 :         q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
     508                 :        336 :         q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
     509                 :        336 :         q->backing_dev_info->name = "block";
     510                 :        336 :         q->node = node_id;
     511                 :            : 
     512                 :        336 :         timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
     513                 :            :                     laptop_mode_timer_fn, 0);
     514                 :        336 :         timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
     515                 :        336 :         INIT_WORK(&q->timeout_work, blk_timeout_work);
     516                 :        336 :         INIT_LIST_HEAD(&q->icq_list);
     517                 :            : #ifdef CONFIG_BLK_CGROUP
     518                 :            :         INIT_LIST_HEAD(&q->blkg_list);
     519                 :            : #endif
     520                 :            : 
     521                 :        336 :         kobject_init(&q->kobj, &blk_queue_ktype);
     522                 :            : 
     523                 :            : #ifdef CONFIG_BLK_DEV_IO_TRACE
     524                 :        336 :         mutex_init(&q->blk_trace_mutex);
     525                 :            : #endif
     526                 :        336 :         mutex_init(&q->sysfs_lock);
     527                 :        336 :         mutex_init(&q->sysfs_dir_lock);
     528                 :        336 :         spin_lock_init(&q->queue_lock);
     529                 :            : 
     530                 :        336 :         init_waitqueue_head(&q->mq_freeze_wq);
     531                 :        336 :         mutex_init(&q->mq_freeze_lock);
     532                 :            : 
     533                 :            :         /*
     534                 :            :          * Init percpu_ref in atomic mode so that it's faster to shutdown.
     535                 :            :          * See blk_register_queue() for details.
     536                 :            :          */
     537         [ -  + ]:        336 :         if (percpu_ref_init(&q->q_usage_counter,
     538                 :            :                                 blk_queue_usage_counter_release,
     539                 :            :                                 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
     540                 :          0 :                 goto fail_bdi;
     541                 :            : 
     542                 :            :         if (blkcg_init_queue(q))
     543                 :            :                 goto fail_ref;
     544                 :            : 
     545                 :            :         return q;
     546                 :            : 
     547                 :            : fail_ref:
     548                 :            :         percpu_ref_exit(&q->q_usage_counter);
     549                 :            : fail_bdi:
     550                 :          0 :         blk_free_queue_stats(q->stats);
     551                 :          0 : fail_stats:
     552                 :          0 :         bdi_put(q->backing_dev_info);
     553                 :          0 : fail_split:
     554                 :          0 :         bioset_exit(&q->bio_split);
     555                 :          0 : fail_id:
     556                 :          0 :         ida_simple_remove(&blk_queue_ida, q->id);
     557                 :          0 : fail_q:
     558                 :          0 :         kmem_cache_free(blk_requestq_cachep, q);
     559                 :          0 :         return NULL;
     560                 :            : }
     561                 :            : EXPORT_SYMBOL(blk_alloc_queue_node);
     562                 :            : 
     563                 :        420 : bool blk_get_queue(struct request_queue *q)
     564                 :            : {
     565         [ +  - ]:        420 :         if (likely(!blk_queue_dying(q))) {
     566                 :        420 :                 __blk_get_queue(q);
     567                 :        420 :                 return true;
     568                 :            :         }
     569                 :            : 
     570                 :            :         return false;
     571                 :            : }
     572                 :            : EXPORT_SYMBOL(blk_get_queue);
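
/*
 * Illustrative sketch (hypothetical caller, not part of the measured source):
 * blk_get_queue() pairs with blk_put_queue() above when a reference to the
 * queue must outlive the current context:
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;
 *	...
 *	blk_put_queue(q);
 */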
     573                 :            : 
     574                 :            : /**
     575                 :            :  * blk_get_request - allocate a request
     576                 :            :  * @q: request queue to allocate a request for
     577                 :            :  * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
     578                 :            :  * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
     579                 :            :  */
     580                 :       3780 : struct request *blk_get_request(struct request_queue *q, unsigned int op,
     581                 :            :                                 blk_mq_req_flags_t flags)
     582                 :            : {
     583                 :       3780 :         struct request *req;
     584                 :            : 
     585         [ -  + ]:       3780 :         WARN_ON_ONCE(op & REQ_NOWAIT);
     586         [ -  + ]:       3780 :         WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
     587                 :            : 
     588                 :       3780 :         req = blk_mq_alloc_request(q, op, flags);
     589   [ +  -  +  - ]:       3780 :         if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
     590                 :       3780 :                 q->mq_ops->initialize_rq_fn(req);
     591                 :            : 
     592                 :       3780 :         return req;
     593                 :            : }
     594                 :            : EXPORT_SYMBOL(blk_get_request);
     595                 :            : 
     596                 :       4116 : void blk_put_request(struct request *req)
     597                 :            : {
     598                 :       4116 :         blk_mq_free_request(req);
     599                 :       4116 : }
     600                 :            : EXPORT_SYMBOL(blk_put_request);
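
/*
 * Illustrative sketch (hypothetical passthrough-style caller, not part of the
 * measured source): a request obtained from blk_get_request() is returned
 * with blk_put_request() once the caller is done with it:
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_put_request(rq);
 */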
     601                 :            : 
     602                 :      14794 : bool bio_attempt_back_merge(struct request *req, struct bio *bio,
     603                 :            :                 unsigned int nr_segs)
     604                 :            : {
     605                 :      14794 :         const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
     606                 :            : 
     607         [ +  - ]:      14794 :         if (!ll_back_merge_fn(req, bio, nr_segs))
     608                 :            :                 return false;
     609                 :            : 
     610                 :      14794 :         trace_block_bio_backmerge(req->q, req, bio);
     611         [ -  + ]:      14794 :         rq_qos_merge(req->q, req, bio);
     612                 :            : 
     613         [ +  + ]:      14794 :         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
     614                 :      12386 :                 blk_rq_set_mixed_merge(req);
     615                 :            : 
     616                 :      14794 :         req->biotail->bi_next = bio;
     617                 :      14794 :         req->biotail = bio;
     618                 :      14794 :         req->__data_len += bio->bi_iter.bi_size;
     619                 :            : 
     620                 :      14794 :         blk_account_io_start(req, false);
     621                 :      14794 :         return true;
     622                 :            : }
     623                 :            : 
     624                 :        420 : bool bio_attempt_front_merge(struct request *req, struct bio *bio,
     625                 :            :                 unsigned int nr_segs)
     626                 :            : {
     627                 :        420 :         const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
     628                 :            : 
     629         [ +  - ]:        420 :         if (!ll_front_merge_fn(req, bio, nr_segs))
     630                 :            :                 return false;
     631                 :            : 
     632                 :        420 :         trace_block_bio_frontmerge(req->q, req, bio);
     633         [ -  + ]:        420 :         rq_qos_merge(req->q, req, bio);
     634                 :            : 
     635         [ +  - ]:        420 :         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
     636                 :        420 :                 blk_rq_set_mixed_merge(req);
     637                 :            : 
     638                 :        420 :         bio->bi_next = req->bio;
     639                 :        420 :         req->bio = bio;
     640                 :            : 
     641                 :        420 :         req->__sector = bio->bi_iter.bi_sector;
     642                 :        420 :         req->__data_len += bio->bi_iter.bi_size;
     643                 :            : 
     644                 :        420 :         blk_account_io_start(req, false);
     645                 :        420 :         return true;
     646                 :            : }
     647                 :            : 
     648                 :          0 : bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
     649                 :            :                 struct bio *bio)
     650                 :            : {
     651         [ #  # ]:          0 :         unsigned short segments = blk_rq_nr_discard_segments(req);
     652                 :            : 
     653         [ #  # ]:          0 :         if (segments >= queue_max_discard_segments(q))
     654                 :          0 :                 goto no_merge;
     655         [ #  # ]:          0 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
     656         [ #  # ]:          0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
     657                 :          0 :                 goto no_merge;
     658                 :            : 
     659         [ #  # ]:          0 :         rq_qos_merge(q, req, bio);
     660                 :            : 
     661                 :          0 :         req->biotail->bi_next = bio;
     662                 :          0 :         req->biotail = bio;
     663                 :          0 :         req->__data_len += bio->bi_iter.bi_size;
     664                 :          0 :         req->nr_phys_segments = segments + 1;
     665                 :            : 
     666                 :          0 :         blk_account_io_start(req, false);
     667                 :          0 :         return true;
     668                 :          0 : no_merge:
     669         [ #  # ]:          0 :         req_set_nomerge(q, req);
     670                 :            :         return false;
     671                 :            : }
     672                 :            : 
     673                 :            : /**
     674                 :            :  * blk_attempt_plug_merge - try to merge with %current's plugged list
     675                 :            :  * @q: request_queue new bio is being queued at
     676                 :            :  * @bio: new bio being queued
     677                 :            :  * @nr_segs: number of segments in @bio
     678                 :            :  * @same_queue_rq: pointer to &struct request that gets filled in when
     679                 :            :  * another request associated with @q is found on the plug list
     680                 :            :  * (optional, may be %NULL)
     681                 :            :  *
     682                 :            :  * Determine whether @bio being queued on @q can be merged with a request
     683                 :            :  * on %current's plugged list.  Returns %true if merge was successful,
     684                 :            :  * otherwise %false.
     685                 :            :  *
     686                 :            :  * Plugging coalesces IOs from the same issuer for the same purpose without
     687                 :            :  * going through @q->queue_lock.  As such it's more of an issuing mechanism
     688                 :            :  * than scheduling, and the request, while may have elvpriv data, is not
     689                 :            :  * added on the elevator at this point.  In addition, we don't have
     690                 :            :  * reliable access to the elevator outside queue lock.  Only check basic
     691                 :            :  * merging parameters without querying the elevator.
     692                 :            :  *
     693                 :            :  * Caller must ensure !blk_queue_nomerges(q) beforehand.
     694                 :            :  */
     695                 :      43098 : bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
     696                 :            :                 unsigned int nr_segs, struct request **same_queue_rq)
     697                 :            : {
     698                 :      43098 :         struct blk_plug *plug;
     699                 :      43098 :         struct request *rq;
     700                 :      43098 :         struct list_head *plug_list;
     701                 :            : 
     702         [ -  + ]:      43098 :         plug = blk_mq_plug(q, bio);
     703         [ +  + ]:      43098 :         if (!plug)
     704                 :            :                 return false;
     705                 :            : 
     706                 :      39598 :         plug_list = &plug->mq_list;
     707                 :            : 
     708         [ +  + ]:      40214 :         list_for_each_entry_reverse(rq, plug_list, queuelist) {
     709                 :      15820 :                 bool merged = false;
     710                 :            : 
     711   [ +  -  +  - ]:      15820 :                 if (rq->q == q && same_queue_rq) {
     712                 :            :                         /*
     713                 :            :                          * Only blk-mq multiple hardware queues case checks the
     714                 :            :                          * rq in the same queue, there should be only one such
     715                 :            :                          * rq in a queue
     716                 :            :                          **/
     717                 :      15820 :                         *same_queue_rq = rq;
     718                 :            :                 }
     719                 :            : 
     720   [ +  -  -  + ]:      15820 :                 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
     721                 :          0 :                         continue;
     722                 :            : 
     723   [ +  +  -  + ]:      15820 :                 switch (blk_try_merge(rq, bio)) {
     724                 :      14784 :                 case ELEVATOR_BACK_MERGE:
     725                 :      14784 :                         merged = bio_attempt_back_merge(rq, bio, nr_segs);
     726                 :      14784 :                         break;
     727                 :        420 :                 case ELEVATOR_FRONT_MERGE:
     728                 :        420 :                         merged = bio_attempt_front_merge(rq, bio, nr_segs);
     729                 :        420 :                         break;
     730                 :          0 :                 case ELEVATOR_DISCARD_MERGE:
     731                 :          0 :                         merged = bio_attempt_discard_merge(q, rq, bio);
     732                 :          0 :                         break;
     733                 :            :                 default:
     734                 :            :                         break;
     735                 :            :                 }
     736                 :            : 
     737         [ -  + ]:      15204 :                 if (merged)
     738                 :            :                         return true;
     739                 :            :         }
     740                 :            : 
     741                 :            :         return false;
     742                 :            : }
     743                 :            : 
     744                 :          0 : static void handle_bad_sector(struct bio *bio, sector_t maxsector)
     745                 :            : {
     746                 :          0 :         char b[BDEVNAME_SIZE];
     747                 :            : 
     748                 :          0 :         printk(KERN_INFO "attempt to access beyond end of device\n");
     749                 :          0 :         printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
     750                 :            :                         bio_devname(bio, b), bio->bi_opf,
     751                 :          0 :                         (unsigned long long)bio_end_sector(bio),
     752                 :            :                         (long long)maxsector);
     753                 :          0 : }
     754                 :            : 
     755                 :            : #ifdef CONFIG_FAIL_MAKE_REQUEST
     756                 :            : 
     757                 :            : static DECLARE_FAULT_ATTR(fail_make_request);
     758                 :            : 
     759                 :            : static int __init setup_fail_make_request(char *str)
     760                 :            : {
     761                 :            :         return setup_fault_attr(&fail_make_request, str);
     762                 :            : }
     763                 :            : __setup("fail_make_request=", setup_fail_make_request);
     764                 :            : 
     765                 :            : static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
     766                 :            : {
     767                 :            :         return part->make_it_fail && should_fail(&fail_make_request, bytes);
     768                 :            : }
     769                 :            : 
     770                 :            : static int __init fail_make_request_debugfs(void)
     771                 :            : {
     772                 :            :         struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
     773                 :            :                                                 NULL, &fail_make_request);
     774                 :            : 
     775                 :            :         return PTR_ERR_OR_ZERO(dir);
     776                 :            : }
     777                 :            : 
     778                 :            : late_initcall(fail_make_request_debugfs);
     779                 :            : 
     780                 :            : #else /* CONFIG_FAIL_MAKE_REQUEST */
     781                 :            : 
     782                 :      43355 : static inline bool should_fail_request(struct hd_struct *part,
     783                 :            :                                         unsigned int bytes)
     784                 :            : {
     785                 :      43355 :         return false;
     786                 :            : }
     787                 :            : 
     788                 :            : #endif /* CONFIG_FAIL_MAKE_REQUEST */
     789                 :            : 
     790                 :            : static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
     791                 :            : {
     792                 :            :         const int op = bio_op(bio);
     793                 :            : 
     794                 :            :         if (part->policy && op_is_write(op)) {
     795                 :            :                 char b[BDEVNAME_SIZE];
     796                 :            : 
     797                 :            :                 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
     798                 :            :                         return false;
     799                 :            : 
     800                 :            :                 WARN_ONCE(1,
     801                 :            :                        "generic_make_request: Trying to write "
     802                 :            :                         "to read-only block-device %s (partno %d)\n",
     803                 :            :                         bio_devname(bio, b), part->partno);
     804                 :            :                 /* Older lvm-tools actually trigger this */
     805                 :            :                 return false;
     806                 :            :         }
     807                 :            : 
     808                 :            :         return false;
     809                 :            : }
     810                 :            : 
     811                 :      43355 : static noinline int should_fail_bio(struct bio *bio)
     812                 :            : {
     813                 :      43355 :         if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
     814                 :            :                 return -EIO;
     815                 :      43355 :         return 0;
     816                 :            : }
     817                 :            : ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
     818                 :            : 
     819                 :            : /*
     820                 :            :  * Check whether this bio extends beyond the end of the device or partition.
     821                 :            :  * This may well happen - the kernel calls bread() without checking the size of
     822                 :            :  * the device, e.g., when mounting a file system.
     823                 :            :  */
     824                 :      43355 : static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
     825                 :            : {
     826                 :      43355 :         unsigned int nr_sectors = bio_sectors(bio);
     827                 :            : 
     828         [ +  - ]:      43355 :         if (nr_sectors && maxsector &&
     829         [ +  - ]:      43355 :             (nr_sectors > maxsector ||
     830         [ -  + ]:      43355 :              bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
     831                 :          0 :                 handle_bad_sector(bio, maxsector);
     832                 :          0 :                 return -EIO;
     833                 :            :         }
     834                 :            :         return 0;
     835                 :            : }
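/*
 * Illustrative sketch added by the editor (not part of blk-core.c): the
 * check above is written as "bi_sector > maxsector - nr_sectors" rather
 * than "bi_sector + nr_sectors > maxsector" so that the sum cannot wrap
 * around near the top of the sector_t range.  A minimal stand-alone
 * equivalent, assuming the caller already knows nr_sectors and maxsector
 * are non-zero (sketch_bio_beyond_eod is a hypothetical name):
 */
static inline bool sketch_bio_beyond_eod(sector_t start, unsigned int nr_sectors,
                                         sector_t maxsector)
{
        /* maxsector - nr_sectors cannot underflow once the first test passes */
        return nr_sectors > maxsector || start > maxsector - nr_sectors;
}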
     836                 :            : 
     837                 :            : /*
     838                 :            :  * Remap block n of partition p to block n+start(p) of the disk.
     839                 :            :  */
     840                 :          0 : static inline int blk_partition_remap(struct bio *bio)
     841                 :            : {
     842                 :          0 :         struct hd_struct *p;
     843                 :          0 :         int ret = -EIO;
     844                 :            : 
     845                 :          0 :         rcu_read_lock();
     846                 :          0 :         p = __disk_get_part(bio->bi_disk, bio->bi_partno);
     847         [ #  # ]:          0 :         if (unlikely(!p))
     848                 :          0 :                 goto out;
     849                 :          0 :         if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
     850                 :            :                 goto out;
     851         [ #  # ]:          0 :         if (unlikely(bio_check_ro(bio, p)))
     852                 :          0 :                 goto out;
     853                 :            : 
     854         [ #  # ]:          0 :         if (bio_sectors(bio)) {
     855         [ #  # ]:          0 :                 if (bio_check_eod(bio, part_nr_sects_read(p)))
     856                 :          0 :                         goto out;
     857                 :          0 :                 bio->bi_iter.bi_sector += p->start_sect;
     858                 :          0 :                 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
     859                 :          0 :                                       bio->bi_iter.bi_sector - p->start_sect);
     860                 :            :         }
     861                 :          0 :         bio->bi_partno = 0;
     862                 :          0 :         ret = 0;
     863                 :          0 : out:
     864                 :          0 :         rcu_read_unlock();
     865                 :          0 :         return ret;
     866                 :            : }
     867                 :            : 
     868                 :            : static noinline_for_stack bool
     869                 :      43355 : generic_make_request_checks(struct bio *bio)
     870                 :            : {
     871                 :      43355 :         struct request_queue *q;
     872                 :      43355 :         int nr_sectors = bio_sectors(bio);
     873                 :      43355 :         blk_status_t status = BLK_STS_IOERR;
     874                 :      43355 :         char b[BDEVNAME_SIZE];
     875                 :            : 
     876                 :      43355 :         might_sleep();
     877                 :            : 
     878                 :      43355 :         q = bio->bi_disk->queue;
     879         [ -  + ]:      43355 :         if (unlikely(!q)) {
     880                 :          0 :                 printk(KERN_ERR
     881                 :            :                        "generic_make_request: Trying to access "
     882                 :            :                         "nonexistent block-device %s (%Lu)\n",
     883                 :          0 :                         bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
     884                 :          0 :                 goto end_io;
     885                 :            :         }
     886                 :            : 
     887                 :            :         /*
     888                 :            :          * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
     889                 :            :          * with BLK_STS_AGAIN status in order to catch -EAGAIN and
      890                 :            :          * to give the caller a chance to retry the request gracefully.
     891                 :            :          */
     892   [ -  +  -  - ]:      43355 :         if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
     893                 :          0 :                 status = BLK_STS_AGAIN;
     894                 :          0 :                 goto end_io;
     895                 :            :         }
     896                 :            : 
     897         [ -  + ]:      43355 :         if (should_fail_bio(bio))
     898                 :          0 :                 goto end_io;
     899                 :            : 
     900         [ -  + ]:      43355 :         if (bio->bi_partno) {
     901         [ #  # ]:          0 :                 if (unlikely(blk_partition_remap(bio)))
     902                 :          0 :                         goto end_io;
     903                 :            :         } else {
     904         [ -  + ]:      43355 :                 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
     905                 :          0 :                         goto end_io;
     906         [ -  + ]:      43355 :                 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
     907                 :          0 :                         goto end_io;
     908                 :            :         }
     909                 :            : 
     910                 :            :         /*
      911                 :            :          * Filter flush bios early so that make_request-based
     912                 :            :          * drivers without flush support don't have to worry
     913                 :            :          * about them.
     914                 :            :          */
     915   [ +  +  -  + ]:      43612 :         if (op_is_flush(bio->bi_opf) &&
     916                 :        257 :             !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
     917                 :          0 :                 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
     918         [ #  # ]:          0 :                 if (!nr_sectors) {
     919                 :          0 :                         status = BLK_STS_OK;
     920                 :          0 :                         goto end_io;
     921                 :            :                 }
     922                 :            :         }
     923                 :            : 
     924         [ +  - ]:      43355 :         if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
     925                 :      43355 :                 bio->bi_opf &= ~REQ_HIPRI;
     926                 :            : 
     927   [ -  -  -  -  :      43355 :         switch (bio_op(bio)) {
                -  -  + ]
     928                 :          0 :         case REQ_OP_DISCARD:
     929         [ #  # ]:          0 :                 if (!blk_queue_discard(q))
     930                 :          0 :                         goto not_supported;
     931                 :            :                 break;
     932                 :          0 :         case REQ_OP_SECURE_ERASE:
     933         [ #  # ]:          0 :                 if (!blk_queue_secure_erase(q))
     934                 :          0 :                         goto not_supported;
     935                 :            :                 break;
     936                 :          0 :         case REQ_OP_WRITE_SAME:
     937         [ #  # ]:          0 :                 if (!q->limits.max_write_same_sectors)
     938                 :          0 :                         goto not_supported;
     939                 :            :                 break;
     940                 :            :         case REQ_OP_ZONE_RESET:
     941                 :            :         case REQ_OP_ZONE_OPEN:
     942                 :            :         case REQ_OP_ZONE_CLOSE:
     943                 :            :         case REQ_OP_ZONE_FINISH:
     944         [ #  # ]:          0 :                 if (!blk_queue_is_zoned(q))
     945                 :          0 :                         goto not_supported;
     946                 :            :                 break;
     947                 :            :         case REQ_OP_ZONE_RESET_ALL:
     948   [ #  #  #  # ]:          0 :                 if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
     949                 :          0 :                         goto not_supported;
     950                 :            :                 break;
     951                 :          0 :         case REQ_OP_WRITE_ZEROES:
     952         [ #  # ]:          0 :                 if (!q->limits.max_write_zeroes_sectors)
     953                 :          0 :                         goto not_supported;
     954                 :            :                 break;
     955                 :            :         default:
     956                 :            :                 break;
     957                 :            :         }
     958                 :            : 
     959                 :            :         /*
     960                 :            :          * Various block parts want %current->io_context and lazy ioc
     961                 :            :          * allocation ends up trading a lot of pain for a small amount of
      962                 :            :          * memory.  Just allocate it upfront.  This may fail and the block
     963                 :            :          * layer knows how to live with it.
     964                 :            :          */
     965                 :      43355 :         create_io_context(GFP_ATOMIC, q->node);
     966                 :            : 
     967         [ +  - ]:      43355 :         if (!blkcg_bio_issue_check(q, bio))
     968                 :            :                 return false;
     969                 :            : 
     970         [ +  - ]:      43355 :         if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
     971                 :      43355 :                 trace_block_bio_queue(q, bio);
     972                 :            :                 /* Now that enqueuing has been traced, we need to trace
     973                 :            :                  * completion as well.
     974                 :            :                  */
     975                 :      43355 :                 bio_set_flag(bio, BIO_TRACE_COMPLETION);
     976                 :            :         }
     977                 :            :         return true;
     978                 :            : 
     979                 :            : not_supported:
     980                 :            :         status = BLK_STS_NOTSUPP;
     981                 :          0 : end_io:
     982                 :          0 :         bio->bi_status = status;
     983                 :          0 :         bio_endio(bio);
     984                 :          0 :         return false;
     985                 :            : }
     986                 :            : 
     987                 :            : /**
     988                 :            :  * generic_make_request - hand a buffer to its device driver for I/O
     989                 :            :  * @bio:  The bio describing the location in memory and on the device.
     990                 :            :  *
     991                 :            :  * generic_make_request() is used to make I/O requests of block
     992                 :            :  * devices. It is passed a &struct bio, which describes the I/O that needs
     993                 :            :  * to be done.
     994                 :            :  *
     995                 :            :  * generic_make_request() does not return any status.  The
     996                 :            :  * success/failure status of the request, along with notification of
     997                 :            :  * completion, is delivered asynchronously through the bio->bi_end_io
      998                 :            :  * function described (one day) elsewhere.
     999                 :            :  *
    1000                 :            :  * The caller of generic_make_request must make sure that bi_io_vec
    1001                 :            :  * are set to describe the memory buffer, and that bi_dev and bi_sector are
    1002                 :            :  * set to describe the device address, and the
    1003                 :            :  * bi_end_io and optionally bi_private are set to describe how
    1004                 :            :  * completion notification should be signaled.
    1005                 :            :  *
    1006                 :            :  * generic_make_request and the drivers it calls may use bi_next if this
    1007                 :            :  * bio happens to be merged with someone else, and may resubmit the bio to
    1008                 :            :  * a lower device by calling into generic_make_request recursively, which
    1009                 :            :  * means the bio should NOT be touched after the call to ->make_request_fn.
    1010                 :            :  */
    1011                 :      43355 : blk_qc_t generic_make_request(struct bio *bio)
    1012                 :            : {
    1013                 :            :         /*
    1014                 :            :          * bio_list_on_stack[0] contains bios submitted by the current
    1015                 :            :          * make_request_fn.
    1016                 :            :          * bio_list_on_stack[1] contains bios that were submitted before
    1017                 :            :          * the current make_request_fn, but that haven't been processed
    1018                 :            :          * yet.
    1019                 :            :          */
    1020                 :      43355 :         struct bio_list bio_list_on_stack[2];
    1021                 :      43355 :         blk_qc_t ret = BLK_QC_T_NONE;
    1022                 :            : 
    1023         [ -  + ]:      43355 :         if (!generic_make_request_checks(bio))
    1024                 :          0 :                 goto out;
    1025                 :            : 
    1026                 :            :         /*
    1027                 :            :          * We only want one ->make_request_fn to be active at a time, else
    1028                 :            :          * stack usage with stacked devices could be a problem.  So use
     1029                 :            :          * current->bio_list to keep a list of requests submitted by a
    1030                 :            :          * make_request_fn function.  current->bio_list is also used as a
    1031                 :            :          * flag to say if generic_make_request is currently active in this
    1032                 :            :          * task or not.  If it is NULL, then no make_request is active.  If
    1033                 :            :          * it is non-NULL, then a make_request is active, and new requests
    1034                 :            :          * should be added at the tail
    1035                 :            :          */
    1036         [ -  + ]:      43355 :         if (current->bio_list) {
    1037         [ #  # ]:          0 :                 bio_list_add(&current->bio_list[0], bio);
    1038                 :          0 :                 goto out;
    1039                 :            :         }
    1040                 :            : 
     1041                 :            :         /* The following loop may be a bit non-obvious, and so deserves some
    1042                 :            :          * explanation.
    1043                 :            :          * Before entering the loop, bio->bi_next is NULL (as all callers
    1044                 :            :          * ensure that) so we have a list with a single bio.
    1045                 :            :          * We pretend that we have just taken it off a longer list, so
    1046                 :            :          * we assign bio_list to a pointer to the bio_list_on_stack,
    1047                 :            :          * thus initialising the bio_list of new bios to be
    1048                 :            :          * added.  ->make_request() may indeed add some more bios
    1049                 :            :          * through a recursive call to generic_make_request.  If it
    1050                 :            :          * did, we find a non-NULL value in bio_list and re-enter the loop
    1051                 :            :          * from the top.  In this case we really did just take the bio
     1052                 :            :          * off the top of the list (no pretending) and so remove it from
    1053                 :            :          * bio_list, and call into ->make_request() again.
    1054                 :            :          */
    1055         [ -  + ]:      43355 :         BUG_ON(bio->bi_next);
    1056                 :      43355 :         bio_list_init(&bio_list_on_stack[0]);
    1057                 :      43355 :         current->bio_list = bio_list_on_stack;
    1058                 :      43355 :         do {
    1059                 :      43355 :                 struct request_queue *q = bio->bi_disk->queue;
    1060                 :      43355 :                 blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
    1061                 :      43355 :                         BLK_MQ_REQ_NOWAIT : 0;
    1062                 :            : 
    1063         [ +  - ]:      43355 :                 if (likely(blk_queue_enter(q, flags) == 0)) {
    1064                 :      43355 :                         struct bio_list lower, same;
    1065                 :            : 
    1066                 :            :                         /* Create a fresh bio_list for all subordinate requests */
    1067                 :      43355 :                         bio_list_on_stack[1] = bio_list_on_stack[0];
    1068                 :      43355 :                         bio_list_init(&bio_list_on_stack[0]);
    1069                 :      43355 :                         ret = q->make_request_fn(q, bio);
    1070                 :            : 
    1071                 :      43355 :                         blk_queue_exit(q);
    1072                 :            : 
    1073                 :            :                         /* sort new bios into those for a lower level
    1074                 :            :                          * and those for the same level
    1075                 :            :                          */
    1076                 :      43355 :                         bio_list_init(&lower);
    1077                 :      43355 :                         bio_list_init(&same);
    1078   [ -  +  -  + ]:      43355 :                         while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
    1079         [ #  # ]:          0 :                                 if (q == bio->bi_disk->queue)
    1080         [ #  # ]:          0 :                                         bio_list_add(&same, bio);
    1081                 :            :                                 else
    1082         [ #  # ]:          0 :                                         bio_list_add(&lower, bio);
    1083                 :            :                         /* now assemble so we handle the lowest level first */
    1084         [ -  + ]:      43355 :                         bio_list_merge(&bio_list_on_stack[0], &lower);
    1085         [ -  + ]:      43355 :                         bio_list_merge(&bio_list_on_stack[0], &same);
    1086         [ -  + ]:      43355 :                         bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
    1087                 :            :                 } else {
    1088   [ #  #  #  # ]:          0 :                         if (unlikely(!blk_queue_dying(q) &&
    1089                 :            :                                         (bio->bi_opf & REQ_NOWAIT)))
    1090                 :          0 :                                 bio_wouldblock_error(bio);
    1091                 :            :                         else
    1092                 :          0 :                                 bio_io_error(bio);
    1093                 :            :                 }
    1094         [ -  + ]:      43355 :                 bio = bio_list_pop(&bio_list_on_stack[0]);
    1095         [ -  + ]:      43355 :         } while (bio);
    1096                 :      43355 :         current->bio_list = NULL; /* deactivate */
    1097                 :            : 
    1098                 :      43355 : out:
    1099                 :      43355 :         return ret;
    1100                 :            : }
    1101                 :            : EXPORT_SYMBOL(generic_make_request);
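/*
 * Illustrative sketch added by the editor (not part of blk-core.c): why the
 * on-stack bio list above keeps stacked devices from recursing.  A
 * hypothetical remapping driver's ->make_request_fn (all sketch_* names are
 * invented) redirects the bio and calls generic_make_request() again; since
 * current->bio_list is non-NULL at that point, the nested call only appends
 * the bio to bio_list_on_stack[0] and returns, and the loop above then
 * submits it to the lower queue iteratively instead of growing the stack.
 */
struct sketch_linear_dev {
        struct block_device *lower_bdev;        /* backing device */
        sector_t start_sector;                  /* offset on the backing device */
};

static blk_qc_t sketch_linear_make_request(struct request_queue *q, struct bio *bio)
{
        struct sketch_linear_dev *dev = q->queuedata;

        bio_set_dev(bio, dev->lower_bdev);
        bio->bi_iter.bi_sector += dev->start_sector;

        /* queued on current->bio_list rather than dispatched recursively */
        return generic_make_request(bio);
}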
    1102                 :            : 
    1103                 :            : /**
    1104                 :            :  * direct_make_request - hand a buffer directly to its device driver for I/O
    1105                 :            :  * @bio:  The bio describing the location in memory and on the device.
    1106                 :            :  *
    1107                 :            :  * This function behaves like generic_make_request(), but does not protect
    1108                 :            :  * against recursion.  Must only be used if the called driver is known
    1109                 :            :  * to not call generic_make_request (or direct_make_request) again from
    1110                 :            :  * its make_request function.  (Calling direct_make_request again from
    1111                 :            :  * a workqueue is perfectly fine as that doesn't recurse).
    1112                 :            :  */
    1113                 :          0 : blk_qc_t direct_make_request(struct bio *bio)
    1114                 :            : {
    1115                 :          0 :         struct request_queue *q = bio->bi_disk->queue;
    1116                 :          0 :         bool nowait = bio->bi_opf & REQ_NOWAIT;
    1117                 :          0 :         blk_qc_t ret;
    1118                 :            : 
    1119         [ #  # ]:          0 :         if (!generic_make_request_checks(bio))
    1120                 :            :                 return BLK_QC_T_NONE;
    1121                 :            : 
    1122         [ #  # ]:          0 :         if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
    1123   [ #  #  #  # ]:          0 :                 if (nowait && !blk_queue_dying(q))
    1124                 :          0 :                         bio->bi_status = BLK_STS_AGAIN;
    1125                 :            :                 else
    1126                 :          0 :                         bio->bi_status = BLK_STS_IOERR;
    1127                 :          0 :                 bio_endio(bio);
    1128                 :          0 :                 return BLK_QC_T_NONE;
    1129                 :            :         }
    1130                 :            : 
    1131                 :          0 :         ret = q->make_request_fn(q, bio);
    1132                 :          0 :         blk_queue_exit(q);
    1133                 :          0 :         return ret;
    1134                 :            : }
    1135                 :            : EXPORT_SYMBOL_GPL(direct_make_request);
    1136                 :            : 
    1137                 :            : /**
    1138                 :            :  * submit_bio - submit a bio to the block device layer for I/O
    1139                 :            :  * @bio: The &struct bio which describes the I/O
    1140                 :            :  *
    1141                 :            :  * submit_bio() is very similar in purpose to generic_make_request(), and
    1142                 :            :  * uses that function to do most of the work. Both are fairly rough
     1143                 :            :  * interfaces; @bio must be set up and ready for I/O.
    1144                 :            :  *
    1145                 :            :  */
    1146                 :      43355 : blk_qc_t submit_bio(struct bio *bio)
    1147                 :            : {
    1148                 :      43355 :         bool workingset_read = false;
    1149                 :      43355 :         unsigned long pflags;
    1150                 :      43355 :         blk_qc_t ret;
    1151                 :            : 
    1152         [ +  - ]:      43355 :         if (blkcg_punt_bio_submit(bio))
    1153                 :            :                 return BLK_QC_T_NONE;
    1154                 :            : 
    1155                 :            :         /*
    1156                 :            :          * If it's a regular read/write or a barrier with data attached,
    1157                 :            :          * go through the normal accounting stuff before submission.
    1158                 :            :          */
    1159         [ +  - ]:      43355 :         if (bio_has_data(bio)) {
    1160                 :      43355 :                 unsigned int count;
    1161                 :            : 
    1162         [ -  + ]:      43355 :                 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
    1163         [ #  # ]:          0 :                         count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
    1164                 :            :                 else
    1165                 :      43355 :                         count = bio_sectors(bio);
    1166                 :            : 
    1167         [ +  + ]:      43355 :                 if (op_is_write(bio_op(bio))) {
    1168         [ -  + ]:       3118 :                         count_vm_events(PGPGOUT, count);
    1169                 :            :                 } else {
    1170         [ -  + ]:      40237 :                         if (bio_flagged(bio, BIO_WORKINGSET))
    1171                 :            :                                 workingset_read = true;
    1172         [ -  + ]:      40237 :                         task_io_account_read(bio->bi_iter.bi_size);
    1173         [ -  + ]:      40237 :                         count_vm_events(PGPGIN, count);
    1174                 :            :                 }
    1175                 :            : 
    1176         [ -  + ]:      43355 :                 if (unlikely(block_dump)) {
    1177                 :          0 :                         char b[BDEVNAME_SIZE];
    1178         [ #  # ]:          0 :                         printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
    1179                 :          0 :                         current->comm, task_pid_nr(current),
    1180                 :          0 :                                 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
    1181         [ #  # ]:          0 :                                 (unsigned long long)bio->bi_iter.bi_sector,
    1182                 :            :                                 bio_devname(bio, b), count);
    1183                 :            :                 }
    1184                 :            :         }
    1185                 :            : 
    1186                 :            :         /*
    1187                 :            :          * If we're reading data that is part of the userspace
    1188                 :            :          * workingset, count submission time as memory stall. When the
    1189                 :            :          * device is congested, or the submitting cgroup IO-throttled,
    1190                 :            :          * submission can be a significant part of overall IO time.
    1191                 :            :          */
    1192                 :      43355 :         if (workingset_read)
    1193                 :            :                 psi_memstall_enter(&pflags);
    1194                 :            : 
    1195                 :      43355 :         ret = generic_make_request(bio);
    1196                 :            : 
    1197                 :      43355 :         if (workingset_read)
    1198                 :            :                 psi_memstall_leave(&pflags);
    1199                 :            : 
    1200                 :      43355 :         return ret;
    1201                 :            : }
    1202                 :            : EXPORT_SYMBOL(submit_bio);
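/*
 * Illustrative sketch added by the editor (not part of blk-core.c): the
 * caller contract described in the generic_make_request() kernel-doc, as a
 * typical submit_bio() user would honour it.  sketch_read_sector() and
 * sketch_read_done() are invented names; the device, sector and page are
 * assumed to be valid and the read size is one page.
 */
static void sketch_read_done(struct bio *bio)
{
        struct completion *waiter = bio->bi_private;

        if (bio->bi_status)
                pr_warn("sketch: read failed: %d\n",
                        blk_status_to_errno(bio->bi_status));
        complete(waiter);
        bio_put(bio);
}

static void sketch_read_sector(struct block_device *bdev, sector_t sector,
                               struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio->bi_end_io = sketch_read_done;
        bio->bi_private = &done;

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                bio_put(bio);
                return;
        }

        submit_bio(bio);
        wait_for_completion(&done);
}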
    1203                 :            : 
    1204                 :            : /**
    1205                 :            :  * blk_cloned_rq_check_limits - Helper function to check a cloned request
     1206                 :            :  *                              for the new queue limits
    1207                 :            :  * @q:  the queue
    1208                 :            :  * @rq: the request being checked
    1209                 :            :  *
    1210                 :            :  * Description:
    1211                 :            :  *    @rq may have been made based on weaker limitations of upper-level queues
    1212                 :            :  *    in request stacking drivers, and it may violate the limitation of @q.
    1213                 :            :  *    Since the block layer and the underlying device driver trust @rq
    1214                 :            :  *    after it is inserted to @q, it should be checked against @q before
    1215                 :            :  *    the insertion using this generic function.
    1216                 :            :  *
    1217                 :            :  *    Request stacking drivers like request-based dm may change the queue
    1218                 :            :  *    limits when retrying requests on other queues. Those requests need
    1219                 :            :  *    to be checked against the new queue limits again during dispatch.
    1220                 :            :  */
    1221                 :          0 : static int blk_cloned_rq_check_limits(struct request_queue *q,
    1222                 :            :                                       struct request *rq)
    1223                 :            : {
    1224   [ #  #  #  # ]:          0 :         if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
    1225         [ #  # ]:          0 :                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
    1226                 :            :                         __func__, blk_rq_sectors(rq),
    1227                 :            :                         blk_queue_get_max_sectors(q, req_op(rq)));
    1228                 :          0 :                 return -EIO;
    1229                 :            :         }
    1230                 :            : 
    1231                 :            :         /*
    1232                 :            :          * queue's settings related to segment counting like q->bounce_pfn
    1233                 :            :          * may differ from that of other stacking queues.
     1234                 :            :          * Recalculate it to check the request correctly against this
     1235                 :            :          * queue's limits.
    1236                 :            :          */
    1237                 :          0 :         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
    1238         [ #  # ]:          0 :         if (rq->nr_phys_segments > queue_max_segments(q)) {
    1239                 :          0 :                 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
    1240                 :            :                         __func__, rq->nr_phys_segments, queue_max_segments(q));
    1241                 :          0 :                 return -EIO;
    1242                 :            :         }
    1243                 :            : 
    1244                 :            :         return 0;
    1245                 :            : }
    1246                 :            : 
    1247                 :            : /**
    1248                 :            :  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
    1249                 :            :  * @q:  the queue to submit the request
    1250                 :            :  * @rq: the request being queued
    1251                 :            :  */
    1252                 :          0 : blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
    1253                 :            : {
    1254         [ #  # ]:          0 :         if (blk_cloned_rq_check_limits(q, rq))
    1255                 :            :                 return BLK_STS_IOERR;
    1256                 :            : 
    1257                 :          0 :         if (rq->rq_disk &&
    1258                 :            :             should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
    1259                 :            :                 return BLK_STS_IOERR;
    1260                 :            : 
    1261         [ #  # ]:          0 :         if (blk_queue_io_stat(q))
    1262                 :          0 :                 blk_account_io_start(rq, true);
    1263                 :            : 
    1264                 :            :         /*
    1265                 :            :          * Since we have a scheduler attached on the top device,
    1266                 :            :          * bypass a potential scheduler on the bottom device for
    1267                 :            :          * insert.
    1268                 :            :          */
    1269                 :          0 :         return blk_mq_request_issue_directly(rq, true);
    1270                 :            : }
    1271                 :            : EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
    1272                 :            : 
    1273                 :            : /**
    1274                 :            :  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
    1275                 :            :  * @rq: request to examine
    1276                 :            :  *
    1277                 :            :  * Description:
     1278                 :            :  *     A request could be a merge of IOs which require different failure
    1279                 :            :  *     handling.  This function determines the number of bytes which
    1280                 :            :  *     can be failed from the beginning of the request without
     1281                 :            :  *     crossing into areas which need to be retried further.
    1282                 :            :  *
    1283                 :            :  * Return:
    1284                 :            :  *     The number of bytes to fail.
    1285                 :            :  */
    1286                 :          0 : unsigned int blk_rq_err_bytes(const struct request *rq)
    1287                 :            : {
    1288                 :          0 :         unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
    1289                 :          0 :         unsigned int bytes = 0;
    1290                 :          0 :         struct bio *bio;
    1291                 :            : 
    1292         [ #  # ]:          0 :         if (!(rq->rq_flags & RQF_MIXED_MERGE))
    1293                 :          0 :                 return blk_rq_bytes(rq);
    1294                 :            : 
    1295                 :            :         /*
    1296                 :            :          * Currently the only 'mixing' which can happen is between
     1297                 :            :          * different failfast types.  We can safely fail portions
    1298                 :            :          * which have all the failfast bits that the first one has -
    1299                 :            :          * the ones which are at least as eager to fail as the first
    1300                 :            :          * one.
    1301                 :            :          */
    1302         [ #  # ]:          0 :         for (bio = rq->bio; bio; bio = bio->bi_next) {
    1303         [ #  # ]:          0 :                 if ((bio->bi_opf & ff) != ff)
    1304                 :            :                         break;
    1305                 :          0 :                 bytes += bio->bi_iter.bi_size;
    1306                 :            :         }
    1307                 :            : 
    1308                 :            :         /* this could lead to infinite loop */
    1309   [ #  #  #  # ]:          0 :         BUG_ON(blk_rq_bytes(rq) && !bytes);
    1310                 :            :         return bytes;
    1311                 :            : }
    1312                 :            : EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
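/*
 * Illustrative sketch added by the editor (not part of blk-core.c): the use
 * blk_rq_err_bytes() is documented for.  On error, only the leading bytes
 * that share the first bio's failfast policy are failed immediately via
 * blk_update_request(); the caller can then retry or requeue whatever is
 * left.  sketch_fail_failfast_part() is an invented name.
 */
static bool sketch_fail_failfast_part(struct request *rq, blk_status_t error)
{
        /* returns true if bytes beyond the failfast boundary remain */
        return blk_update_request(rq, error, blk_rq_err_bytes(rq));
}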
    1313                 :            : 
    1314                 :      31366 : void blk_account_io_completion(struct request *req, unsigned int bytes)
    1315                 :            : {
    1316   [ +  +  +  -  :      59680 :         if (req->part && blk_do_io_stat(req)) {
                   +  - ]
    1317         [ +  - ]:      28314 :                 const int sgrp = op_stat_group(req_op(req));
    1318                 :      28314 :                 struct hd_struct *part;
    1319                 :            : 
    1320         [ -  + ]:      28314 :                 part_stat_lock();
    1321                 :      28314 :                 part = req->part;
    1322   [ -  +  -  - ]:      28314 :                 part_stat_add(part, sectors[sgrp], bytes >> 9);
    1323                 :      28314 :                 part_stat_unlock();
    1324                 :            :         }
    1325                 :      31366 : }
    1326                 :            : 
    1327                 :      32300 : void blk_account_io_done(struct request *req, u64 now)
    1328                 :            : {
    1329                 :            :         /*
    1330                 :            :          * Account IO completion.  flush_rq isn't accounted as a
    1331                 :            :          * normal IO on queueing nor completion.  Accounting the
    1332                 :            :          * containing request is enough.
    1333                 :            :          */
    1334   [ +  +  +  -  :      60614 :         if (req->part && blk_do_io_stat(req) &&
                   +  - ]
    1335         [ +  + ]:      28314 :             !(req->rq_flags & RQF_FLUSH_SEQ)) {
    1336         [ +  - ]:      28057 :                 const int sgrp = op_stat_group(req_op(req));
    1337                 :      28057 :                 struct hd_struct *part;
    1338                 :            : 
    1339                 :      28057 :                 part_stat_lock();
    1340                 :      28057 :                 part = req->part;
    1341                 :            : 
    1342                 :      28057 :                 update_io_ticks(part, jiffies);
    1343   [ -  +  -  - ]:      28057 :                 part_stat_inc(part, ios[sgrp]);
    1344   [ -  +  -  - ]:      28057 :                 part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
    1345   [ -  +  -  - ]:      28057 :                 part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
    1346                 :      28057 :                 part_dec_in_flight(req->q, part, rq_data_dir(req));
    1347                 :            : 
    1348                 :      28057 :                 hd_struct_put(part);
    1349                 :      28057 :                 part_stat_unlock();
    1350                 :            :         }
    1351                 :      32300 : }
    1352                 :            : 
    1353                 :      47135 : void blk_account_io_start(struct request *rq, bool new_io)
    1354                 :            : {
    1355                 :      47135 :         struct hd_struct *part;
    1356         [ +  + ]:      47135 :         int rw = rq_data_dir(rq);
    1357                 :            : 
    1358   [ +  +  +  + ]:      94270 :         if (!blk_do_io_stat(rq))
    1359                 :            :                 return;
    1360                 :            : 
    1361         [ +  + ]:      43607 :         part_stat_lock();
    1362                 :            : 
    1363         [ +  + ]:      43607 :         if (!new_io) {
    1364                 :      15214 :                 part = rq->part;
    1365   [ -  +  -  - ]:      15214 :                 part_stat_inc(part, merges[rw]);
    1366                 :            :         } else {
    1367                 :      28393 :                 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
    1368         [ -  + ]:      28393 :                 if (!hd_struct_try_get(part)) {
    1369                 :            :                         /*
    1370                 :            :                          * The partition is already being removed,
    1371                 :            :                          * the request will be accounted on the disk only
    1372                 :            :                          *
    1373                 :            :                          * We take a reference on disk->part0 although that
    1374                 :            :                          * partition will never be deleted, so we can treat
    1375                 :            :                          * it as any other partition.
    1376                 :            :                          */
    1377                 :          0 :                         part = &rq->rq_disk->part0;
    1378                 :          0 :                         hd_struct_get(part);
    1379                 :            :                 }
    1380                 :      28393 :                 part_inc_in_flight(rq->q, part, rw);
    1381                 :      28393 :                 rq->part = part;
    1382                 :            :         }
    1383                 :            : 
    1384                 :      43607 :         update_io_ticks(part, jiffies);
    1385                 :            : 
    1386                 :      43607 :         part_stat_unlock();
    1387                 :            : }
    1388                 :            : 
    1389                 :            : /*
    1390                 :            :  * Steal bios from a request and add them to a bio list.
    1391                 :            :  * The request must not have been partially completed before.
    1392                 :            :  */
    1393                 :          0 : void blk_steal_bios(struct bio_list *list, struct request *rq)
    1394                 :            : {
    1395         [ #  # ]:          0 :         if (rq->bio) {
    1396         [ #  # ]:          0 :                 if (list->tail)
    1397                 :          0 :                         list->tail->bi_next = rq->bio;
    1398                 :            :                 else
    1399                 :          0 :                         list->head = rq->bio;
    1400                 :          0 :                 list->tail = rq->biotail;
    1401                 :            : 
    1402                 :          0 :                 rq->bio = NULL;
    1403                 :          0 :                 rq->biotail = NULL;
    1404                 :            :         }
    1405                 :            : 
    1406                 :          0 :         rq->__data_len = 0;
    1407                 :          0 : }
    1408                 :            : EXPORT_SYMBOL_GPL(blk_steal_bios);
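/*
 * Illustrative sketch added by the editor (not part of blk-core.c): a
 * typical blk_steal_bios() caller.  A multipath-style driver that wants to
 * retry a failed request on another path moves its bios onto a private
 * list, completes the now-empty request, and resubmits the stolen bios
 * later (for example from a work item).  sketch_failover_request() and
 * requeue_list are invented names.
 */
static void sketch_failover_request(struct request *rq, struct bio_list *requeue_list)
{
        blk_steal_bios(requeue_list, rq);
        /* the request no longer owns any bios, so it can be ended here */
        blk_mq_end_request(rq, BLK_STS_OK);
}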
    1409                 :            : 
    1410                 :            : /**
    1411                 :            :  * blk_update_request - Special helper function for request stacking drivers
    1412                 :            :  * @req:      the request being processed
    1413                 :            :  * @error:    block status code
    1414                 :            :  * @nr_bytes: number of bytes to complete @req
    1415                 :            :  *
    1416                 :            :  * Description:
    1417                 :            :  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
     1418                 :            :  *     the request structure even if @req has nothing left over.
    1419                 :            :  *     If @req has leftover, sets it up for the next range of segments.
    1420                 :            :  *
    1421                 :            :  *     This special helper function is only for request stacking drivers
    1422                 :            :  *     (e.g. request-based dm) so that they can handle partial completion.
    1423                 :            :  *     Actual device drivers should use blk_mq_end_request instead.
    1424                 :            :  *
    1425                 :            :  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
    1426                 :            :  *     %false return from this function.
    1427                 :            :  *
    1428                 :            :  * Note:
    1429                 :            :  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
    1430                 :            :  *      blk_rq_bytes() and in blk_update_request().
    1431                 :            :  *
    1432                 :            :  * Return:
    1433                 :            :  *     %false - this request doesn't have any more data
    1434                 :            :  *     %true  - this request has more data
    1435                 :            :  **/
    1436                 :      32300 : bool blk_update_request(struct request *req, blk_status_t error,
    1437                 :            :                 unsigned int nr_bytes)
    1438                 :            : {
    1439                 :      32300 :         int total_bytes;
    1440                 :            : 
    1441         [ -  + ]:      64600 :         trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
    1442                 :            : 
    1443         [ +  + ]:      32300 :         if (!req->bio)
    1444                 :            :                 return false;
    1445                 :            : 
    1446                 :            : #ifdef CONFIG_BLK_DEV_INTEGRITY
    1447                 :            :         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
    1448                 :            :             error == BLK_STS_OK)
    1449                 :            :                 req->q->integrity.profile->complete_fn(req, nr_bytes);
    1450                 :            : #endif
    1451                 :            : 
    1452   [ -  +  -  -  :      31366 :         if (unlikely(error && !blk_rq_is_passthrough(req) &&
                   -  - ]
    1453                 :            :                      !(req->rq_flags & RQF_QUIET)))
    1454                 :          0 :                 print_req_error(req, error, __func__);
    1455                 :            : 
    1456                 :      31366 :         blk_account_io_completion(req, nr_bytes);
    1457                 :            : 
    1458                 :      31366 :         total_bytes = 0;
    1459         [ +  - ]:      46916 :         while (req->bio) {
    1460                 :      46916 :                 struct bio *bio = req->bio;
    1461                 :      46916 :                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
    1462                 :            : 
    1463         [ +  - ]:      46916 :                 if (bio_bytes == bio->bi_iter.bi_size)
    1464                 :      46916 :                         req->bio = bio->bi_next;
    1465                 :            : 
    1466                 :            :                 /* Completion has already been traced */
    1467                 :      46916 :                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
    1468                 :      46916 :                 req_bio_endio(req, bio, bio_bytes, error);
    1469                 :            : 
    1470                 :      46916 :                 total_bytes += bio_bytes;
    1471                 :      46916 :                 nr_bytes -= bio_bytes;
    1472                 :            : 
    1473         [ +  + ]:      46916 :                 if (!nr_bytes)
    1474                 :            :                         break;
    1475                 :            :         }
    1476                 :            : 
    1477                 :            :         /*
    1478                 :            :          * completely done
    1479                 :            :          */
    1480         [ +  - ]:      31366 :         if (!req->bio) {
    1481                 :            :                 /*
    1482                 :            :                  * Reset counters so that the request stacking driver
    1483                 :            :                  * can find how many bytes remain in the request
    1484                 :            :                  * later.
    1485                 :            :                  */
    1486                 :      31366 :                 req->__data_len = 0;
    1487                 :      31366 :                 return false;
    1488                 :            :         }
    1489                 :            : 
    1490                 :          0 :         req->__data_len -= total_bytes;
    1491                 :            : 
    1492                 :            :         /* update sector only for requests with clear definition of sector */
    1493   [ #  #  #  # ]:          0 :         if (!blk_rq_is_passthrough(req))
    1494                 :          0 :                 req->__sector += total_bytes >> 9;
    1495                 :            : 
    1496                 :            :         /* mixed attributes always follow the first bio */
    1497         [ #  # ]:          0 :         if (req->rq_flags & RQF_MIXED_MERGE) {
    1498                 :          0 :                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
    1499                 :          0 :                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
    1500                 :            :         }
    1501                 :            : 
    1502         [ #  # ]:          0 :         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
    1503                 :            :                 /*
    1504                 :            :                  * If total number of sectors is less than the first segment
    1505                 :            :                  * size, something has gone terribly wrong.
    1506                 :            :                  */
    1507         [ #  # ]:          0 :                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
    1508                 :          0 :                         blk_dump_rq_flags(req, "request botched");
    1509         [ #  # ]:          0 :                         req->__data_len = blk_rq_cur_bytes(req);
    1510                 :            :                 }
    1511                 :            : 
    1512                 :            :                 /* recalculate the number of segments */
    1513                 :          0 :                 req->nr_phys_segments = blk_recalc_rq_segments(req);
    1514                 :            :         }
    1515                 :            : 
    1516                 :            :         return true;
    1517                 :            : }
    1518                 :            : EXPORT_SYMBOL_GPL(blk_update_request);
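/*
 * Illustrative sketch added by the editor (not part of blk-core.c): how a
 * completion path consumes blk_update_request(), mirroring what
 * blk_mq_end_request() does for ordinary drivers.  sketch_end_request_bytes()
 * is an invented name.
 */
static void sketch_end_request_bytes(struct request *rq, blk_status_t error,
                                     unsigned int nr_bytes)
{
        if (blk_update_request(rq, error, nr_bytes))
                return;         /* partial completion: @rq still has data left */

        /* every bio is finished, so release the request itself */
        __blk_mq_end_request(rq, error);
}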
    1519                 :            : 
    1520                 :            : #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
    1521                 :            : /**
    1522                 :            :  * rq_flush_dcache_pages - Helper function to flush all pages in a request
    1523                 :            :  * @rq: the request to be flushed
    1524                 :            :  *
    1525                 :            :  * Description:
    1526                 :            :  *     Flush all pages in @rq.
    1527                 :            :  */
    1528                 :            : void rq_flush_dcache_pages(struct request *rq)
    1529                 :            : {
    1530                 :            :         struct req_iterator iter;
    1531                 :            :         struct bio_vec bvec;
    1532                 :            : 
    1533                 :            :         rq_for_each_segment(bvec, rq, iter)
    1534                 :            :                 flush_dcache_page(bvec.bv_page);
    1535                 :            : }
    1536                 :            : EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
    1537                 :            : #endif
    1538                 :            : 
    1539                 :            : /**
    1540                 :            :  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
    1541                 :            :  * @q : the queue of the device being checked
    1542                 :            :  *
    1543                 :            :  * Description:
    1544                 :            :  *    Check if underlying low-level drivers of a device are busy.
    1545                 :            :  *    If the drivers want to export their busy state, they must set own
     1546                 :            :  *    If the drivers want to export their busy state, they must set their own
    1547                 :            :  *
    1548                 :            :  *    Basically, this function is used only by request stacking drivers
    1549                 :            :  *    to stop dispatching requests to underlying devices while those
    1550                 :            :  *    devices are busy.  This behavior allows more I/O to be merged on
    1551                 :            :  *    the queue of the request stacking driver and prevents I/O
    1552                 :            :  *    throughput regressions under bursty I/O loads.
    1553                 :            :  *
    1554                 :            :  * Return:
    1555                 :            :  *    0 - Not busy (The request stacking driver should dispatch request)
    1556                 :            :  *    1 - Busy (The request stacking driver should stop dispatching request)
    1557                 :            :  */
    1558                 :          0 : int blk_lld_busy(struct request_queue *q)
    1559                 :            : {
    1560   [ #  #  #  # ]:          0 :         if (queue_is_mq(q) && q->mq_ops->busy)
    1561                 :          0 :                 return q->mq_ops->busy(q);
    1562                 :            : 
    1563                 :            :         return 0;
    1564                 :            : }
    1565                 :            : EXPORT_SYMBOL_GPL(blk_lld_busy);
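/*
 * Illustrative sketch (not part of blk-core.c): a hypothetical request
 * stacking driver asking whether an underlying device's low-level driver is
 * busy before dispatching a clone to it (compare dm-multipath's path busy
 * checks).  The mystack_* name is an assumption.
 */
#include <linux/blkdev.h>

static bool mystack_path_busy(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* Non-zero means "hold off dispatching to this device for now". */
	return blk_lld_busy(q) != 0;
}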
    1566                 :            : 
    1567                 :            : /**
    1568                 :            :  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
    1569                 :            :  * @rq: the clone request to be cleaned up
    1570                 :            :  *
    1571                 :            :  * Description:
    1572                 :            :  *     Free all bios in @rq for a cloned request.
    1573                 :            :  */
    1574                 :          0 : void blk_rq_unprep_clone(struct request *rq)
    1575                 :            : {
    1576                 :          0 :         struct bio *bio;
    1577                 :            : 
    1578   [ #  #  #  # ]:          0 :         while ((bio = rq->bio) != NULL) {
    1579                 :          0 :                 rq->bio = bio->bi_next;
    1580                 :            : 
    1581                 :          0 :                 bio_put(bio);
    1582                 :            :         }
    1583                 :          0 : }
    1584                 :            : EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
    1585                 :            : 
    1586                 :            : /*
    1587                 :            :  * Copy attributes of the original request to the clone request.
    1588                 :            :  * The actual data parts (e.g. ->cmd, ->sense) are not copied.
    1589                 :            :  */
    1590                 :          0 : static void __blk_rq_prep_clone(struct request *dst, struct request *src)
    1591                 :            : {
    1592                 :          0 :         dst->__sector = blk_rq_pos(src);
    1593         [ #  # ]:          0 :         dst->__data_len = blk_rq_bytes(src);
    1594         [ #  # ]:          0 :         if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
    1595                 :          0 :                 dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
    1596                 :          0 :                 dst->special_vec = src->special_vec;
    1597                 :            :         }
    1598                 :          0 :         dst->nr_phys_segments = src->nr_phys_segments;
    1599                 :          0 :         dst->ioprio = src->ioprio;
    1600                 :          0 :         dst->extra_len = src->extra_len;
    1601                 :            : }
    1602                 :            : 
    1603                 :            : /**
    1604                 :            :  * blk_rq_prep_clone - Helper function to setup clone request
    1605                 :            :  * @rq: the request to be setup
    1606                 :            :  * @rq_src: original request to be cloned
    1607                 :            :  * @bs: bio_set that bios for clone are allocated from
    1608                 :            :  * @gfp_mask: memory allocation mask for bio
    1609                 :            :  * @bio_ctr: setup function to be called for each clone bio.
    1610                 :            :  *           Returns %0 for success, non %0 for failure.
    1611                 :            :  * @data: private data to be passed to @bio_ctr
    1612                 :            :  *
    1613                 :            :  * Description:
    1614                 :            :  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
    1615                 :            :  *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
    1616                 :            :  *     are not copied, and copying such parts is the caller's responsibility.
    1617                 :            :  *     Also, the pages which the original bios point to are not copied;
    1618                 :            :  *     the cloned bios just point to the same pages.
    1619                 :            :  *     So the cloned bios must be completed before the original bios,
    1620                 :            :  *     which means the caller must complete @rq before @rq_src.
    1621                 :            :  */
    1622                 :          0 : int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
    1623                 :            :                       struct bio_set *bs, gfp_t gfp_mask,
    1624                 :            :                       int (*bio_ctr)(struct bio *, struct bio *, void *),
    1625                 :            :                       void *data)
    1626                 :            : {
    1627                 :          0 :         struct bio *bio, *bio_src;
    1628                 :            : 
    1629         [ #  # ]:          0 :         if (!bs)
    1630                 :          0 :                 bs = &fs_bio_set;
    1631                 :            : 
    1632   [ #  #  #  # ]:          0 :         __rq_for_each_bio(bio_src, rq_src) {
    1633                 :          0 :                 bio = bio_clone_fast(bio_src, gfp_mask, bs);
    1634         [ #  # ]:          0 :                 if (!bio)
    1635                 :          0 :                         goto free_and_out;
    1636                 :            : 
    1637   [ #  #  #  # ]:          0 :                 if (bio_ctr && bio_ctr(bio, bio_src, data))
    1638                 :          0 :                         goto free_and_out;
    1639                 :            : 
    1640         [ #  # ]:          0 :                 if (rq->bio) {
    1641                 :          0 :                         rq->biotail->bi_next = bio;
    1642                 :          0 :                         rq->biotail = bio;
    1643                 :            :                 } else
    1644                 :          0 :                         rq->bio = rq->biotail = bio;
    1645                 :            :         }
    1646                 :            : 
    1647         [ #  # ]:          0 :         __blk_rq_prep_clone(rq, rq_src);
    1648                 :            : 
    1649                 :          0 :         return 0;
    1650                 :            : 
    1651                 :          0 : free_and_out:
    1652         [ #  # ]:          0 :         if (bio)
    1653                 :          0 :                 bio_put(bio);
    1654                 :            :         blk_rq_unprep_clone(rq);
    1655                 :            : 
    1656                 :            :         return -ENOMEM;
    1657                 :            : }
    1658                 :            : EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
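/*
 * Illustrative sketch (not part of blk-core.c): the prep/unprep pairing a
 * request stacking driver would use around a clone, loosely modelled on
 * dm-rq.  The mystack_* names are assumptions; passing a NULL bio_set falls
 * back to fs_bio_set, and no per-bio constructor is needed here.
 */
#include <linux/blkdev.h>
#include <linux/gfp.h>

static int mystack_setup_clone(struct request *clone, struct request *orig)
{
	/* Clone the bios and copy the attributes of the original request. */
	return blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC, NULL, NULL);
}

static void mystack_release_clone(struct request *clone)
{
	/* Drop the cloned bios once the clone has fully completed. */
	blk_rq_unprep_clone(clone);
}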
    1659                 :            : 
    1660                 :        280 : int kblockd_schedule_work(struct work_struct *work)
    1661                 :            : {
    1662                 :        280 :         return queue_work(kblockd_workqueue, work);
    1663                 :            : }
    1664                 :            : EXPORT_SYMBOL(kblockd_schedule_work);
    1665                 :            : 
    1666                 :       7616 : int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
    1667                 :            :                                 unsigned long delay)
    1668                 :            : {
    1669                 :       7616 :         return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
    1670                 :            : }
    1671                 :            : EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
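/*
 * Illustrative sketch (not part of blk-core.c): deferring block-layer work
 * to the shared kblockd workqueue, similar in spirit to how blk-mq kicks
 * hardware queues asynchronously.  The mydrv_* names are assumptions.
 */
#include <linux/blkdev.h>
#include <linux/workqueue.h>

static void mydrv_kick_fn(struct work_struct *work)
{
	/* e.g. restart dispatching or run a hardware queue here */
}

static DECLARE_WORK(mydrv_kick_work, mydrv_kick_fn);

static void mydrv_kick_later(void)
{
	/* kblockd is WQ_MEM_RECLAIM | WQ_HIGHPRI, see blk_dev_init() below. */
	kblockd_schedule_work(&mydrv_kick_work);
}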
    1672                 :            : 
    1673                 :            : /**
    1674                 :            :  * blk_start_plug - initialize blk_plug and track it inside the task_struct
    1675                 :            :  * @plug:       The &struct blk_plug that needs to be initialized
    1676                 :            :  *
    1677                 :            :  * Description:
    1678                 :            :  *   blk_start_plug() indicates to the block layer an intent by the caller
    1679                 :            :  *   to submit multiple I/O requests in a batch.  The block layer may use
    1680                 :            :  *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
    1681                 :            :  *   is called.  However, the block layer may choose to submit requests
    1682                 :            :  *   before a call to blk_finish_plug() if the number of queued I/Os
    1683                 :            :  *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
    1684                 :            :  *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
    1685                 :            :  *   the task schedules (see below).
    1686                 :            :  *
    1687                 :            :  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
    1688                 :            :  *   pending I/O should the task end up blocking between blk_start_plug() and
    1689                 :            :  *   blk_finish_plug(). This is important from a performance perspective, but
    1690                 :            :  *   also ensures that we don't deadlock. For instance, if the task is blocking
    1691                 :            :  *   for a memory allocation, memory reclaim could end up wanting to free a
    1692                 :            :  *   page belonging to that request that is currently residing in our private
    1693                 :            :  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
    1694                 :            :  *   this kind of deadlock.
    1695                 :            :  */
    1696                 :      23723 : void blk_start_plug(struct blk_plug *plug)
    1697                 :            : {
    1698         [ +  - ]:      23723 :         struct task_struct *tsk = current;
    1699                 :            : 
    1700                 :            :         /*
    1701                 :            :          * If this is a nested plug, don't actually assign it.
    1702                 :            :          */
    1703         [ +  - ]:      23723 :         if (tsk->plug)
    1704                 :            :                 return;
    1705                 :            : 
    1706                 :      23723 :         INIT_LIST_HEAD(&plug->mq_list);
    1707                 :      23723 :         INIT_LIST_HEAD(&plug->cb_list);
    1708                 :      23723 :         plug->rq_count = 0;
    1709                 :      23723 :         plug->multiple_queues = false;
    1710                 :            : 
    1711                 :            :         /*
    1712                 :            :          * Store ordering should not be needed here, since a potential
    1713                 :            :          * preempt will imply a full memory barrier
    1714                 :            :          */
    1715                 :      23723 :         tsk->plug = plug;
    1716                 :            : }
    1717                 :            : EXPORT_SYMBOL(blk_start_plug);
    1718                 :            : 
    1719                 :      24251 : static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
    1720                 :            : {
    1721                 :      24251 :         LIST_HEAD(callbacks);
    1722                 :            : 
    1723         [ -  + ]:      24251 :         while (!list_empty(&plug->cb_list)) {
    1724         [ #  # ]:          0 :                 list_splice_init(&plug->cb_list, &callbacks);
    1725                 :            : 
    1726         [ #  # ]:          0 :                 while (!list_empty(&callbacks)) {
    1727                 :          0 :                         struct blk_plug_cb *cb = list_first_entry(&callbacks,
    1728                 :            :                                                           struct blk_plug_cb,
    1729                 :            :                                                           list);
    1730                 :          0 :                         list_del(&cb->list);
    1731                 :          0 :                         cb->callback(cb, from_schedule);
    1732                 :            :                 }
    1733                 :            :         }
    1734                 :      24251 : }
    1735                 :            : 
    1736                 :          0 : struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
    1737                 :            :                                       int size)
    1738                 :            : {
    1739         [ #  # ]:          0 :         struct blk_plug *plug = current->plug;
    1740                 :          0 :         struct blk_plug_cb *cb;
    1741                 :            : 
    1742         [ #  # ]:          0 :         if (!plug)
    1743                 :            :                 return NULL;
    1744                 :            : 
    1745         [ #  # ]:          0 :         list_for_each_entry(cb, &plug->cb_list, list)
    1746   [ #  #  #  # ]:          0 :                 if (cb->callback == unplug && cb->data == data)
    1747                 :          0 :                         return cb;
    1748                 :            : 
    1749                 :            :         /* Not currently on the callback list */
    1750         [ #  # ]:          0 :         BUG_ON(size < sizeof(*cb));
    1751                 :          0 :         cb = kzalloc(size, GFP_ATOMIC);
    1752         [ #  # ]:          0 :         if (cb) {
    1753                 :          0 :                 cb->data = data;
    1754                 :          0 :                 cb->callback = unplug;
    1755                 :          0 :                 list_add(&cb->list, &plug->cb_list);
    1756                 :            :         }
    1757                 :            :         return cb;
    1758                 :            : }
    1759                 :            : EXPORT_SYMBOL(blk_check_plugged);
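/*
 * Illustrative sketch (not part of blk-core.c): the blk_check_plugged()
 * pattern, loosely modelled on the md/raid plug callbacks.  The mydrv_*
 * names are assumptions; a zero-initialised bio_list (from the kzalloc in
 * blk_check_plugged()) is already a valid empty list.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct mydrv_plug_cb {
	struct blk_plug_cb cb;		/* must be embedded for container_of() */
	struct bio_list pending;	/* bios batched until the task unplugs */
};

static void mydrv_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct mydrv_plug_cb *mcb = container_of(cb, struct mydrv_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&mcb->pending)) != NULL)
		generic_make_request(bio);	/* hand each batched bio back */
	kfree(mcb);				/* the callback owner frees it */
}

static bool mydrv_defer_bio(struct bio *bio, void *data)
{
	struct blk_plug_cb *cb;
	struct mydrv_plug_cb *mcb;

	cb = blk_check_plugged(mydrv_unplug, data, sizeof(*mcb));
	if (!cb)
		return false;	/* no plug active: caller submits directly */

	mcb = container_of(cb, struct mydrv_plug_cb, cb);
	bio_list_add(&mcb->pending, bio);	/* flushed at unplug time */
	return true;
}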
    1760                 :            : 
    1761                 :      24251 : void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
    1762                 :            : {
    1763                 :      24251 :         flush_plug_callbacks(plug, from_schedule);
    1764                 :            : 
    1765         [ +  + ]:      24251 :         if (!list_empty(&plug->mq_list))
    1766                 :      23956 :                 blk_mq_flush_plug_list(plug, from_schedule);
    1767                 :      24251 : }
    1768                 :            : 
    1769                 :            : /**
    1770                 :            :  * blk_finish_plug - mark the end of a batch of submitted I/O
    1771                 :            :  * @plug:       The &struct blk_plug passed to blk_start_plug()
    1772                 :            :  *
    1773                 :            :  * Description:
    1774                 :            :  * Indicate that a batch of I/O submissions is complete.  This function
    1775                 :            :  * must be paired with an initial call to blk_start_plug().  The intent
    1776                 :            :  * is to allow the block layer to optimize I/O submission.  See the
    1777                 :            :  * documentation for blk_start_plug() for more information.
    1778                 :            :  */
    1779                 :      23723 : void blk_finish_plug(struct blk_plug *plug)
    1780                 :            : {
    1781         [ +  - ]:      23723 :         if (plug != current->plug)
    1782                 :            :                 return;
    1783                 :      23723 :         blk_flush_plug_list(plug, false);
    1784                 :            : 
    1785                 :      23723 :         current->plug = NULL;
    1786                 :            : }
    1787                 :            : EXPORT_SYMBOL(blk_finish_plug);
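/*
 * Illustrative sketch (not part of blk-core.c): the canonical plugging
 * pattern around a batch of submissions.  How the bios are built is elided;
 * only the blk_start_plug()/blk_finish_plug() pairing matters here.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void mydrv_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* may be held back in the per-task plug */
	blk_finish_plug(&plug);		/* flushes anything still plugged */
}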
    1788                 :            : 
    1789                 :         28 : int __init blk_dev_init(void)
    1790                 :            : {
    1791                 :         28 :         BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
    1792                 :         28 :         BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
    1793                 :            :                         sizeof_field(struct request, cmd_flags));
    1794                 :         28 :         BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
    1795                 :            :                         sizeof_field(struct bio, bi_opf));
    1796                 :            : 
    1797                 :            :         /* used for unplugging and affects IO latency/throughput - HIGHPRI */
    1798                 :         28 :         kblockd_workqueue = alloc_workqueue("kblockd",
    1799                 :            :                                             WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
    1800         [ -  + ]:         28 :         if (!kblockd_workqueue)
    1801                 :          0 :                 panic("Failed to create kblockd\n");
    1802                 :            : 
    1803                 :         28 :         blk_requestq_cachep = kmem_cache_create("request_queue",
    1804                 :            :                         sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
    1805                 :            : 
    1806                 :            : #ifdef CONFIG_DEBUG_FS
    1807                 :         28 :         blk_debugfs_root = debugfs_create_dir("block", NULL);
    1808                 :            : #endif
    1809                 :            : 
    1810                 :         28 :         return 0;
    1811                 :            : }

Generated by: LCOV version 1.14