// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to executing requests from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	/* Reuse end_io_data to hand the status back to blk_execute_rq(). */
	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * Complete last: if this is a stack request, the process (and thus
	 * the rq pointer) could be invalid right after this complete().
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into the I/O scheduler for execution
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
			   int at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * Don't check the dying flag for MQ because the request won't
	 * be reused after the dying flag is set.
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
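
/*
 * Usage sketch (illustrative only, not part of this file): a driver could
 * issue a fully prepared passthrough request asynchronously by passing its
 * own completion handler.  The example_* names below are hypothetical.
 */
static void example_end_io(struct request *rq, blk_status_t error)
{
	/* Called once the device completes the request. */
	blk_put_request(rq);
}

static void example_submit_nowait(struct gendisk *disk, struct request *rq)
{
	/* Queue at the tail and return immediately; example_end_io runs later. */
	blk_execute_rq_nowait(disk, rq, 0, example_end_io);
}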

/*
 * Only requests allocated from a polled hardware context, and carrying a
 * bio to poll on, can be completed by polling.
 */
static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

/* Polled queues raise no completion interrupt; spin on bio_poll() instead. */
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 *
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
			    int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
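
/*
 * Usage sketch (illustrative only, not part of this file): synchronous
 * submission of a driver-private command.  Assumes a kernel of this
 * vintage where blk_get_request()/blk_put_request() allocate and free
 * passthrough requests; example_submit_sync is a hypothetical name.
 */
static int example_submit_sync(struct gendisk *disk, struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Blocks (or polls, for HCTX_TYPE_POLL queues) until completion. */
	status = blk_execute_rq(disk, rq, 0);
	blk_put_request(rq);

	return blk_status_to_errno(status);
}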