blob: 65875a598d6291feb0318559678d5bdd1fc5e3f2 [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * sd.c Copyright (C) 1992 Drew Eckhardt
4 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5 *
6 * Linux scsi disk driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 * Modification history:
10 * - Drew Eckhardt <drew@colorado.edu> original
11 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
12 * outstanding request, and other enhancements.
13 * Support loadable low-level scsi drivers.
14 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
15 * eight major numbers.
16 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
17 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
18 * sd_init and cleanups.
19 * - Alex Davis <letmein@erols.com> Fix problem where partition info
20 * not being read in sd_open. Fix problem where removable media
21 * could be ejected after sd_open.
22 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
23 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
24 * <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
25 * Support 32k/1M disks.
26 *
27 * Logging policy (needs CONFIG_SCSI_LOGGING defined):
28 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
29 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
30 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1
31 * - entering other commands: SCSI_LOG_HLQUEUE level 3
32 * Note: when the logging level is set by the user, it must be greater
33 * than the level indicated above to trigger output.
34 */
35
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <linux/module.h>
37#include <linux/fs.h>
38#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/mm.h>
40#include <linux/bio.h>
41#include <linux/genhd.h>
42#include <linux/hdreg.h>
43#include <linux/errno.h>
44#include <linux/idr.h>
45#include <linux/interrupt.h>
46#include <linux/init.h>
47#include <linux/blkdev.h>
48#include <linux/blkpg.h>
Bart Van Asschebca6b062018-09-26 14:01:03 -070049#include <linux/blk-pm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <linux/delay.h>
Christoph Hellwigb81e0c22021-09-20 14:33:25 +020051#include <linux/major.h>
Arjan van de Ven0b950672006-01-11 13:16:10 +010052#include <linux/mutex.h>
James Bottomley7404ad3b2008-08-31 10:41:52 -050053#include <linux/string_helpers.h>
Arjan van de Ven4ace92f2009-01-04 05:32:28 -080054#include <linux/async.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090055#include <linux/slab.h>
Christoph Hellwigd80210f2017-06-19 14:26:46 +020056#include <linux/sed-opal.h>
Lin Ming54f575882011-12-05 09:20:26 +080057#include <linux/pm_runtime.h>
Christoph Hellwig924d55b2015-10-15 14:10:49 +020058#include <linux/pr.h>
Christoph Hellwig8475c812016-09-11 19:35:41 +020059#include <linux/t10-pi.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080060#include <linux/uaccess.h>
Dave Hansen8f76d152009-04-21 16:43:27 -070061#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
63#include <scsi/scsi.h>
64#include <scsi/scsi_cmnd.h>
65#include <scsi/scsi_dbg.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_driver.h>
68#include <scsi/scsi_eh.h>
69#include <scsi/scsi_host.h>
70#include <scsi/scsi_ioctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070071#include <scsi/scsicam.h>
72
Martin K. Petersenaa916962008-06-17 12:47:32 -040073#include "sd.h"
Dan Williamsa7a20d12012-03-22 17:05:11 -070074#include "scsi_priv.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070075#include "scsi_logging.h"
76
Rene Hermanf018fa52006-03-08 00:14:20 -080077MODULE_AUTHOR("Eric Youngdale");
78MODULE_DESCRIPTION("SCSI disk (sd) driver");
79MODULE_LICENSE("GPL");
80
81MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
82MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
83MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
84MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
85MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
86MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
87MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
88MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
89MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
90MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
91MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
92MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
93MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
94MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
95MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
96MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
Michael Tokarevd7b8bcb02006-10-27 16:02:37 +040097MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
98MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
99MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
Hannes Reinecke89d94752016-10-18 15:40:34 +0900100MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
Rene Hermanf018fa52006-03-08 00:14:20 -0800101
Tejun Heof615b482008-08-25 19:47:24 +0900102#define SD_MINORS 16
Tejun Heo870d6652008-08-25 19:47:25 +0900103
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -0500104static void sd_config_discard(struct scsi_disk *, unsigned int);
Martin K. Petersen5db44862012-09-18 12:19:32 -0400105static void sd_config_write_same(struct scsi_disk *);
Linus Torvalds7b3d9542008-01-06 10:17:12 -0800106static int sd_revalidate_disk(struct gendisk *);
Tejun Heo72ec24b2010-05-15 20:09:32 +0200107static void sd_unlock_native_capacity(struct gendisk *disk);
Linus Torvalds7b3d9542008-01-06 10:17:12 -0800108static int sd_probe(struct device *);
109static int sd_remove(struct device *);
110static void sd_shutdown(struct device *);
Oliver Neukum95897912013-09-16 13:28:15 +0200111static int sd_suspend_system(struct device *);
112static int sd_suspend_runtime(struct device *);
Bart Van Assche1c957532021-10-06 14:54:52 -0700113static int sd_resume_system(struct device *);
Martin Kepplingered4246d2021-07-04 09:54:02 +0200114static int sd_resume_runtime(struct device *);
Linus Torvalds7b3d9542008-01-06 10:17:12 -0800115static void sd_rescan(struct device *);
Christoph Hellwig159b2cb2018-11-09 14:42:39 +0100116static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
Christoph Hellwiga1b73fc2014-05-01 16:51:04 +0200117static void sd_uninit_command(struct scsi_cmnd *SCpnt);
Linus Torvalds7b3d9542008-01-06 10:17:12 -0800118static int sd_done(struct scsi_cmnd *);
Hannes Reinecke7a38dc02017-04-06 15:36:29 +0200119static void sd_eh_reset(struct scsi_cmnd *);
James Bottomley24510792013-11-11 13:44:53 +0100120static int sd_eh_action(struct scsi_cmnd *, int);
Linus Torvalds7b3d9542008-01-06 10:17:12 -0800121static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
Tony Jonesee959b02008-02-22 00:13:36 +0100122static void scsi_disk_release(struct device *cdev);
Linus Torvalds7b3d9542008-01-06 10:17:12 -0800123
Tejun Heof27bac22008-07-14 14:59:30 +0900124static DEFINE_IDA(sd_index_ida);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125
126/* This semaphore is used to mediate the 0->1 reference get in the
127 * face of object destruction (i.e. we can't allow a get on an
128 * object after last put) */
Arjan van de Ven0b950672006-01-11 13:16:10 +0100129static DEFINE_MUTEX(sd_ref_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
H Hartley Sweeten439d77f2010-08-10 18:01:20 -0700131static struct kmem_cache *sd_cdb_cache;
132static mempool_t *sd_cdb_pool;
Jens Axboe61cce6f2018-12-12 06:46:55 -0700133static mempool_t *sd_page_pool;
Christoph Hellwig9c2b9db2021-08-16 15:19:06 +0200134static struct lock_class_key sd_bio_compl_lkclass;
Martin K. Petersen4e7392e2009-09-20 16:49:38 -0400135
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600136static const char *sd_cache_types[] = {
137 "write through", "none", "write back",
138 "write back, no read (daft)"
139};
140
Vaughan Caocb2fb682014-06-03 17:37:30 +0800141static void sd_set_flush_flag(struct scsi_disk *sdkp)
142{
Jens Axboeeb310e22016-03-30 10:06:11 -0600143 bool wc = false, fua = false;
Vaughan Caocb2fb682014-06-03 17:37:30 +0800144
145 if (sdkp->WCE) {
Jens Axboeeb310e22016-03-30 10:06:11 -0600146 wc = true;
Vaughan Caocb2fb682014-06-03 17:37:30 +0800147 if (sdkp->DPOFUA)
Jens Axboeeb310e22016-03-30 10:06:11 -0600148 fua = true;
Vaughan Caocb2fb682014-06-03 17:37:30 +0800149 }
150
Jens Axboeeb310e22016-03-30 10:06:11 -0600151 blk_queue_write_cache(sdkp->disk->queue, wc, fua);
Vaughan Caocb2fb682014-06-03 17:37:30 +0800152}
153
/*
 * cache_type sysfs store: parse one of sd_cache_types[] (optionally
 * prefixed with "temporary ") and program the drive's caching mode page
 * (page 0x08) via MODE SENSE / MODE SELECT.  With the "temporary "
 * prefix only the kernel's idea of the cache mode is changed; nothing
 * is written to the device.
 */
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;	/* don't touch the device */
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	/* bit 0 of the index selects RCD, bit 1 selects WCE */
	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		/* "temporary": update cached state and flush flags only */
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		data.block_descriptor_length;
	buffer_data[2] &= ~0x05;	/* clear WCE (bit 2) and RCD (bit 0) */
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;	/* PS: page is saveable */
	buffer_data[0] &= ~0x80;	/* PS is reserved in MODE SELECT data */

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	/* re-read device state so WCE/RCD etc. reflect what stuck */
	sd_revalidate_disk(sdkp->disk);
	return count;
}
222
Tony Jonesee959b02008-02-22 00:13:36 +0100223static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700224manage_start_stop_show(struct device *dev, struct device_attribute *attr,
225 char *buf)
226{
227 struct scsi_disk *sdkp = to_scsi_disk(dev);
228 struct scsi_device *sdp = sdkp->device;
229
Martin K. Petersen4c117122017-05-25 09:34:30 -0400230 return sprintf(buf, "%u\n", sdp->manage_start_stop);
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700231}
232
233static ssize_t
234manage_start_stop_store(struct device *dev, struct device_attribute *attr,
235 const char *buf, size_t count)
Tejun Heoc3c94c5a2007-03-21 00:13:59 +0900236{
Tony Jonesee959b02008-02-22 00:13:36 +0100237 struct scsi_disk *sdkp = to_scsi_disk(dev);
Tejun Heoc3c94c5a2007-03-21 00:13:59 +0900238 struct scsi_device *sdp = sdkp->device;
weiping zhang623401e2017-10-12 14:57:06 +0800239 bool v;
Tejun Heoc3c94c5a2007-03-21 00:13:59 +0900240
241 if (!capable(CAP_SYS_ADMIN))
242 return -EACCES;
243
weiping zhang623401e2017-10-12 14:57:06 +0800244 if (kstrtobool(buf, &v))
245 return -EINVAL;
246
247 sdp->manage_start_stop = v;
Tejun Heoc3c94c5a2007-03-21 00:13:59 +0900248
249 return count;
250}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700251static DEVICE_ATTR_RW(manage_start_stop);
Tejun Heoc3c94c5a2007-03-21 00:13:59 +0900252
Tony Jonesee959b02008-02-22 00:13:36 +0100253static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700254allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
255{
256 struct scsi_disk *sdkp = to_scsi_disk(dev);
257
Martin K. Petersen4c117122017-05-25 09:34:30 -0400258 return sprintf(buf, "%u\n", sdkp->device->allow_restart);
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700259}
260
261static ssize_t
262allow_restart_store(struct device *dev, struct device_attribute *attr,
263 const char *buf, size_t count)
Brian Kinga144c5a2006-06-27 11:10:31 -0500264{
weiping zhang658e9a62017-10-12 14:56:44 +0800265 bool v;
Tony Jonesee959b02008-02-22 00:13:36 +0100266 struct scsi_disk *sdkp = to_scsi_disk(dev);
Brian Kinga144c5a2006-06-27 11:10:31 -0500267 struct scsi_device *sdp = sdkp->device;
268
269 if (!capable(CAP_SYS_ADMIN))
270 return -EACCES;
271
Hannes Reinecke89d94752016-10-18 15:40:34 +0900272 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
Brian Kinga144c5a2006-06-27 11:10:31 -0500273 return -EINVAL;
274
weiping zhang658e9a62017-10-12 14:56:44 +0800275 if (kstrtobool(buf, &v))
276 return -EINVAL;
277
278 sdp->allow_restart = v;
Brian Kinga144c5a2006-06-27 11:10:31 -0500279
280 return count;
281}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700282static DEVICE_ATTR_RW(allow_restart);
Brian Kinga144c5a2006-06-27 11:10:31 -0500283
Tony Jonesee959b02008-02-22 00:13:36 +0100284static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700285cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600286{
Tony Jonesee959b02008-02-22 00:13:36 +0100287 struct scsi_disk *sdkp = to_scsi_disk(dev);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600288 int ct = sdkp->RCD + 2*sdkp->WCE;
289
Martin K. Petersen4c117122017-05-25 09:34:30 -0400290 return sprintf(buf, "%s\n", sd_cache_types[ct]);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600291}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700292static DEVICE_ATTR_RW(cache_type);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600293
Tony Jonesee959b02008-02-22 00:13:36 +0100294static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700295FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600296{
Tony Jonesee959b02008-02-22 00:13:36 +0100297 struct scsi_disk *sdkp = to_scsi_disk(dev);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600298
Martin K. Petersen4c117122017-05-25 09:34:30 -0400299 return sprintf(buf, "%u\n", sdkp->DPOFUA);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600300}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700301static DEVICE_ATTR_RO(FUA);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600302
Tony Jonesee959b02008-02-22 00:13:36 +0100303static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700304protection_type_show(struct device *dev, struct device_attribute *attr,
305 char *buf)
Martin K. Petersene0597d72008-07-17 04:28:34 -0400306{
307 struct scsi_disk *sdkp = to_scsi_disk(dev);
308
Martin K. Petersen4c117122017-05-25 09:34:30 -0400309 return sprintf(buf, "%u\n", sdkp->protection_type);
Martin K. Petersene0597d72008-07-17 04:28:34 -0400310}
311
312static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700313protection_type_store(struct device *dev, struct device_attribute *attr,
314 const char *buf, size_t count)
Martin K. Petersen81724992012-08-28 14:29:34 -0400315{
316 struct scsi_disk *sdkp = to_scsi_disk(dev);
317 unsigned int val;
318 int err;
319
320 if (!capable(CAP_SYS_ADMIN))
321 return -EACCES;
322
323 err = kstrtouint(buf, 10, &val);
324
325 if (err)
326 return err;
327
Bart Van Assche830cc352017-08-25 13:46:35 -0700328 if (val <= T10_PI_TYPE3_PROTECTION)
Martin K. Petersen81724992012-08-28 14:29:34 -0400329 sdkp->protection_type = val;
330
331 return count;
332}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700333static DEVICE_ATTR_RW(protection_type);
Martin K. Petersen81724992012-08-28 14:29:34 -0400334
/*
 * Report the effective protection mode as "none", "dif<N>" or "dix<N>"
 * by combining the disk's format type with the host's DIF/DIX
 * capabilities.
 */
static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	/* no DIX for this type, but host can do type-0 DIX: report dix0 */
	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	/* DIX takes precedence in the label when both are available */
	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);
Martin K. Petersen518fa8e2010-10-08 01:36:24 -0400357
358static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700359app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
Martin K. Petersene0597d72008-07-17 04:28:34 -0400360{
361 struct scsi_disk *sdkp = to_scsi_disk(dev);
362
Martin K. Petersen4c117122017-05-25 09:34:30 -0400363 return sprintf(buf, "%u\n", sdkp->ATO);
Martin K. Petersene0597d72008-07-17 04:28:34 -0400364}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700365static DEVICE_ATTR_RO(app_tag_own);
Martin K. Petersene0597d72008-07-17 04:28:34 -0400366
Martin K. Petersene339c1a2009-11-26 12:00:40 -0500367static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700368thin_provisioning_show(struct device *dev, struct device_attribute *attr,
369 char *buf)
Martin K. Petersene339c1a2009-11-26 12:00:40 -0500370{
371 struct scsi_disk *sdkp = to_scsi_disk(dev);
372
Martin K. Petersen4c117122017-05-25 09:34:30 -0400373 return sprintf(buf, "%u\n", sdkp->lbpme);
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -0500374}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700375static DEVICE_ATTR_RO(thin_provisioning);
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -0500376
/* sysfs_match_string() requires dense arrays */
/* user-visible names for the SD_LBP_* provisioning modes */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};
386
387static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700388provisioning_mode_show(struct device *dev, struct device_attribute *attr,
389 char *buf)
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -0500390{
391 struct scsi_disk *sdkp = to_scsi_disk(dev);
392
Martin K. Petersen4c117122017-05-25 09:34:30 -0400393 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -0500394}
395
/*
 * provisioning_mode sysfs store: select how discards are translated
 * (UNMAP, WRITE SAME variants, ...).  Zoned disks never get discard
 * support: the request forcibly disables it but still reports success.
 */
static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		/* discard is not supported on zoned devices; accept and disable */
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);
Martin K. Petersene339c1a2009-11-26 12:00:40 -0500424
/* sysfs_match_string() requires dense arrays */
/* user-visible names for the SD_ZERO_* write-zeroes strategies */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};
432
433static ssize_t
434zeroing_mode_show(struct device *dev, struct device_attribute *attr,
435 char *buf)
436{
437 struct scsi_disk *sdkp = to_scsi_disk(dev);
438
Martin K. Petersen4c117122017-05-25 09:34:30 -0400439 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
Martin K. Petersene6bd9312017-04-05 19:21:24 +0200440}
441
442static ssize_t
443zeroing_mode_store(struct device *dev, struct device_attribute *attr,
444 const char *buf, size_t count)
445{
446 struct scsi_disk *sdkp = to_scsi_disk(dev);
Martin K. Petersen4c117122017-05-25 09:34:30 -0400447 int mode;
Martin K. Petersene6bd9312017-04-05 19:21:24 +0200448
449 if (!capable(CAP_SYS_ADMIN))
450 return -EACCES;
451
Martin K. Petersen4c117122017-05-25 09:34:30 -0400452 mode = sysfs_match_string(zeroing_mode, buf);
453 if (mode < 0)
Martin K. Petersene6bd9312017-04-05 19:21:24 +0200454 return -EINVAL;
455
Martin K. Petersen4c117122017-05-25 09:34:30 -0400456 sdkp->zeroing_mode = mode;
457
Martin K. Petersene6bd9312017-04-05 19:21:24 +0200458 return count;
459}
460static DEVICE_ATTR_RW(zeroing_mode);
461
Martin K. Petersen18a4d0a2012-02-09 13:48:53 -0500462static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700463max_medium_access_timeouts_show(struct device *dev,
464 struct device_attribute *attr, char *buf)
Martin K. Petersen18a4d0a2012-02-09 13:48:53 -0500465{
466 struct scsi_disk *sdkp = to_scsi_disk(dev);
467
Martin K. Petersen4c117122017-05-25 09:34:30 -0400468 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
Martin K. Petersen18a4d0a2012-02-09 13:48:53 -0500469}
470
471static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700472max_medium_access_timeouts_store(struct device *dev,
473 struct device_attribute *attr, const char *buf,
474 size_t count)
Martin K. Petersen18a4d0a2012-02-09 13:48:53 -0500475{
476 struct scsi_disk *sdkp = to_scsi_disk(dev);
477 int err;
478
479 if (!capable(CAP_SYS_ADMIN))
480 return -EACCES;
481
482 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
483
484 return err ? err : count;
485}
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700486static DEVICE_ATTR_RW(max_medium_access_timeouts);
Martin K. Petersen18a4d0a2012-02-09 13:48:53 -0500487
Martin K. Petersen5db44862012-09-18 12:19:32 -0400488static ssize_t
Greg Kroah-Hartmane1ea2352013-07-24 15:05:28 -0700489max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
490 char *buf)
Martin K. Petersen5db44862012-09-18 12:19:32 -0400491{
492 struct scsi_disk *sdkp = to_scsi_disk(dev);
493
Martin K. Petersen4c117122017-05-25 09:34:30 -0400494 return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
Martin K. Petersen5db44862012-09-18 12:19:32 -0400495}
496
/*
 * max_write_same_blocks sysfs store: cap the number of blocks a single
 * WRITE SAME command may cover.  Writing 0 disables WRITE SAME for the
 * device; values up to SD_MAX_WS16_BLOCKS re-enable it with that cap.
 */
static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;	/* 0 means "never issue WRITE SAME" */
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}
	/* values above SD_MAX_WS16_BLOCKS are silently ignored */

	/* push the (possibly unchanged) limits into the request queue */
	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);
Martin K. Petersen5db44862012-09-18 12:19:32 -0400529
Damien Le Moalc5f88522020-05-15 14:48:56 +0900530static ssize_t
531zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
532{
533 struct scsi_disk *sdkp = to_scsi_disk(dev);
534
535 if (sdkp->device->type == TYPE_ZBC)
536 return sprintf(buf, "host-managed\n");
537 if (sdkp->zoned == 1)
538 return sprintf(buf, "host-aware\n");
539 if (sdkp->zoned == 2)
540 return sprintf(buf, "drive-managed\n");
541 return sprintf(buf, "none\n");
542}
543static DEVICE_ATTR_RO(zoned_cap);
544
Mike Christie06109592020-10-01 10:35:54 -0500545static ssize_t
546max_retries_store(struct device *dev, struct device_attribute *attr,
547 const char *buf, size_t count)
548{
549 struct scsi_disk *sdkp = to_scsi_disk(dev);
550 struct scsi_device *sdev = sdkp->device;
551 int retries, err;
552
553 err = kstrtoint(buf, 10, &retries);
554 if (err)
555 return err;
556
557 if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
558 sdkp->max_retries = retries;
559 return count;
560 }
561
562 sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
563 SD_MAX_RETRIES);
564 return -EINVAL;
565}
566
567static ssize_t
568max_retries_show(struct device *dev, struct device_attribute *attr,
569 char *buf)
570{
571 struct scsi_disk *sdkp = to_scsi_disk(dev);
572
573 return sprintf(buf, "%d\n", sdkp->max_retries);
574}
575
576static DEVICE_ATTR_RW(max_retries);
577
/* All sd sysfs attributes, published as the "sd_disk" attribute group. */
static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
James Bottomley6bdaa1f2006-03-18 14:14:21 -0600596
/* "scsi_disk" device class; carries the attribute groups defined above. */
static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700603
/*
 * Power management callbacks: the system-sleep pair handles both
 * suspend/resume and hibernate poweroff/restore; runtime PM has its
 * own suspend/resume pair.
 */
static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};
612
/* SCSI upper-level driver registration for the "sd" disk driver. */
static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};
630
/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
	/* intentionally empty: block-major probe hook that must never
	 * fall back to request_module() */
}
638
639/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 * Device no to disk mapping:
641 *
642 * major disc2 disc p1
643 * |............|.............|....|....| <- dev_t
644 * 31 20 19 8 7 4 3 0
645 *
646 * Inside a major, we have 16k disks, however mapped non-
647 * contiguously. The first 16 disks are for major0, the next
648 * ones with major1, ... Disk 256 is for major0 again, disk 272
649 * for major1, ...
650 * As we stay compatible with our numbering scheme, we can reuse
651 * the well-know SCSI majors 8, 65--71, 136--143.
652 */
653static int sd_major(int major_idx)
654{
655 switch (major_idx) {
656 case 0:
657 return SCSI_DISK0_MAJOR;
658 case 1 ... 7:
659 return SCSI_DISK1_MAJOR + major_idx - 1;
660 case 8 ... 15:
661 return SCSI_DISK8_MAJOR + major_idx - 8;
662 default:
663 BUG();
664 return 0; /* shut up gcc */
665 }
666}
667
/*
 * Take a reference on the scsi_disk behind @disk, if it still exists.
 * sd_ref_mutex serializes the 0->1 reference transition against object
 * teardown (see the comment at sd_ref_mutex).  The scsi_device
 * reference is taken first; the sdkp device reference is only taken if
 * that succeeded.  Returns NULL when the disk has no private data or
 * the underlying device is going away.
 */
static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
	struct scsi_disk *sdkp = NULL;

	mutex_lock(&sd_ref_mutex);

	if (disk->private_data) {
		sdkp = scsi_disk(disk);
		if (scsi_device_get(sdkp->device) == 0)
			get_device(&sdkp->dev);
		else
			sdkp = NULL;
	}
	mutex_unlock(&sd_ref_mutex);
	return sdkp;
}
684
/*
 * Drop the references taken by scsi_disk_get(): the driver-model
 * reference on the scsi_disk and the scsi_device reference.
 * sd_ref_mutex serializes this against scsi_disk_get().
 */
static void scsi_disk_put(struct scsi_disk *sdkp)
{
	/* Cache the device pointer; put_device() may free sdkp. */
	struct scsi_device *sdev = sdkp->device;

	mutex_lock(&sd_ref_mutex);
	put_device(&sdkp->dev);
	scsi_device_put(sdev);
	mutex_unlock(&sd_ref_mutex);
}
694
#ifdef CONFIG_BLK_SED_OPAL
/*
 * Opal SED security send/receive callback.
 *
 * @data is the scsi_disk registered with the Opal layer.  Builds a
 * SECURITY PROTOCOL IN/OUT CDB (protocol @secp, specific field @spsp,
 * allocation/transfer length @len) and issues it synchronously via
 * scsi_execute() with RQF_PM so it is allowed during PM transitions.
 * Returns 0 or a negative error from scsi_execute(); any positive
 * (SCSI status) result is collapsed to -EIO.
 */
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
		RQF_PM, NULL);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
715
John Garry082c2cd2019-01-08 23:14:52 +0800716/*
717 * Look up the DIX operation based on whether the command is read or
718 * write and whether dix and dif are enabled.
719 */
720static unsigned int sd_prot_op(bool write, bool dix, bool dif)
721{
722 /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
723 static const unsigned int ops[] = { /* wrt dix dif */
724 SCSI_PROT_NORMAL, /* 0 0 0 */
725 SCSI_PROT_READ_STRIP, /* 0 0 1 */
726 SCSI_PROT_READ_INSERT, /* 0 1 0 */
727 SCSI_PROT_READ_PASS, /* 0 1 1 */
728 SCSI_PROT_NORMAL, /* 1 0 0 */
729 SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
730 SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
731 SCSI_PROT_WRITE_PASS, /* 1 1 1 */
732 };
733
734 return ops[write << 2 | dix << 1 | dif];
735}
736
737/*
738 * Returns a mask of the protection flags that are valid for a given DIX
739 * operation.
740 */
741static unsigned int sd_prot_flag_mask(unsigned int prot_op)
742{
743 static const unsigned int flag_mask[] = {
744 [SCSI_PROT_NORMAL] = 0,
745
746 [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
747 SCSI_PROT_GUARD_CHECK |
748 SCSI_PROT_REF_CHECK |
749 SCSI_PROT_REF_INCREMENT,
750
751 [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
752 SCSI_PROT_IP_CHECKSUM,
753
754 [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
755 SCSI_PROT_GUARD_CHECK |
756 SCSI_PROT_REF_CHECK |
757 SCSI_PROT_REF_INCREMENT |
758 SCSI_PROT_IP_CHECKSUM,
759
760 [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
761 SCSI_PROT_REF_INCREMENT,
762
763 [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
764 SCSI_PROT_REF_CHECK |
765 SCSI_PROT_REF_INCREMENT |
766 SCSI_PROT_IP_CHECKSUM,
767
768 [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
769 SCSI_PROT_GUARD_CHECK |
770 SCSI_PROT_REF_CHECK |
771 SCSI_PROT_REF_INCREMENT |
772 SCSI_PROT_IP_CHECKSUM,
773 };
774
775 return flag_mask[prot_op];
776}
777
/*
 * Configure the protection-information (PI) handling for @scmd.
 *
 * @dix: host-side integrity metadata is attached to the bio.
 * @dif: target-side protection type (T10 PI type) in effect.
 *
 * Sets scmd->prot_flags and the command's prot op/type, and returns the
 * RDPROTECT/WRPROTECT bits (already shifted to bits 7..5) to be OR'ed
 * into the CDB's flags byte by the caller.
 */
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {	/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		/* Type 3 has no meaningful reference tag to check/increment. */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {	/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	/* Drop any flags that are not valid for the chosen prot op. */
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}
816
/*
 * Configure the block-layer discard limits for the disk according to the
 * chosen logical block provisioning @mode (SD_LBP_*), and record the mode
 * in sdkp->provisioning_mode.  SD_LBP_FULL/DISABLE turn discard off
 * entirely; the other modes pick a per-command block-count cap from the
 * device-reported UNMAP / WRITE SAME limits.
 */
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	/* Alignment/granularity are reported by the device in logical blocks;
	 * the queue limits are kept in bytes. */
	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		/* No discard support: zero the cap and clear the queue flag. */
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		return;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}

	/* Convert logical blocks to 512-byte sectors for the queue limit. */
	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
870
/*
 * Prepare an UNMAP command for a discard request.
 *
 * Allocates a zeroed page from sd_page_pool as the command's special
 * payload and fills in a 24-byte UNMAP parameter list: header (data
 * length 22, block descriptor data length 16) followed by one block
 * descriptor covering the request's LBA range.  Returns
 * BLK_STS_RESOURCE if the payload page cannot be allocated.
 */
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;	/* header (8) + one descriptor (16) */
	char *buf;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;	/* parameter list length */

	buf = bvec_virt(&rq->special_vec);
	put_unaligned_be16(6 + 16, &buf[0]);	/* data length (excl. this field) */
	put_unaligned_be16(16, &buf[2]);	/* block descriptor data length */
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
905
/*
 * Prepare a WRITE SAME(16) command that writes a single zeroed logical
 * block over the request's LBA range.  @unmap sets the UNMAP bit so the
 * device may deallocate instead of writing.  The zero block comes from a
 * sd_page_pool page attached as the request's special payload; returns
 * BLK_STS_RESOURCE if that allocation fails.
 */
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;	/* one logical block of zeroes */

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	/* Real writes may take much longer than a deallocation. */
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
937
/*
 * Prepare a WRITE SAME(10) command that writes a single zeroed logical
 * block over the request's LBA range (32-bit LBA, 16-bit block count).
 * @unmap sets the UNMAP bit so the device may deallocate instead of
 * writing.  Mirrors sd_setup_write_same16_cmnd() for the 10-byte CDB.
 */
static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;	/* one logical block of zeroes */

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	/* Real writes may take much longer than a deallocation. */
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
969
/*
 * Prepare a REQ_OP_WRITE_ZEROES request.
 *
 * Unless the request forbids deallocation (REQ_NOUNMAP), use the
 * unmapping WRITE SAME variant selected at probe time in
 * sdkp->zeroing_mode.  Otherwise fall back to a plain WRITE SAME,
 * choosing the 16-byte CDB when required by the device preference
 * (ws16) or by LBA/count exceeding the 10-byte CDB's fields.
 */
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		/* Expected failure mode: suppress error logging. */
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}
997
/*
 * Configure WRITE SAME / write-zeroes support for the disk.
 *
 * Clamps sdkp->max_ws_blocks based on what the device advertises,
 * selects the zeroing strategy (sdkp->zeroing_mode) used by
 * sd_setup_write_zeroes_cmnd(), and publishes the resulting limits to
 * the request queue.
 */
static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		/* No usable WRITE SAME support detected: disable it. */
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	/* Prefer unmapping variants when the device zeroes unmapped blocks
	 * (lbprz) and supports the corresponding WRITE SAME flavor. */
	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
		 * even if the caller of these functions took care to align the
		 * large request. So make sure the maximum reported is aligned
		 * to the device physical block size. This is only an optional
		 * optimization for regular disks, but this is mandatory to
		 * avoid failure of large write same requests directed at
		 * sequential write required zones of host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	/* Queue limits are in 512-byte sectors. */
	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
					 (logical_block_size >> 9));
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					   (logical_block_size >> 9));
}
1059
/**
 * sd_setup_write_same_cmnd - write the same data to multiple blocks
 * @cmd: command to prepare
 *
 * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
 * the preference indicated by the target device.  The single source
 * block is the request's bio payload; returns BLK_STS_TARGET when the
 * device does not support WRITE SAME.
 **/
static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct bio *bio = rq->bio;
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	blk_status_t ret;

	if (sdkp->device->no_write_same)
		return BLK_STS_TARGET;

	/* The payload must be exactly one aligned logical block. */
	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);

	rq->timeout = SD_WRITE_SAME_TIMEOUT;

	/* 16-byte CDB when preferred or when LBA/count overflow WRITE SAME(10). */
	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
		cmd->cmd_len = 16;
		cmd->cmnd[0] = WRITE_SAME_16;
		put_unaligned_be64(lba, &cmd->cmnd[2]);
		put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
	} else {
		cmd->cmd_len = 10;
		cmd->cmnd[0] = WRITE_SAME;
		put_unaligned_be32(lba, &cmd->cmnd[2]);
		put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
	}

	cmd->transfersize = sdp->sector_size;
	cmd->allowed = sdkp->max_retries;

	/*
	 * For WRITE SAME the data transferred via the DATA OUT buffer is
	 * different from the amount of data actually written to the target.
	 *
	 * We set up __data_len to the amount of data transferred via the
	 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
	 * to transfer a single sector of data first, but then reset it to
	 * the amount of data to be written right after so that the I/O path
	 * knows how much to actually write.
	 */
	rq->__data_len = sdp->sector_size;
	ret = scsi_alloc_sgtables(cmd);
	rq->__data_len = blk_rq_bytes(rq);

	return ret;
}
1115
Christoph Hellwig159b2cb2018-11-09 14:42:39 +01001116static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
FUJITA Tomonori90467c22010-07-03 17:45:34 +09001117{
Bart Van Assche5999ccf2021-08-09 16:03:06 -07001118 struct request *rq = scsi_cmd_to_rq(cmd);
Mike Christie06109592020-10-01 10:35:54 -05001119 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
FUJITA Tomonori90467c22010-07-03 17:45:34 +09001120
Christoph Hellwiga118c6c2014-06-28 12:08:05 +02001121 /* flush requests don't perform I/O, zero the S/G table */
1122 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1123
1124 cmd->cmnd[0] = SYNCHRONIZE_CACHE;
1125 cmd->cmd_len = 10;
1126 cmd->transfersize = 0;
Mike Christie06109592020-10-01 10:35:54 -05001127 cmd->allowed = sdkp->max_retries;
Christoph Hellwiga118c6c2014-06-28 12:08:05 +02001128
K. Y. Srinivasan26b9fd82014-07-18 17:11:27 +02001129 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
Christoph Hellwig159b2cb2018-11-09 14:42:39 +01001130 return BLK_STS_OK;
FUJITA Tomonori90467c22010-07-03 17:45:34 +09001131}
1132
/*
 * Build a variable-length READ(32)/WRITE(32) CDB (used with T10 PI
 * type 2 protection).  The 32-byte CDB does not fit in the command's
 * embedded buffer, so it is allocated from sd_cdb_pool; returns
 * BLK_STS_RESOURCE if that allocation fails.  @flags is placed in the
 * CDB's protection/FUA flags byte.
 */
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
	if (unlikely(cmd->cmnd == NULL))
		return BLK_STS_RESOURCE;

	cmd->cmd_len = SD_EXT_CDB_SIZE;
	memset(cmd->cmnd, 0, cmd->cmd_len);

	cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7] = 0x18; /* Additional CDB len */
	cmd->cmnd[9] = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}
1154
1155static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
1156 sector_t lba, unsigned int nr_blocks,
1157 unsigned char flags)
1158{
1159 cmd->cmd_len = 16;
1160 cmd->cmnd[0] = write ? WRITE_16 : READ_16;
1161 cmd->cmnd[1] = flags;
1162 cmd->cmnd[14] = 0;
1163 cmd->cmnd[15] = 0;
1164 put_unaligned_be64(lba, &cmd->cmnd[2]);
1165 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1166
1167 return BLK_STS_OK;
1168}
1169
1170static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
1171 sector_t lba, unsigned int nr_blocks,
1172 unsigned char flags)
1173{
1174 cmd->cmd_len = 10;
1175 cmd->cmnd[0] = write ? WRITE_10 : READ_10;
1176 cmd->cmnd[1] = flags;
1177 cmd->cmnd[6] = 0;
1178 cmd->cmnd[9] = 0;
1179 put_unaligned_be32(lba, &cmd->cmnd[2]);
1180 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1181
1182 return BLK_STS_OK;
1183}
1184
/*
 * Build a legacy READ(6)/WRITE(6) CDB: 21-bit LBA, 8-bit block count.
 * Rejects requests that cannot be expressed in this CDB (zero blocks,
 * or an FUA flag the 6-byte CDB cannot carry).
 */
static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid that 0 blocks gets translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {	/* FUA requested */
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	/* 21-bit LBA packed into bytes 1..3 (top 5 bits in byte 1). */
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}
1213
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001214static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215{
Bart Van Assche5999ccf2021-08-09 16:03:06 -07001216 struct request *rq = scsi_cmd_to_rq(cmd);
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001217 struct scsi_device *sdp = cmd->device;
Martin K. Petersene249e422019-01-15 16:50:01 -08001218 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
Martin K. Petersenec029752019-01-15 16:49:59 -08001219 sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
Linus Torvalds18351072008-08-05 21:42:21 -07001220 sector_t threshold;
Martin K. Petersenec029752019-01-15 16:49:59 -08001221 unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
Martin K. Petersenec029752019-01-15 16:49:59 -08001222 unsigned int mask = logical_to_sectors(sdp, 1) - 1;
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001223 bool write = rq_data_dir(rq) == WRITE;
1224 unsigned char protect, fua;
Christoph Hellwig159b2cb2018-11-09 14:42:39 +01001225 blk_status_t ret;
Xiang Chen0cf9f4e2019-10-22 14:27:08 +08001226 unsigned int dif;
1227 bool dix;
James Bottomley7f9a6bc2007-08-04 10:06:25 -05001228
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001229 ret = scsi_alloc_sgtables(cmd);
Christoph Hellwig159b2cb2018-11-09 14:42:39 +01001230 if (ret != BLK_STS_OK)
Damien Le Moal39051dd2017-12-21 15:43:44 +09001231 return ret;
James Bottomley7f9a6bc2007-08-04 10:06:25 -05001232
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001233 ret = BLK_STS_IOERR;
Martin K. Petersene249e422019-01-15 16:50:01 -08001234 if (!scsi_device_online(sdp) || sdp->changed) {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001235 scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001236 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 }
1238
Martin K. Petersene249e422019-01-15 16:50:01 -08001239 if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001240 scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001241 goto fail;
Martin K. Petersene249e422019-01-15 16:50:01 -08001242 }
1243
1244 if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001245 scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001246 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 }
James Bottomley7f9a6bc2007-08-04 10:06:25 -05001248
Hans de Goedea0899d42008-01-20 11:12:26 +01001249 /*
Martin K. Petersenec029752019-01-15 16:49:59 -08001250 * Some SD card readers can't handle accesses which touch the
1251 * last one or two logical blocks. Split accesses as needed.
Hans de Goedea0899d42008-01-20 11:12:26 +01001252 */
Martin K. Petersenec029752019-01-15 16:49:59 -08001253 threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
Linus Torvalds18351072008-08-05 21:42:21 -07001254
Martin K. Petersenc6c93fd2019-01-15 16:49:58 -08001255 if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
1256 if (lba < threshold) {
Linus Torvalds18351072008-08-05 21:42:21 -07001257 /* Access up to the threshold but not beyond */
Martin K. Petersenc6c93fd2019-01-15 16:49:58 -08001258 nr_blocks = threshold - lba;
Linus Torvalds18351072008-08-05 21:42:21 -07001259 } else {
Martin K. Petersenec029752019-01-15 16:49:59 -08001260 /* Access only a single logical block */
1261 nr_blocks = 1;
Linus Torvalds18351072008-08-05 21:42:21 -07001262 }
1263 }
Hans de Goedea0899d42008-01-20 11:12:26 +01001264
Johannes Thumshirn5795eb42020-05-12 17:55:51 +09001265 if (req_op(rq) == REQ_OP_ZONE_APPEND) {
1266 ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
1267 if (ret)
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001268 goto fail;
Johannes Thumshirn5795eb42020-05-12 17:55:51 +09001269 }
1270
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001271 fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001272 dix = scsi_prot_sg_count(cmd);
1273 dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
Martin K. Petersenc6115292014-09-26 19:20:08 -04001275 if (dif || dix)
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001276 protect = sd_setup_protect_cmnd(cmd, dix, dif);
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04001277 else
Martin K. Petersen4e7392e2009-09-20 16:49:38 -04001278 protect = 0;
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04001279
Christoph Hellwig8475c812016-09-11 19:35:41 +02001280 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001281 ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001282 protect | fua);
Martin K. Petersenc6c93fd2019-01-15 16:49:58 -08001283 } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001284 ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001285 protect | fua);
Martin K. Petersenc6c93fd2019-01-15 16:49:58 -08001286 } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
Martin K. Petersene249e422019-01-15 16:50:01 -08001287 sdp->use_10_for_rw || protect) {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001288 ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001289 protect | fua);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 } else {
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001291 ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001292 protect | fua);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 }
Martin K. Petersen78a02f42019-01-15 16:50:00 -08001294
1295 if (unlikely(ret != BLK_STS_OK))
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001296 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
1298 /*
1299 * We shouldn't disconnect in the middle of a sector, so with a dumb
1300 * host adapter, it's safe to assume that we can at least transfer
1301 * this many bytes between each connect / disconnect.
1302 */
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001303 cmd->transfersize = sdp->sector_size;
1304 cmd->underflow = nr_blocks << 9;
Mike Christie06109592020-10-01 10:35:54 -05001305 cmd->allowed = sdkp->max_retries;
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001306 cmd->sdb.length = nr_blocks * sdp->sector_size;
Martin K. Petersene249e422019-01-15 16:50:01 -08001307
1308 SCSI_LOG_HLQUEUE(1,
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001309 scmd_printk(KERN_INFO, cmd,
Martin K. Petersene249e422019-01-15 16:50:01 -08001310 "%s: block=%llu, count=%d\n", __func__,
1311 (unsigned long long)blk_rq_pos(rq),
1312 blk_rq_sectors(rq)));
1313 SCSI_LOG_HLQUEUE(2,
Bart Van Asschecf64e5a2019-01-15 16:50:02 -08001314 scmd_printk(KERN_INFO, cmd,
Martin K. Petersene249e422019-01-15 16:50:01 -08001315 "%s %d/%u 512 byte blocks.\n",
1316 write ? "writing" : "reading", nr_blocks,
1317 blk_rq_sectors(rq)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
1319 /*
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001320 * This indicates that the command is ready from our end to be queued.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 */
Christoph Hellwig159b2cb2018-11-09 14:42:39 +01001322 return BLK_STS_OK;
Christoph Hellwig7007e9d2020-10-05 10:41:28 +02001323fail:
1324 scsi_free_sgtables(cmd);
1325 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326}
1327
/**
 * sd_init_command - set up a SCSI command for a block layer request
 * @cmd: SCSI command to initialize; the attached request selects the op
 *
 * Dispatches on the request operation and calls the matching CDB setup
 * helper.  Returns the helper's blk_status_t, BLK_STS_TARGET when a
 * discard arrives with no usable provisioning mode, or BLK_STS_NOTSUPP
 * (after a one-time warning) for an operation sd does not implement.
 */
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		/* How a discard is issued depends on the provisioning mode. */
		switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			/* Same CDB as WS10 but without the UNMAP bit. */
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
			return BLK_STS_TARGET;
		}
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
	case REQ_OP_WRITE_SAME:
		return sd_setup_write_same_cmnd(cmd);
	case REQ_OP_FLUSH:
		return sd_setup_flush_cmnd(cmd);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return sd_setup_read_write_cmnd(cmd);
	case REQ_OP_ZONE_RESET:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
	case REQ_OP_ZONE_RESET_ALL:
		/* all = true: reset every zone's write pointer at once */
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_NOTSUPP;
	}
}
1373
1374static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1375{
Bart Van Assche5999ccf2021-08-09 16:03:06 -07001376 struct request *rq = scsi_cmd_to_rq(SCpnt);
Bart Van Assche14e30622017-12-05 16:57:51 -08001377 u8 *cmnd;
Christoph Hellwig87949ee2014-06-28 12:40:18 +02001378
Christoph Hellwigf9d03f92016-12-08 15:20:32 -07001379 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
Jens Axboe61cce6f2018-12-12 06:46:55 -07001380 mempool_free(rq->special_vec.bv_page, sd_page_pool);
Christoph Hellwig87949ee2014-06-28 12:40:18 +02001381
Christoph Hellwig82ed4db2017-01-27 09:46:29 +01001382 if (SCpnt->cmnd != scsi_req(rq)->cmd) {
Bart Van Assche14e30622017-12-05 16:57:51 -08001383 cmnd = SCpnt->cmnd;
Christoph Hellwig87949ee2014-06-28 12:40:18 +02001384 SCpnt->cmnd = NULL;
1385 SCpnt->cmd_len = 0;
Bart Van Assche14e30622017-12-05 16:57:51 -08001386 mempool_free(cmnd, sd_cdb_pool);
Christoph Hellwig87949ee2014-06-28 12:40:18 +02001387 }
1388}
1389
Christoph Hellwigd1b7f922021-06-17 13:55:04 +02001390static bool sd_need_revalidate(struct block_device *bdev,
1391 struct scsi_disk *sdkp)
1392{
1393 if (sdkp->device->removable || sdkp->write_prot) {
1394 if (bdev_check_media_change(bdev))
1395 return true;
1396 }
1397
1398 /*
1399 * Force a full rescan after ioctl(BLKRRPART). While the disk state has
1400 * nothing to do with partitions, BLKRRPART is used to force a full
1401 * revalidate after things like a format for historical reasons.
1402 */
1403 return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1404}
1405
/**
 *	sd_open - open a scsi disk device
 *	@bdev: Block device of the scsi disk to open
 *	@mode: FMODE_* mask
 *
 *	Returns 0 if successful. Returns a negated errno value in case
 *	of error.
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *
 *	Locking: called with bdev->bd_disk->open_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
	/* Takes a reference on the scsi_disk; dropped on any error path. */
	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
	struct scsi_device *sdev;
	int retval;

	if (!sdkp)
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	sdev = sdkp->device;

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	/* Re-read capacity/geometry if the medium may have changed. */
	if (sd_need_revalidate(bdev, sdkp))
		sd_revalidate_disk(bdev->bd_disk);

	/*
	 * If the drive is empty, just let the open fail.
	 * O_NDELAY opens are allowed to succeed without a medium.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & FMODE_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline.  If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	/* First opener of a removable device: lock the medium in place. */
	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_disk_put(sdkp);
	return retval;	
}
1481
1482/**
1483 * sd_release - invoked when the (last) close(2) is called on this
1484 * scsi disk.
Damien Le Moal7529fbb2017-04-24 16:51:09 +09001485 * @disk: disk to release
1486 * @mode: FMODE_* mask
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 *
1488 * Returns 0.
1489 *
1490 * Note: may block (uninterruptible) if error recovery is underway
1491 * on this disk.
Arnd Bergmann409f3492010-07-07 16:51:29 +02001492 *
Christoph Hellwiga8698702021-05-25 08:12:56 +02001493 * Locking: called with bdev->bd_disk->open_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 **/
Al Virodb2a1442013-05-05 21:52:57 -04001495static void sd_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 struct scsi_disk *sdkp = scsi_disk(disk);
1498 struct scsi_device *sdev = sdkp->device;
1499
James Bottomley56937f72007-03-11 12:25:33 -05001500 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Alan Stern7e443312010-09-07 11:27:52 -04001502 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 if (scsi_block_when_processing_errors(sdev))
1504 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1505 }
1506
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 scsi_disk_put(sdkp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508}
1509
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08001510static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511{
1512 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1513 struct scsi_device *sdp = sdkp->device;
1514 struct Scsi_Host *host = sdp->host;
Martin K. Petersenf08bb1e2016-03-28 21:18:56 -04001515 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 int diskinfo[4];
1517
1518 /* default to most commonly used values */
Martin K. Petersenf08bb1e2016-03-28 21:18:56 -04001519 diskinfo[0] = 0x40; /* 1 << 6 */
1520 diskinfo[1] = 0x20; /* 1 << 5 */
1521 diskinfo[2] = capacity >> 11;
1522
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 /* override with calculated, extended default, or driver values */
1524 if (host->hostt->bios_param)
Martin K. Petersenf08bb1e2016-03-28 21:18:56 -04001525 host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 else
Martin K. Petersenf08bb1e2016-03-28 21:18:56 -04001527 scsicam_bios_param(bdev, capacity, diskinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08001529 geo->heads = diskinfo[0];
1530 geo->sectors = diskinfo[1];
1531 geo->cylinders = diskinfo[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 return 0;
1533}
1534
/**
 *	sd_ioctl - process an ioctl
 *	@bdev: target block device
 *	@mode: FMODE_* mask
 *	@cmd: ioctl command number
 *	@arg: this is third argument given to ioctl(2) system call.
 *	Often contains a pointer.
 *
 *	Returns 0 if successful (some ioctls return positive numbers on
 *	success as well). Returns a negated errno value in case of error.
 *
 *	Note: most ioctls are forwarded on to the block subsystem or
 *	further down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;
	int error;
    
	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	/* Ioctls on partitions are restricted to CAP_SYS_RAWIO holders. */
	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
		return error;

	/* Opal/SED security ioctls are handled by the SED core. */
	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
	return scsi_ioctl(sdp, disk, mode, cmd, p);
}
1579
1580static void set_media_not_present(struct scsi_disk *sdkp)
1581{
Tejun Heo2bae0092010-12-18 18:42:23 +01001582 if (sdkp->media_present)
1583 sdkp->device->changed = 1;
1584
1585 if (sdkp->device->removable) {
1586 sdkp->media_present = 0;
1587 sdkp->capacity = 0;
1588 }
1589}
1590
1591static int media_not_present(struct scsi_disk *sdkp,
1592 struct scsi_sense_hdr *sshdr)
1593{
1594 if (!scsi_sense_valid(sshdr))
1595 return 0;
1596
1597 /* not invoked for commands that could return deferred errors */
1598 switch (sshdr->sense_key) {
1599 case UNIT_ATTENTION:
1600 case NOT_READY:
1601 /* medium not present */
1602 if (sshdr->asc == 0x3A) {
1603 set_media_not_present(sdkp);
1604 return 1;
1605 }
1606 }
1607 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
1609
/**
 *	sd_check_events - check media events
 *	@disk: kernel device descriptor
 *	@clearing: disk events currently being cleared (unused here)
 *
 *	Returns mask of DISK_EVENT_* (DISK_EVENT_MEDIA_CHANGE or 0).
 *
 *	Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
	/* Takes a scsi_disk reference; dropped at the single exit below. */
	struct scsi_disk *sdkp = scsi_disk_get(disk);
	struct scsi_device *sdp;
	int retval;
	bool disk_changed;

	if (!sdkp)
		return 0;

	sdp = sdkp->device;
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed.  If the device ever comes back online, we
	 * can deal with it then.  It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down. eg iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
	if (scsi_block_when_processing_errors(sdp)) {
		struct scsi_sense_hdr sshdr = { 0, };

		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
					      &sshdr);

		/* failed to execute TUR, assume media not present */
		if (retval < 0 || host_byte(retval)) {
			set_media_not_present(sdkp);
			goto out;
		}

		/* TUR executed; sense may still say the medium is gone. */
		if (media_not_present(sdkp, &sshdr))
			goto out;
	}

	/*
	 * For removable scsi disk we have to recognise the presence
	 * of a disk in the drive.
	 */
	if (!sdkp->media_present)
		sdp->changed = 1;
	sdkp->media_present = 1;
out:
	/*
	 * sdp->changed is set under the following conditions:
	 *
	 *	Medium present state has changed in either direction.
	 *	Device has indicated UNIT_ATTENTION.
	 *
	 * Consume the flag and translate it into a media-change event.
	 */
	disk_changed = sdp->changed;
	sdp->changed = 0;
	scsi_disk_put(sdkp);
	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
1687
/*
 * sd_sync_cache - flush the device's write cache
 * @sdkp:  disk to flush
 * @sshdr: optional out-parameter for the sense data; may be NULL
 *
 * Issues SYNCHRONIZE CACHE(10), retrying up to three times.  Returns 0 on
 * success or when the failure is deemed harmless (medium not present,
 * command not supported, drive password-locked, or the target vanished),
 * -EBUSY for transient transport errors worth retrying at a higher level,
 * -ENODEV if the device is offline, and -EIO (or a negative result from
 * scsi_execute()) otherwise.
 */
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	int retries, res;
	struct scsi_device *sdp = sdkp->device;
	/* Flushes can legitimately take much longer than normal I/O. */
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
	struct scsi_sense_hdr my_sshdr;

	if (!scsi_device_online(sdp))
		return -ENODEV;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

	for (retries = 3; retries > 0; --retries) {
		unsigned char cmd[10] = { 0 };

		cmd[0] = SYNCHRONIZE_CACHE;
		/*
		 * Leave the rest of the command zero to indicate
		 * flush everything.
		 */
		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
				timeout, sdkp->max_retries, 0, RQF_PM, NULL);
		if (res == 0)
			break;
	}

	if (res) {
		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);

		/* Negative result: pass the scsi_execute() error through. */
		if (res < 0)
			return res;

		if (scsi_status_is_check_condition(res) &&
		    scsi_sense_valid(sshdr)) {
			sd_print_sense_hdr(sdkp, sshdr);

			/* we need to evaluate the error return  */
			if (sshdr->asc == 0x3a ||	/* medium not present */
			    sshdr->asc == 0x20 ||	/* invalid command */
			    (sshdr->asc == 0x74 && sshdr->ascq == 0x71))	/* drive is password locked */
				/* this is no error here */
				return 0;
		}

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:
			return 0;
		/* signal the upper layer it might try again */
		case DID_BUS_BUSY:
		case DID_IMM_RETRY:
		case DID_REQUEUE:
		case DID_SOFT_ERROR:
			return -EBUSY;
		default:
			return -EIO;
		}
	}
	return 0;
}
1752
/* Driver-model rescan callback: revalidate the disk behind @dev. */
static void sd_rescan(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	sd_revalidate_disk(sdkp->disk);
}
1759
/*
 * sd_get_unique_id - extract a unique ID of the requested type for the disk
 * @disk: disk to query
 * @id:   output buffer, up to 16 bytes
 * @type: designator type to look for (enum blk_unique_id)
 *
 * Walks the cached device identification VPD page (0x83) under RCU and
 * copies out the best matching LU-associated designator.  Returns the
 * designator length (8, 12 or 16) on success, -ENXIO if no VPD page is
 * cached, or -EINVAL if no suitable designator was found.
 */
static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
		enum blk_unique_id type)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;
	const struct scsi_vpd *vpd;
	const unsigned char *d;
	int ret = -ENXIO, len;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg83);
	if (!vpd)
		goto out_unlock;

	ret = -EINVAL;
	/* Skip the 4-byte page header; each descriptor is 4 + d[3] bytes. */
	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
		/* we only care about designators with LU association */
		if (((d[1] >> 4) & 0x3) != 0x00)
			continue;
		if ((d[1] & 0xf) != type)
			continue;

		/*
		 * Only exit early if a 16-byte descriptor was found.  Otherwise
		 * keep looking as one with more entropy might still show up.
		 */
		len = d[3];
		if (len != 8 && len != 12 && len != 16)
			continue;
		ret = len;
		memcpy(id, d + 4, len);
		if (len == 16)
			break;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
1797
Christoph Hellwig924d55b2015-10-15 14:10:49 +02001798static char sd_pr_type(enum pr_type type)
1799{
1800 switch (type) {
1801 case PR_WRITE_EXCLUSIVE:
1802 return 0x01;
1803 case PR_EXCLUSIVE_ACCESS:
1804 return 0x03;
1805 case PR_WRITE_EXCLUSIVE_REG_ONLY:
1806 return 0x05;
1807 case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1808 return 0x06;
1809 case PR_WRITE_EXCLUSIVE_ALL_REGS:
1810 return 0x07;
1811 case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1812 return 0x08;
1813 default:
1814 return 0;
1815 }
1816};
1817
/*
 * sd_pr_command - issue a PERSISTENT RESERVE OUT command
 * @bdev:   target block device
 * @sa:     service action (register, reserve, release, clear, preempt, ...)
 * @key:    reservation key
 * @sa_key: service action reservation key
 * @type:   SCSI reservation type (from sd_pr_type())
 * @flags:  flags byte of the parameter data (byte 20, e.g. APTPL)
 *
 * Builds the 24-byte parameter list (key at offset 0, service action key
 * at offset 8, flags at offset 20) and sends the command.  Returns the
 * scsi_execute_req() result; sense is logged on CHECK CONDITION.
 */
static int sd_pr_command(struct block_device *bdev, u8 sa,
		u64 key, u64 sa_key, u8 type, u8 flags)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
	struct scsi_sense_hdr sshdr;
	int result;
	u8 cmd[16] = { 0, };
	u8 data[24] = { 0, };

	cmd[0] = PERSISTENT_RESERVE_OUT;
	cmd[1] = sa;
	cmd[2] = type;
	/* Parameter list length in bytes 5-8 of the CDB. */
	put_unaligned_be32(sizeof(data), &cmd[5]);

	put_unaligned_be64(key, &data[0]);
	put_unaligned_be64(sa_key, &data[8]);
	data[20] = flags;

	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
			&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);

	if (scsi_status_is_check_condition(result) &&
	    scsi_sense_valid(&sshdr)) {
		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	}

	return result;
}
1848
1849static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
1850 u32 flags)
1851{
1852 if (flags & ~PR_FL_IGNORE_KEY)
1853 return -EOPNOTSUPP;
1854 return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
1855 old_key, new_key, 0,
Christoph Hellwig01f90dd2016-07-08 21:23:50 +09001856 (1 << 0) /* APTPL */);
Christoph Hellwig924d55b2015-10-15 14:10:49 +02001857}
1858
1859static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
1860 u32 flags)
1861{
1862 if (flags)
1863 return -EOPNOTSUPP;
1864 return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
1865}
1866
1867static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1868{
1869 return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
1870}
1871
1872static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
1873 enum pr_type type, bool abort)
1874{
1875 return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
1876 sd_pr_type(type), 0);
1877}
1878
1879static int sd_pr_clear(struct block_device *bdev, u64 key)
1880{
1881 return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
1882}
1883
/* Persistent reservation operations exposed to the block layer. */
static const struct pr_ops sd_pr_ops = {
	.pr_register	= sd_pr_register,
	.pr_reserve	= sd_pr_reserve,
	.pr_release	= sd_pr_release,
	.pr_preempt	= sd_pr_preempt,
	.pr_clear	= sd_pr_clear,
};
1891
/* Block device operations for SCSI disks. */
static const struct block_device_operations sd_fops = {
	.owner			= THIS_MODULE,
	.open			= sd_open,
	.release		= sd_release,
	.ioctl			= sd_ioctl,
	.getgeo			= sd_getgeo,
	.compat_ioctl		= blkdev_compat_ptr_ioctl,
	.check_events		= sd_check_events,
	.unlock_native_capacity	= sd_unlock_native_capacity,
	.report_zones		= sd_zbc_report_zones,
	.get_unique_id		= sd_get_unique_id,
	.pr_ops			= &sd_pr_ops,
};
1905
Martin K. Petersen18a4d0a2012-02-09 13:48:53 -05001906/**
Hannes Reinecke7a38dc02017-04-06 15:36:29 +02001907 * sd_eh_reset - reset error handling callback
1908 * @scmd: sd-issued command that has failed
1909 *
1910 * This function is called by the SCSI midlayer before starting
1911 * SCSI EH. When counting medium access failures we have to be
1912 * careful to register it only only once per device and SCSI EH run;
1913 * there might be several timed out commands which will cause the
1914 * 'max_medium_access_timeouts' counter to trigger after the first
1915 * SCSI EH run already and set the device to offline.
1916 * So this function resets the internal counter before starting SCSI EH.
1917 **/
1918static void sd_eh_reset(struct scsi_cmnd *scmd)
1919{
Bart Van Assche5999ccf2021-08-09 16:03:06 -07001920 struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
Hannes Reinecke7a38dc02017-04-06 15:36:29 +02001921
1922 /* New SCSI EH run, reset gate variable */
1923 sdkp->ignore_medium_access_errors = false;
1924}
1925
/**
 *	sd_eh_action - error handling callback
 *	@scmd: sd-issued command that has failed
 *	@eh_disp: The recovery disposition suggested by the midlayer
 *
 *	This function is called by the SCSI midlayer upon completion of an
 *	error test command (currently TEST UNIT READY). The result of sending
 *	the eh command is passed in eh_disp.  We're looking for devices that
 *	fail medium access commands but are OK with non access commands like
 *	test unit ready (so wrongly see the device as having a successful
 *	recovery).  Returns @eh_disp unchanged unless the timeout budget is
 *	exhausted, in which case the device is set offline and SUCCESS is
 *	returned.
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
	struct scsi_device *sdev = scmd->device;

	/* Only act on timed-out medium access commands deemed recovered. */
	if (!scsi_device_online(sdev) ||
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or has it suffered an internal failure
	 * that prevents access to the storage medium.
	 *
	 * Count at most one timeout per EH run; sd_eh_reset() clears the
	 * gate before the next run.
	 */
	if (!sdkp->ignore_medium_access_errors) {
		sdkp->medium_access_timed_out++;
		sdkp->ignore_medium_access_errors = true;
	}

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. Offlining disk!\n");
		mutex_lock(&sdev->state_mutex);
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		mutex_unlock(&sdev->state_mutex);

		return SUCCESS;
	}

	return eh_disp;
}
1978
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04001979static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1980{
Bart Van Assche5999ccf2021-08-09 16:03:06 -07001981 struct request *req = scsi_cmd_to_rq(scmd);
Damien Le Moal6eadc612017-04-24 16:51:10 +09001982 struct scsi_device *sdev = scmd->device;
1983 unsigned int transferred, good_bytes;
1984 u64 start_lba, end_lba, bad_lba;
1985
1986 /*
1987 * Some commands have a payload smaller than the device logical
1988 * block size (e.g. INQUIRY on a 4K disk).
1989 */
1990 if (scsi_bufflen(scmd) <= sdev->sector_size)
1991 return 0;
1992
1993 /* Check if we have a 'bad_lba' information */
1994 if (!scsi_get_sense_info_fld(scmd->sense_buffer,
1995 SCSI_SENSE_BUFFERSIZE,
1996 &bad_lba))
1997 return 0;
1998
1999 /*
2000 * If the bad lba was reported incorrectly, we have no idea where
2001 * the error is.
2002 */
2003 start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
2004 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
2005 if (bad_lba < start_lba || bad_lba >= end_lba)
2006 return 0;
2007
James Bottomleya8733c72010-12-17 15:36:34 -05002008 /*
2009 * resid is optional but mostly filled in. When it's unused,
2010 * its value is zero, so we assume the whole buffer transferred
2011 */
Damien Le Moal6eadc612017-04-24 16:51:10 +09002012 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04002013
Damien Le Moal6eadc612017-04-24 16:51:10 +09002014 /* This computation should always be done in terms of the
2015 * resolution of the device's medium.
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04002016 */
Damien Le Moal6eadc612017-04-24 16:51:10 +09002017 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04002018
James Bottomleya8733c72010-12-17 15:36:34 -05002019 return min(good_bytes, transferred);
Martin K. Petersenaf55ff62008-07-17 04:28:35 -04002020}
2021
/**
 * sd_done - bottom half handler: called when the lower level
 * driver has completed (successfully or otherwise) a scsi command.
 * @SCpnt: mid-level's per command structure.
 *
 * Returns the number of bytes that completed successfully, which the
 * midlayer uses to decide how much of the request to end.
 *
 * Note: potentially run from within an ISR. Must not block.
 **/
static int sd_done(struct scsi_cmnd *SCpnt)
{
	int result = SCpnt->result;
	/* On a clean completion, assume the full buffer transferred. */
	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
	unsigned int sector_size = SCpnt->device->sector_size;
	unsigned int resid;
	struct scsi_sense_hdr sshdr;
	struct request *req = scsi_cmd_to_rq(SCpnt);
	struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
	int sense_valid = 0;
	int sense_deferred = 0;

	switch (req_op(req)) {
	/*
	 * Non-read/write ops complete all-or-nothing: success means the
	 * whole request is done, failure means none of it is.
	 */
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!result) {
			good_bytes = blk_rq_bytes(req);
			scsi_set_resid(SCpnt, 0);
		} else {
			good_bytes = 0;
			scsi_set_resid(SCpnt, blk_rq_bytes(req));
		}
		break;
	default:
		/*
		 * In case of bogus fw or device, we could end up having
		 * an unaligned partial completion. Check this here and force
		 * alignment by rounding resid up to a whole number of
		 * logical blocks (capped at the buffer length).
		 */
		resid = scsi_get_resid(SCpnt);
		if (resid & (sector_size - 1)) {
			sd_printk(KERN_INFO, sdkp,
				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
				resid, sector_size);
			scsi_print_command(SCpnt);
			resid = min(scsi_bufflen(SCpnt),
				    round_up(resid, sector_size));
			scsi_set_resid(SCpnt, resid);
		}
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	/* Any completion resets the medium-access timeout accounting. */
	sdkp->medium_access_timed_out = 0;

	/* Only CHECK CONDITION with valid, current sense data is parsed. */
	if (!scsi_status_is_check_condition(result) &&
	    (!sense_valid || sense_deferred))
		goto out;

	switch (sshdr.sense_key) {
	case HARDWARE_ERROR:
	case MEDIUM_ERROR:
		/* Salvage the bytes that completed before the bad LBA. */
		good_bytes = sd_completed_bytes(SCpnt);
		break;
	case RECOVERED_ERROR:
		/* The device recovered on its own; all data transferred. */
		good_bytes = scsi_bufflen(SCpnt);
		break;
	case NO_SENSE:
		/* This indicates a false check condition, so ignore it. An
		 * unknown amount of data was transferred so treat it as an
		 * error.
		 */
		SCpnt->result = 0;
		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		break;
	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
		break;
	case ILLEGAL_REQUEST:
		switch (sshdr.asc) {
		case 0x10: /* DIX: Host detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
			break;
		case 0x20: /* INVALID COMMAND OPCODE */
		case 0x24: /* INVALID FIELD IN CDB */
			/*
			 * The device rejected a provisioning command it
			 * advertised; disable the corresponding feature so
			 * future requests are not built with it.
			 */
			switch (SCpnt->cmnd[0]) {
			case UNMAP:
				sd_config_discard(sdkp, SD_LBP_DISABLE);
				break;
			case WRITE_SAME_16:
			case WRITE_SAME:
				if (SCpnt->cmnd[1] & 8) { /* UNMAP */
					sd_config_discard(sdkp, SD_LBP_DISABLE);
				} else {
					sdkp->device->no_write_same = 1;
					sd_config_write_same(sdkp);
					req->rq_flags |= RQF_QUIET;
				}
				break;
			}
		}
		break;
	default:
		break;
	}

 out:
	/* Zoned devices get a chance to adjust the result (e.g. for
	 * REPORT ZONES / zone append emulation). */
	if (sd_is_zoned(sdkp))
		good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);

	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
					   "sd_done: completed %d of %d bytes\n",
					   good_bytes, scsi_bufflen(SCpnt)));

	return good_bytes;
}
2145
/*
 * spinup disk - called only in sd_revalidate_disk()
 *
 * Polls the device with TEST UNIT READY and, when it reports "not ready,
 * initializing command required", issues START STOP UNIT to spin it up,
 * then keeps polling until the device is ready or a deadline expires.
 */
static void
sd_spinup_disk(struct scsi_disk *sdkp)
{
	unsigned char cmd[10];
	unsigned long spintime_expire = 0;
	int retries, spintime;
	unsigned int the_result;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;

	spintime = 0;

	/* Spin up drives, as required.  Only do this at boot time */
	/* Spinup needs to be done for module loads too. */
	do {
		retries = 0;

		/*
		 * Inner loop: issue TEST UNIT READY up to three times,
		 * retrying through transient UNIT ATTENTION conditions.
		 */
		do {
			bool media_was_present = sdkp->media_present;

			cmd[0] = TEST_UNIT_READY;
			memset((void *) &cmd[1], 0, 9);

			the_result = scsi_execute_req(sdkp->device, cmd,
						      DMA_NONE, NULL, 0,
						      &sshdr, SD_TIMEOUT,
						      sdkp->max_retries, NULL);

			/*
			 * If the drive has indicated to us that it
			 * doesn't have any media in it, don't bother
			 * with any more polling.
			 */
			if (media_not_present(sdkp, &sshdr)) {
				/* Only log on the present -> absent edge. */
				if (media_was_present)
					sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
				return;
			}

			if (the_result)
				sense_valid = scsi_sense_valid(&sshdr);
			retries++;
		} while (retries < 3 &&
			 (!scsi_status_is_good(the_result) ||
			  (scsi_status_is_check_condition(the_result) &&
			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));

		if (!scsi_status_is_check_condition(the_result)) {
			/* no sense, TUR either succeeded or failed
			 * with a status error */
			if(!spintime && !scsi_status_is_good(the_result)) {
				sd_print_result(sdkp, "Test Unit Ready failed",
						the_result);
			}
			break;
		}

		/*
		 * The device does not want the automatic start to be issued.
		 */
		if (sdkp->device->no_start_on_add)
			break;

		if (sense_valid && sshdr.sense_key == NOT_READY) {
			/* ASC 4 conditions that polling cannot fix: */
			if (sshdr.asc == 4 && sshdr.ascq == 3)
				break;	/* manual intervention required */
			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
				break;	/* standby */
			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
				break;	/* unavailable */
			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
				break;	/* sanitize in progress */
			/*
			 * Issue command to spin up drive when not ready
			 */
			if (!spintime) {
				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
				cmd[0] = START_STOP;
				cmd[1] = 1;	/* Return immediately */
				memset((void *) &cmd[2], 0, 8);
				cmd[4] = 1;	/* Start spin cycle */
				if (sdkp->device->start_stop_pwr_cond)
					cmd[4] |= 1 << 4;
				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
						 NULL, 0, &sshdr,
						 SD_TIMEOUT, sdkp->max_retries,
						 NULL);
				/* Allow up to 100 seconds for spin-up. */
				spintime_expire = jiffies + 100 * HZ;
				spintime = 1;
			}
			/* Wait 1 second for next try */
			msleep(1000);
			printk(KERN_CONT ".");

		/*
		 * Wait for USB flash devices with slow firmware.
		 * Yes, this sense key/ASC combination shouldn't
		 * occur here.  It's characteristic of these devices.
		 */
		} else if (sense_valid &&
				sshdr.sense_key == UNIT_ATTENTION &&
				sshdr.asc == 0x28) {
			if (!spintime) {
				spintime_expire = jiffies + 5 * HZ;
				spintime = 1;
			}
			/* Wait 1 second for next try */
			msleep(1000);
		} else {
			/* we don't understand the sense code, so it's
			 * probably pointless to loop */
			if(!spintime) {
				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
				sd_print_sense_hdr(sdkp, &sshdr);
			}
			break;
		}

	} while (spintime && time_before_eq(jiffies, spintime_expire));

	/* Finish the "Spinning up disk..." progress line. */
	if (spintime) {
		if (scsi_status_is_good(the_result))
			printk(KERN_CONT "ready\n");
		else
			printk(KERN_CONT "not responding...\n");
	}
}
2276
Martin K. Petersene0597d72008-07-17 04:28:34 -04002277/*
2278 * Determine whether disk supports Data Integrity Field.
2279 */
Martin K. Petersenfe542392012-09-21 12:44:12 -04002280static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
Martin K. Petersene0597d72008-07-17 04:28:34 -04002281{
2282 struct scsi_device *sdp = sdkp->device;
2283 u8 type;
Martin K. Petersenfe542392012-09-21 12:44:12 -04002284 int ret = 0;
Martin K. Petersene0597d72008-07-17 04:28:34 -04002285
Xiang Chen465f4ed2020-01-09 09:12:24 +08002286 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
2287 sdkp->protection_type = 0;
Martin K. Petersenfe542392012-09-21 12:44:12 -04002288 return ret;
Xiang Chen465f4ed2020-01-09 09:12:24 +08002289 }
Martin K. Petersen35e1a5d2009-09-18 17:33:00 -04002290
2291 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
2292
Christoph Hellwig8475c812016-09-11 19:35:41 +02002293 if (type > T10_PI_TYPE3_PROTECTION)
Martin K. Petersenfe542392012-09-21 12:44:12 -04002294 ret = -ENODEV;
2295 else if (scsi_host_dif_capable(sdp->host, type))
2296 ret = 1;
2297
2298 if (sdkp->first_scan || type != sdkp->protection_type)
2299 switch (ret) {
2300 case -ENODEV:
2301 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
2302 " protection type %u. Disabling disk!\n",
2303 type);
2304 break;
2305 case 1:
2306 sd_printk(KERN_NOTICE, sdkp,
2307 "Enabling DIF Type %u protection\n", type);
2308 break;
2309 case 0:
2310 sd_printk(KERN_NOTICE, sdkp,
2311 "Disabling DIF Type %u protection\n", type);
2312 break;
2313 }
Martin K. Petersene0597d72008-07-17 04:28:34 -04002314
Martin K. Petersenbe922f42008-09-19 18:47:20 -04002315 sdkp->protection_type = type;
2316
Martin K. Petersenfe542392012-09-21 12:44:12 -04002317 return ret;
Martin K. Petersene0597d72008-07-17 04:28:34 -04002318}
2319
Matthew Wilcox0da205e2009-03-12 14:20:29 -04002320static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2321 struct scsi_sense_hdr *sshdr, int sense_valid,
2322 int the_result)
2323{
Hannes Reinecke464a00c2021-04-27 10:30:15 +02002324 if (sense_valid)
Matthew Wilcox0da205e2009-03-12 14:20:29 -04002325 sd_print_sense_hdr(sdkp, sshdr);
2326 else
2327 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
2328
2329 /*
2330 * Set dirty bit for removable devices if not ready -
2331 * sometimes drives will not report this properly.
2332 */
2333 if (sdp->removable &&
2334 sense_valid && sshdr->sense_key == NOT_READY)
Tejun Heo2bae0092010-12-18 18:42:23 +01002335 set_media_not_present(sdkp);
Matthew Wilcox0da205e2009-03-12 14:20:29 -04002336
2337 /*
2338 * We used to set media_present to 0 here to indicate no media
2339 * in the drive, but some drives fail read capacity even with
2340 * media present, so we can't do that.
2341 */
2342 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
2343}
2344
2345#define RC16_LEN 32
2346#if RC16_LEN > SD_BUF_SIZE
2347#error RC16_LEN must not be more than SD_BUF_SIZE
2348#endif
2349
James Bottomley3233ac12010-04-01 10:30:01 -04002350#define READ_CAPACITY_RETRIES_ON_RESET 10
2351
/*
 * Issue READ CAPACITY(16) and parse the response into @sdkp.
 *
 * Returns the logical sector size on success, -EINVAL when the command
 * failed or is not supported (caller falls back to READ CAPACITY(10)),
 * and -ENODEV when the medium is absent or the protection type is
 * unusable.
 */
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
						unsigned char *buffer)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int the_result;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	unsigned int alignment;
	unsigned long long lba;
	unsigned sector_size;

	/* Quirked devices that choke on RC16 go straight to RC10. */
	if (sdp->no_read_capacity_16)
		return -EINVAL;

	do {
		memset(cmd, 0, 16);
		cmd[0] = SERVICE_ACTION_IN_16;
		cmd[1] = SAI_READ_CAPACITY_16;
		cmd[13] = RC16_LEN;
		memset(buffer, 0, RC16_LEN);

		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
					buffer, RC16_LEN, &sshdr,
					SD_TIMEOUT, sdkp->max_retries, NULL);

		if (media_not_present(sdkp, &sshdr))
			return -ENODEV;

		if (the_result > 0) {
			sense_valid = scsi_sense_valid(&sshdr);
			if (sense_valid &&
			    sshdr.sense_key == ILLEGAL_REQUEST &&
			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
			    sshdr.ascq == 0x00)
				/* Invalid Command Operation Code or
				 * Invalid Field in CDB, just retry
				 * silently with RC10 */
				return -EINVAL;
			if (sense_valid &&
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)
					continue;
		}
		retries--;

	} while (the_result && retries);

	if (the_result) {
		sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
		return -EINVAL;
	}

	/* RC16 response: bytes 0-7 returned LBA, bytes 8-11 block length. */
	sector_size = get_unaligned_be32(&buffer[8]);
	lba = get_unaligned_be64(&buffer[0]);

	if (sd_read_protection_type(sdkp, buffer) < 0) {
		sdkp->capacity = 0;
		return -ENODEV;
	}

	/* Logical blocks per physical block exponent */
	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;

	/* RC basis */
	sdkp->rc_basis = (buffer[12] >> 4) & 0x3;

	/* Lowest aligned logical block */
	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
	blk_queue_alignment_offset(sdp->request_queue, alignment);
	if (alignment && sdkp->first_scan)
		sd_printk(KERN_NOTICE, sdkp,
			  "physical block alignment offset: %u\n", alignment);

	if (buffer[14] & 0x80) { /* LBPME */
		sdkp->lbpme = 1;

		if (buffer[14] & 0x40) /* LBPRZ */
			sdkp->lbprz = 1;

		sd_config_discard(sdkp, SD_LBP_WS16);
	}

	/* Returned LBA is the highest addressable block, hence +1. */
	sdkp->capacity = lba + 1;
	return sector_size;
}
2442
/*
 * Issue READ CAPACITY(10) and parse the response into @sdkp.
 *
 * Returns the logical sector size on success, -EINVAL when the command
 * failed, and -ENODEV when no medium is present.
 */
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
						unsigned char *buffer)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int the_result;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	sector_t lba;
	unsigned sector_size;

	do {
		cmd[0] = READ_CAPACITY;
		memset(&cmd[1], 0, 9);
		memset(buffer, 0, 8);

		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
					buffer, 8, &sshdr,
					SD_TIMEOUT, sdkp->max_retries, NULL);

		if (media_not_present(sdkp, &sshdr))
			return -ENODEV;

		if (the_result > 0) {
			sense_valid = scsi_sense_valid(&sshdr);
			if (sense_valid &&
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)
					continue;
		}
		retries--;

	} while (the_result && retries);

	if (the_result) {
		sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
		return -EINVAL;
	}

	/* RC10 response: bytes 0-3 returned LBA, bytes 4-7 block length. */
	sector_size = get_unaligned_be32(&buffer[4]);
	lba = get_unaligned_be32(&buffer[0]);

	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
		/* Some buggy (usb cardreader) devices return an lba of
		   0xffffffff when they want to report a size of 0 (with
		   which they really mean no media is present) */
		sdkp->capacity = 0;
		sdkp->physical_block_size = sector_size;
		return sector_size;
	}

	/* Returned LBA is the highest addressable block, hence +1. */
	sdkp->capacity = lba + 1;
	sdkp->physical_block_size = sector_size;
	return sector_size;
}
2502
Matthew Wilcox2b301302009-03-12 14:20:30 -04002503static int sd_try_rc16_first(struct scsi_device *sdp)
2504{
Hannes Reineckef87146b2010-03-29 09:29:24 +02002505 if (sdp->host->max_cmd_len < 16)
2506 return 0;
Alan Stern6a0bdff2012-06-20 16:04:19 -04002507 if (sdp->try_rc_10_first)
2508 return 0;
Matthew Wilcox2b301302009-03-12 14:20:30 -04002509 if (sdp->scsi_level > SCSI_SPC_2)
2510 return 1;
2511 if (scsi_device_protection(sdp))
2512 return 1;
2513 return 0;
2514}
2515
/*
 * read disk capacity
 *
 * Probes with READ CAPACITY(16) or (10) as appropriate, applies
 * capacity quirks, validates the sector size and programs the request
 * queue limits.
 */
static void
sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int sector_size;
	struct scsi_device *sdp = sdkp->device;

	if (sd_try_rc16_first(sdp)) {
		/* RC16 first; fall back to RC10 on any other failure. */
		sector_size = read_capacity_16(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
			goto got_data;
		if (sector_size == -ENODEV)
			return;
		if (sector_size < 0)
			sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size < 0)
			return;
	} else {
		sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
			goto got_data;
		if (sector_size < 0)
			return;
		/* RC10 maxes out at 32 bits; retry with RC16 when the
		 * device looks larger than 2 TB (with 512-byte blocks). */
		if ((sizeof(sdkp->capacity) > 4) &&
		    (sdkp->capacity > 0xffffffffULL)) {
			int old_sector_size = sector_size;
			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
					"Trying to use READ CAPACITY(16).\n");
			sector_size = read_capacity_16(sdkp, sdp, buffer);
			if (sector_size < 0) {
				sd_printk(KERN_NOTICE, sdkp,
					"Using 0xffffffff as device size\n");
				sdkp->capacity = 1 + (sector_t) 0xffffffff;
				sector_size = old_sector_size;
				goto got_data;
			}
			/* Remember that READ CAPACITY(16) succeeded */
			sdp->try_rc_10_first = 0;
		}
	}

	/* Some devices are known to return the total number of blocks,
	 * not the highest block number.  Some devices have versions
	 * which do this and others which do not.  Some devices we might
	 * suspect of doing this but we don't know for certain.
	 *
	 * If we know the reported capacity is wrong, decrement it.  If
	 * we can only guess, then assume the number of blocks is even
	 * (usually true but not always) and err on the side of lowering
	 * the capacity.
	 */
	if (sdp->fix_capacity ||
	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
				"from its reported value: %llu\n",
				(unsigned long long) sdkp->capacity);
		--sdkp->capacity;
	}

got_data:
	if (sector_size == 0) {
		sector_size = 512;
		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
			  "assuming 512.\n");
	}

	if (sector_size != 512 &&
	    sector_size != 1024 &&
	    sector_size != 2048 &&
	    sector_size != 4096) {
		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
			  sector_size);
		/*
		 * The user might want to re-format the drive with
		 * a supported sectorsize.  Once this happens, it
		 * would be relatively trivial to set the thing up.
		 * For this reason, we leave the thing in the table.
		 */
		sdkp->capacity = 0;
		/*
		 * set a bogus sector size so the normal read/write
		 * logic in the block layer will eventually refuse any
		 * request on this device without tripping over power
		 * of two sector size assumptions
		 */
		sector_size = 512;
	}
	/* Publish block sizes to the block layer. */
	blk_queue_logical_block_size(sdp->request_queue, sector_size);
	blk_queue_physical_block_size(sdp->request_queue,
				      sdkp->physical_block_size);
	sdkp->device->sector_size = sector_size;

	/* Beyond 32-bit LBAs, READ/WRITE(16) CDBs are mandatory. */
	if (sdkp->capacity > 0xffffffff)
		sdp->use_16_for_rw = 1;

}
2614
2615/*
2616 * Print disk capacity
2617 */
2618static void
2619sd_print_capacity(struct scsi_disk *sdkp,
2620 sector_t old_capacity)
2621{
2622 int sector_size = sdkp->device->sector_size;
2623 char cap_str_2[10], cap_str_10[10];
2624
Damien Le Moald7e6db22019-01-30 16:07:34 +09002625 if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2626 return;
2627
Hannes Reinecke89d94752016-10-18 15:40:34 +09002628 string_get_size(sdkp->capacity, sector_size,
2629 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2630 string_get_size(sdkp->capacity, sector_size,
Damien Le Moald7e6db22019-01-30 16:07:34 +09002631 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
Hannes Reinecke89d94752016-10-18 15:40:34 +09002632
Damien Le Moald7e6db22019-01-30 16:07:34 +09002633 sd_printk(KERN_NOTICE, sdkp,
2634 "%llu %d-byte logical blocks: (%s/%s)\n",
2635 (unsigned long long)sdkp->capacity,
2636 sector_size, cap_str_10, cap_str_2);
2637
2638 if (sdkp->physical_block_size != sector_size)
Hannes Reinecke89d94752016-10-18 15:40:34 +09002639 sd_printk(KERN_NOTICE, sdkp,
Damien Le Moald7e6db22019-01-30 16:07:34 +09002640 "%u-byte physical blocks\n",
2641 sdkp->physical_block_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642}
2643
2644/* called with buffer of length 512 */
2645static inline int
Mike Christie06109592020-10-01 10:35:54 -05002646sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
James Bottomleyea73a9f2005-08-28 11:33:52 -05002647 unsigned char *buffer, int len, struct scsi_mode_data *data,
2648 struct scsi_sense_hdr *sshdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649{
Damien Le Moalc7493012021-08-20 16:02:55 +09002650 /*
2651 * If we must use MODE SENSE(10), make sure that the buffer length
2652 * is at least 8 bytes so that the mode sense header fits.
2653 */
2654 if (sdkp->device->use_10_for_ms && len < 8)
2655 len = 8;
2656
Mike Christie06109592020-10-01 10:35:54 -05002657 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
2658 SD_TIMEOUT, sdkp->max_retries, data,
James Bottomleyea73a9f2005-08-28 11:33:52 -05002659 sshdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660}
2661
2662/*
2663 * read write protect setting, if possible - called only in sd_revalidate_disk()
Al Viro48970802006-02-26 08:34:10 -06002664 * called with buffer of length SD_BUF_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 */
2666static void
Martin K. Petersene73aec82007-02-27 22:40:55 -05002667sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
James Bottomleyea73a9f2005-08-28 11:33:52 -05002668{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 int res;
James Bottomleyea73a9f2005-08-28 11:33:52 -05002670 struct scsi_device *sdp = sdkp->device;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 struct scsi_mode_data data;
Martin K. Petersen70a9b872009-03-09 11:33:31 -04002672 int old_wp = sdkp->write_prot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
2674 set_disk_ro(sdkp->disk, 0);
James Bottomleyea73a9f2005-08-28 11:33:52 -05002675 if (sdp->skip_ms_page_3f) {
Martin K. Petersenb2bff6c2014-01-03 18:19:26 -05002676 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 return;
2678 }
2679
James Bottomleyea73a9f2005-08-28 11:33:52 -05002680 if (sdp->use_192_bytes_for_3f) {
Mike Christie06109592020-10-01 10:35:54 -05002681 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 } else {
2683 /*
2684 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2685 * We have to start carefully: some devices hang if we ask
2686 * for more than is available.
2687 */
Mike Christie06109592020-10-01 10:35:54 -05002688 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
2690 /*
2691 * Second attempt: ask for page 0 When only page 0 is
2692 * implemented, a request for page 3F may return Sense Key
2693 * 5: Illegal Request, Sense Code 24: Invalid field in
2694 * CDB.
2695 */
Hannes Reinecke87936132021-04-27 10:30:09 +02002696 if (res < 0)
Mike Christie06109592020-10-01 10:35:54 -05002697 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
2699 /*
2700 * Third attempt: ask 255 bytes, as we did earlier.
2701 */
Hannes Reinecke87936132021-04-27 10:30:09 +02002702 if (res < 0)
Mike Christie06109592020-10-01 10:35:54 -05002703 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
James Bottomleyea73a9f2005-08-28 11:33:52 -05002704 &data, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 }
2706
Hannes Reinecke87936132021-04-27 10:30:09 +02002707 if (res < 0) {
Martin K. Petersenb2bff6c2014-01-03 18:19:26 -05002708 sd_first_printk(KERN_WARNING, sdkp,
Martin K. Petersene73aec82007-02-27 22:40:55 -05002709 "Test WP failed, assume Write Enabled\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 } else {
2711 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
Martin K. Petersen8acf6082019-05-20 10:57:18 -04002712 set_disk_ro(sdkp->disk, sdkp->write_prot);
Martin K. Petersen70a9b872009-03-09 11:33:31 -04002713 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2714 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2715 sdkp->write_prot ? "on" : "off");
Andy Shevchenkodf441cc2016-10-22 20:32:30 +03002716 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
Martin K. Petersen70a9b872009-03-09 11:33:31 -04002717 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 }
2719}
2720
/*
 * sd_read_cache_type - called only from sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 *
 * Determines the device's write-cache (WCE), read-cache (RCD) and
 * DPO/FUA capabilities from the Caching (8) or RBC (6) mode page,
 * falling back to conservative defaults when the page is unavailable.
 */
static void
sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int len = 0, res;
	struct scsi_device *sdp = sdkp->device;

	int dbd;
	int modepage;
	int first_len;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	/* Previous values, so we only log when something changed. */
	int old_wce = sdkp->WCE;
	int old_rcd = sdkp->RCD;
	int old_dpofua = sdkp->DPOFUA;


	/* User forced the cache mode via sysfs; do not touch it. */
	if (sdkp->cache_override)
		return;

	/* Pick the mode page and initial request length per device quirks. */
	first_len = 4;
	if (sdp->skip_ms_page_8) {
		if (sdp->type == TYPE_RBC)
			goto defaults;
		else {
			if (sdp->skip_ms_page_3f)
				goto defaults;
			modepage = 0x3F;
			if (sdp->use_192_bytes_for_3f)
				first_len = 192;
			dbd = 0;
		}
	} else if (sdp->type == TYPE_RBC) {
		modepage = 6;
		dbd = 8;
	} else {
		modepage = 8;
		dbd = 0;
	}

	/* cautiously ask */
	res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
			&data, &sshdr);

	if (res < 0)
		goto bad_sense;

	if (!data.header_length) {
		/* Broken device: retry below as if it were an RBC page. */
		modepage = 6;
		first_len = 0;
		sd_first_printk(KERN_ERR, sdkp,
				"Missing header in MODE_SENSE response\n");
	}

	/* that went OK, now ask for the proper length */
	len = data.length;

	/*
	 * We're only interested in the first three bytes, actually.
	 * But the data cache page is defined for the first 20.
	 */
	if (len < 3)
		goto bad_sense;
	else if (len > SD_BUF_SIZE) {
		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
		len = SD_BUF_SIZE;
	}
	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
		len = 192;

	/* Get the data */
	if (len > first_len)
		res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
				&data, &sshdr);

	if (!res) {
		int offset = data.header_length + data.block_descriptor_length;

		/*
		 * Walk the returned pages looking for the Caching (8) or
		 * RBC device parameters (6) page.  Bit 6 (SPF) selects the
		 * long (sub-page) header format, which has a 16-bit length.
		 */
		while (offset < len) {
			u8 page_code = buffer[offset] & 0x3F;
			u8 spf = buffer[offset] & 0x40;

			if (page_code == 8 || page_code == 6) {
				/* We're interested only in the first 3 bytes.
				 */
				if (len - offset <= 2) {
					sd_first_printk(KERN_ERR, sdkp,
						"Incomplete mode parameter "
							"data\n");
					goto defaults;
				} else {
					modepage = page_code;
					goto Page_found;
				}
			} else {
				/* Go to the next page */
				if (spf && len - offset > 3)
					offset += 4 + (buffer[offset+2] << 8) +
						buffer[offset+3];
				else if (!spf && len - offset > 1)
					offset += 2 + buffer[offset+1];
				else {
					sd_first_printk(KERN_ERR, sdkp,
							"Incomplete mode "
							"parameter data\n");
					goto defaults;
				}
			}
		}

		sd_first_printk(KERN_WARNING, sdkp,
				"No Caching mode page found\n");
		goto defaults;

	Page_found:
		/*
		 * Caching page: byte 2 bit 2 = WCE, bit 0 = RCD.
		 * RBC page: byte 2 bit 0 clear means writeback cache enabled.
		 */
		if (modepage == 8) {
			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
		} else {
			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
			sdkp->RCD = 0;
		}

		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
		if (sdp->broken_fua) {
			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
			sdkp->DPOFUA = 0;
		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
			   !sdkp->device->use_16_for_rw) {
			/* READ/WRITE(6) CDBs have no FUA bit to set. */
			sd_first_printk(KERN_NOTICE, sdkp,
					"Uses READ/WRITE(6), disabling FUA\n");
			sdkp->DPOFUA = 0;
		}

		/* No cache flush allowed for write protected devices */
		if (sdkp->WCE && sdkp->write_prot)
			sdkp->WCE = 0;

		if (sdkp->first_scan || old_wce != sdkp->WCE ||
		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
			sd_printk(KERN_NOTICE, sdkp,
				  "Write cache: %s, read cache: %s, %s\n",
				  sdkp->WCE ? "enabled" : "disabled",
				  sdkp->RCD ? "disabled" : "enabled",
				  sdkp->DPOFUA ? "supports DPO and FUA"
				  : "doesn't support DPO or FUA");

		return;
	}

bad_sense:
	if (scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
		/* Invalid field in CDB */
		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
	else
		sd_first_printk(KERN_ERR, sdkp,
				"Asking for cache data failed\n");

defaults:
	/* Could not query the device: assume per-quirk cache defaults. */
	if (sdp->wce_default_on) {
		sd_first_printk(KERN_NOTICE, sdkp,
				"Assuming drive cache: write back\n");
		sdkp->WCE = 1;
	} else {
		sd_first_printk(KERN_WARNING, sdkp,
				"Assuming drive cache: write through\n");
		sdkp->WCE = 0;
	}
	sdkp->RCD = 0;
	sdkp->DPOFUA = 0;
}
2898
Martin K. Petersene0597d72008-07-17 04:28:34 -04002899/*
2900 * The ATO bit indicates whether the DIF application tag is available
2901 * for use by the operating system.
2902 */
H Hartley Sweeten439d77f2010-08-10 18:01:20 -07002903static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
Martin K. Petersene0597d72008-07-17 04:28:34 -04002904{
2905 int res, offset;
2906 struct scsi_device *sdp = sdkp->device;
2907 struct scsi_mode_data data;
2908 struct scsi_sense_hdr sshdr;
2909
Hannes Reinecke89d94752016-10-18 15:40:34 +09002910 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
Martin K. Petersene0597d72008-07-17 04:28:34 -04002911 return;
2912
2913 if (sdkp->protection_type == 0)
2914 return;
2915
2916 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
Mike Christie06109592020-10-01 10:35:54 -05002917 sdkp->max_retries, &data, &sshdr);
Martin K. Petersene0597d72008-07-17 04:28:34 -04002918
Hannes Reinecke87936132021-04-27 10:30:09 +02002919 if (res < 0 || !data.header_length ||
Martin K. Petersene0597d72008-07-17 04:28:34 -04002920 data.length < 6) {
Martin K. Petersenb2bff6c2014-01-03 18:19:26 -05002921 sd_first_printk(KERN_WARNING, sdkp,
Martin K. Petersene0597d72008-07-17 04:28:34 -04002922 "getting Control mode page failed, assume no ATO\n");
2923
2924 if (scsi_sense_valid(&sshdr))
2925 sd_print_sense_hdr(sdkp, &sshdr);
2926
2927 return;
2928 }
2929
2930 offset = data.header_length + data.block_descriptor_length;
2931
2932 if ((buffer[offset] & 0x3f) != 0x0a) {
Martin K. Petersenb2bff6c2014-01-03 18:19:26 -05002933 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
Martin K. Petersene0597d72008-07-17 04:28:34 -04002934 return;
2935 }
2936
2937 if ((buffer[offset + 5] & 0x80) == 0)
2938 return;
2939
2940 sdkp->ATO = 1;
2941
2942 return;
2943}
2944
/**
 * sd_read_block_limits - Query disk device for preferred I/O sizes.
 * @sdkp: disk to query
 *
 * Reads the Block Limits VPD page (0xb0) and records the transfer-size
 * limits and, for thin-provisioned devices, the UNMAP parameters used
 * to pick a discard mechanism.  Silently does nothing if the page
 * cannot be read.
 */
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
	unsigned int sector_sz = sdkp->device->sector_size;
	const int vpd_len = 64;
	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);

	if (!buffer ||
	    /* Block Limits VPD */
	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
		goto out;

	/* Bytes 6-7: optimal transfer length granularity (in blocks). */
	blk_queue_io_min(sdkp->disk->queue,
			 get_unaligned_be16(&buffer[6]) * sector_sz);

	/* Bytes 8-11 / 12-15: maximum / optimal transfer length in blocks. */
	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);

	/* Page length 0x3c means the extended (full-size) page is present. */
	if (buffer[3] == 0x3c) {
		unsigned int lba_count, desc_count;

		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);

		/* The remaining fields only matter for thin provisioning. */
		if (!sdkp->lbpme)
			goto out;

		/* Max UNMAP LBA count and max UNMAP descriptor count. */
		lba_count = get_unaligned_be32(&buffer[20]);
		desc_count = get_unaligned_be32(&buffer[24]);

		if (lba_count && desc_count)
			sdkp->max_unmap_blocks = lba_count;

		sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);

		/* Bit 31 of bytes 32-35 flags a valid alignment value. */
		if (buffer[32] & 0x80)
			sdkp->unmap_alignment =
				get_unaligned_be32(&buffer[32]) & ~(1 << 31);

		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */

			if (sdkp->max_unmap_blocks)
				sd_config_discard(sdkp, SD_LBP_UNMAP);
			else
				sd_config_discard(sdkp, SD_LBP_WS16);

		} else {	/* LBP VPD page tells us what to use */
			if (sdkp->lbpu && sdkp->max_unmap_blocks)
				sd_config_discard(sdkp, SD_LBP_UNMAP);
			else if (sdkp->lbpws)
				sd_config_discard(sdkp, SD_LBP_WS16);
			else if (sdkp->lbpws10)
				sd_config_discard(sdkp, SD_LBP_WS10);
			else
				sd_config_discard(sdkp, SD_LBP_DISABLE);
		}
	}

 out:
	kfree(buffer);
}
3008
3009/**
Martin K. Petersen3821d762009-05-23 11:43:38 -04003010 * sd_read_block_characteristics - Query block dev. characteristics
Damien Le Moal7529fbb2017-04-24 16:51:09 +09003011 * @sdkp: disk to query
Martin K. Petersen3821d762009-05-23 11:43:38 -04003012 */
3013static void sd_read_block_characteristics(struct scsi_disk *sdkp)
3014{
Hannes Reinecke89d94752016-10-18 15:40:34 +09003015 struct request_queue *q = sdkp->disk->queue;
James Bottomleye3deec02009-11-03 12:33:07 -06003016 unsigned char *buffer;
Martin K. Petersen3821d762009-05-23 11:43:38 -04003017 u16 rot;
Martin K. Petersenbb2d3de2010-03-02 08:44:34 -05003018 const int vpd_len = 64;
Martin K. Petersen3821d762009-05-23 11:43:38 -04003019
James Bottomleye3deec02009-11-03 12:33:07 -06003020 buffer = kmalloc(vpd_len, GFP_KERNEL);
Martin K. Petersen3821d762009-05-23 11:43:38 -04003021
James Bottomleye3deec02009-11-03 12:33:07 -06003022 if (!buffer ||
3023 /* Block Device Characteristics VPD */
3024 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
3025 goto out;
Martin K. Petersen3821d762009-05-23 11:43:38 -04003026
3027 rot = get_unaligned_be16(&buffer[4]);
3028
Mike Snitzerb277da02014-10-04 10:55:32 -06003029 if (rot == 1) {
Bart Van Assche8b904b52018-03-07 17:10:10 -08003030 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3031 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
Mike Snitzerb277da02014-10-04 10:55:32 -06003032 }
Martin K. Petersen3821d762009-05-23 11:43:38 -04003033
Damien Le Moal68af4122017-01-12 15:25:11 +09003034 if (sdkp->device->type == TYPE_ZBC) {
3035 /* Host-managed */
Damien Le Moal27ba3e82020-09-15 16:33:46 +09003036 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
Damien Le Moal68af4122017-01-12 15:25:11 +09003037 } else {
3038 sdkp->zoned = (buffer[8] >> 4) & 3;
Damien Le Moal27ba3e82020-09-15 16:33:46 +09003039 if (sdkp->zoned == 1) {
Damien Le Moal68af4122017-01-12 15:25:11 +09003040 /* Host-aware */
Damien Le Moal27ba3e82020-09-15 16:33:46 +09003041 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
Christoph Hellwigb7205302020-01-26 14:05:43 +01003042 } else {
Damien Le Moal27ba3e82020-09-15 16:33:46 +09003043 /* Regular disk or drive managed disk */
3044 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
Christoph Hellwigb7205302020-01-26 14:05:43 +01003045 }
Damien Le Moal68af4122017-01-12 15:25:11 +09003046 }
Damien Le Moal27ba3e82020-09-15 16:33:46 +09003047
3048 if (!sdkp->first_scan)
3049 goto out;
3050
3051 if (blk_queue_is_zoned(q)) {
Hannes Reinecke89d94752016-10-18 15:40:34 +09003052 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
3053 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
Damien Le Moal27ba3e82020-09-15 16:33:46 +09003054 } else {
3055 if (sdkp->zoned == 1)
3056 sd_printk(KERN_NOTICE, sdkp,
3057 "Host-aware SMR disk used as regular disk\n");
3058 else if (sdkp->zoned == 2)
3059 sd_printk(KERN_NOTICE, sdkp,
3060 "Drive-managed SMR disk\n");
3061 }
Hannes Reinecke89d94752016-10-18 15:40:34 +09003062
James Bottomleye3deec02009-11-03 12:33:07 -06003063 out:
Martin K. Petersen3821d762009-05-23 11:43:38 -04003064 kfree(buffer);
3065}
3066
Martin K. Petersen045d3fe2010-09-10 01:22:07 -04003067/**
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -05003068 * sd_read_block_provisioning - Query provisioning VPD page
Damien Le Moal7529fbb2017-04-24 16:51:09 +09003069 * @sdkp: disk to query
Martin K. Petersen045d3fe2010-09-10 01:22:07 -04003070 */
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -05003071static void sd_read_block_provisioning(struct scsi_disk *sdkp)
Martin K. Petersen045d3fe2010-09-10 01:22:07 -04003072{
3073 unsigned char *buffer;
3074 const int vpd_len = 8;
3075
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -05003076 if (sdkp->lbpme == 0)
Martin K. Petersen045d3fe2010-09-10 01:22:07 -04003077 return;
3078
3079 buffer = kmalloc(vpd_len, GFP_KERNEL);
3080
3081 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
3082 goto out;
3083
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -05003084 sdkp->lbpvpd = 1;
3085 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
3086 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
3087 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
Martin K. Petersen045d3fe2010-09-10 01:22:07 -04003088
3089 out:
3090 kfree(buffer);
3091}
3092
Martin K. Petersen5db44862012-09-18 12:19:32 -04003093static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3094{
Martin K. Petersen66c28f92013-06-06 22:15:55 -04003095 struct scsi_device *sdev = sdkp->device;
3096
Martin K. Petersen54b2b502013-10-23 06:25:40 -04003097 if (sdev->host->no_write_same) {
3098 sdev->no_write_same = 1;
3099
3100 return;
3101 }
3102
Martin K. Petersen66c28f92013-06-06 22:15:55 -04003103 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
Bernd Schubertaf736232013-09-23 14:47:32 +02003104 /* too large values might cause issues with arcmsr */
3105 int vpd_buf_len = 64;
3106
Martin K. Petersen66c28f92013-06-06 22:15:55 -04003107 sdev->no_report_opcodes = 1;
3108
3109 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
3110 * CODES is unsupported and the device has an ATA
3111 * Information VPD page (SAT).
3112 */
Bernd Schubertaf736232013-09-23 14:47:32 +02003113 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
Martin K. Petersen66c28f92013-06-06 22:15:55 -04003114 sdev->no_write_same = 1;
3115 }
3116
3117 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
Martin K. Petersen5db44862012-09-18 12:19:32 -04003118 sdkp->ws16 = 1;
Martin K. Petersen66c28f92013-06-06 22:15:55 -04003119
3120 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
3121 sdkp->ws10 = 1;
Martin K. Petersen5db44862012-09-18 12:19:32 -04003122}
3123
Christoph Hellwigd80210f2017-06-19 14:26:46 +02003124static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3125{
3126 struct scsi_device *sdev = sdkp->device;
3127
3128 if (!sdev->security_supported)
3129 return;
3130
3131 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3132 SECURITY_PROTOCOL_IN) == 1 &&
3133 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3134 SECURITY_PROTOCOL_OUT) == 1)
3135 sdkp->security = 1;
3136}
3137
Damien Le Moale815d362021-10-27 11:22:20 +09003138static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
3139{
3140 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
3141}
3142
3143/**
3144 * sd_read_cpr - Query concurrent positioning ranges
3145 * @sdkp: disk to query
3146 */
3147static void sd_read_cpr(struct scsi_disk *sdkp)
3148{
3149 struct blk_independent_access_ranges *iars = NULL;
3150 unsigned char *buffer = NULL;
3151 unsigned int nr_cpr = 0;
3152 int i, vpd_len, buf_len = SD_BUF_SIZE;
3153 u8 *desc;
3154
3155 /*
3156 * We need to have the capacity set first for the block layer to be
3157 * able to check the ranges.
3158 */
3159 if (sdkp->first_scan)
3160 return;
3161
3162 if (!sdkp->capacity)
3163 goto out;
3164
3165 /*
3166 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
3167 * leading to a maximum page size of 64 + 256*32 bytes.
3168 */
3169 buf_len = 64 + 256*32;
3170 buffer = kmalloc(buf_len, GFP_KERNEL);
3171 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
3172 goto out;
3173
3174 /* We must have at least a 64B header and one 32B range descriptor */
3175 vpd_len = get_unaligned_be16(&buffer[2]) + 3;
3176 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
3177 sd_printk(KERN_ERR, sdkp,
3178 "Invalid Concurrent Positioning Ranges VPD page\n");
3179 goto out;
3180 }
3181
3182 nr_cpr = (vpd_len - 64) / 32;
3183 if (nr_cpr == 1) {
3184 nr_cpr = 0;
3185 goto out;
3186 }
3187
3188 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
3189 if (!iars) {
3190 nr_cpr = 0;
3191 goto out;
3192 }
3193
3194 desc = &buffer[64];
3195 for (i = 0; i < nr_cpr; i++, desc += 32) {
3196 if (desc[0] != i) {
3197 sd_printk(KERN_ERR, sdkp,
3198 "Invalid Concurrent Positioning Range number\n");
3199 nr_cpr = 0;
3200 break;
3201 }
3202
3203 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
3204 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
3205 }
3206
3207out:
3208 disk_set_independent_access_ranges(sdkp->disk, iars);
3209 if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
3210 sd_printk(KERN_NOTICE, sdkp,
3211 "%u concurrent positioning ranges\n", nr_cpr);
3212 sdkp->nr_actuators = nr_cpr;
3213 }
3214
3215 kfree(buffer);
3216}
3217
Martin K. Petersena83da8a2019-02-12 16:21:05 -05003218/*
3219 * Determine the device's preferred I/O size for reads and writes
3220 * unless the reported value is unreasonably small, large, not a
3221 * multiple of the physical block size, or simply garbage.
3222 */
3223static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3224 unsigned int dev_max)
3225{
3226 struct scsi_device *sdp = sdkp->device;
3227 unsigned int opt_xfer_bytes =
3228 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3229
Martin K. Petersen1d5de5b2019-03-27 12:11:52 -04003230 if (sdkp->opt_xfer_blocks == 0)
3231 return false;
3232
Martin K. Petersena83da8a2019-02-12 16:21:05 -05003233 if (sdkp->opt_xfer_blocks > dev_max) {
3234 sd_first_printk(KERN_WARNING, sdkp,
3235 "Optimal transfer size %u logical blocks " \
3236 "> dev_max (%u logical blocks)\n",
3237 sdkp->opt_xfer_blocks, dev_max);
3238 return false;
3239 }
3240
3241 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3242 sd_first_printk(KERN_WARNING, sdkp,
3243 "Optimal transfer size %u logical blocks " \
3244 "> sd driver limit (%u logical blocks)\n",
3245 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3246 return false;
3247 }
3248
3249 if (opt_xfer_bytes < PAGE_SIZE) {
3250 sd_first_printk(KERN_WARNING, sdkp,
3251 "Optimal transfer size %u bytes < " \
3252 "PAGE_SIZE (%u bytes)\n",
3253 opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3254 return false;
3255 }
3256
3257 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3258 sd_first_printk(KERN_WARNING, sdkp,
3259 "Optimal transfer size %u bytes not a " \
3260 "multiple of physical block size (%u bytes)\n",
3261 opt_xfer_bytes, sdkp->physical_block_size);
3262 return false;
3263 }
3264
3265 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
3266 opt_xfer_bytes);
3267 return true;
3268}
3269
Martin K. Petersen3821d762009-05-23 11:43:38 -04003270/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 * sd_revalidate_disk - called the first time a new disk is seen,
3272 * performs disk spin up, read_capacity, etc.
3273 * @disk: struct gendisk we care about
3274 **/
3275static int sd_revalidate_disk(struct gendisk *disk)
3276{
3277 struct scsi_disk *sdkp = scsi_disk(disk);
3278 struct scsi_device *sdp = sdkp->device;
Martin K. Petersenca369d52015-11-13 16:46:48 -05003279 struct request_queue *q = sdkp->disk->queue;
Hannes Reinecke89d94752016-10-18 15:40:34 +09003280 sector_t old_capacity = sdkp->capacity;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281 unsigned char *buffer;
Martin K. Petersenca369d52015-11-13 16:46:48 -05003282 unsigned int dev_max, rw_max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
Martin K. Petersenfa0d34b2007-02-27 22:41:19 -05003284 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3285 "sd_revalidate_disk\n"));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286
3287 /*
3288 * If the device is offline, don't try and read capacity or any
3289 * of the other niceties.
3290 */
3291 if (!scsi_device_online(sdp))
3292 goto out;
3293
Bernhard Wallea6123f12007-05-21 17:15:26 +02003294 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 if (!buffer) {
Martin K. Petersene73aec82007-02-27 22:40:55 -05003296 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
3297 "allocation failure.\n");
James Bottomleyea73a9f2005-08-28 11:33:52 -05003298 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 }
3300
Martin K. Petersene73aec82007-02-27 22:40:55 -05003301 sd_spinup_disk(sdkp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302
3303 /*
3304 * Without media there is no reason to ask; moreover, some devices
3305 * react badly if we do.
3306 */
3307 if (sdkp->media_present) {
Martin K. Petersene73aec82007-02-27 22:40:55 -05003308 sd_read_capacity(sdkp, buffer);
Martin K. Petersenffd4bc22009-07-29 14:06:53 -04003309
James Bottomleye4a05692019-02-12 08:05:25 -08003310 /*
3311 * set the default to rotational. All non-rotational devices
3312 * support the block characteristics VPD page, which will
3313 * cause this to be updated correctly and any device which
3314 * doesn't support it should be treated as rotational.
3315 */
3316 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3317 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3318
Hannes Reinecke5ddfe082016-04-01 08:57:36 +02003319 if (scsi_device_supports_vpd(sdp)) {
Martin K. Petersenc98a0eb2011-03-08 02:07:15 -05003320 sd_read_block_provisioning(sdkp);
Martin K. Petersenffd4bc22009-07-29 14:06:53 -04003321 sd_read_block_limits(sdkp);
3322 sd_read_block_characteristics(sdkp);
Hannes Reinecke89d94752016-10-18 15:40:34 +09003323 sd_zbc_read_zones(sdkp, buffer);
Martin K. Petersenffd4bc22009-07-29 14:06:53 -04003324 }
3325
Hannes Reinecke89d94752016-10-18 15:40:34 +09003326 sd_print_capacity(sdkp, old_capacity);
3327
Martin K. Petersene73aec82007-02-27 22:40:55 -05003328 sd_read_write_protect_flag(sdkp, buffer);
3329 sd_read_cache_type(sdkp, buffer);
Martin K. Petersene0597d72008-07-17 04:28:34 -04003330 sd_read_app_tag_own(sdkp, buffer);
Martin K. Petersen5db44862012-09-18 12:19:32 -04003331 sd_read_write_same(sdkp, buffer);
Christoph Hellwigd80210f2017-06-19 14:26:46 +02003332 sd_read_security(sdkp, buffer);
Damien Le Moale815d362021-10-27 11:22:20 +09003333 sd_read_cpr(sdkp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 }
Tejun Heo461d4e92006-01-06 09:52:55 +01003335
3336 /*
3337 * We now have all cache related info, determine how we deal
Tejun Heo4913efe2010-09-03 11:56:16 +02003338 * with flush requests.
Tejun Heo461d4e92006-01-06 09:52:55 +01003339 */
Vaughan Caocb2fb682014-06-03 17:37:30 +08003340 sd_set_flush_flag(sdkp);
Tejun Heo461d4e92006-01-06 09:52:55 +01003341
Martin K. Petersenca369d52015-11-13 16:46:48 -05003342 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3343 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
Brian King3a9794d2015-01-29 15:54:40 -06003344
Martin K. Petersenca369d52015-11-13 16:46:48 -05003345 /* Some devices report a maximum block count for READ/WRITE requests. */
3346 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3347 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3348
Martin K. Petersena83da8a2019-02-12 16:21:05 -05003349 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
Martin K. Petersen6b7e9cd2016-05-12 22:17:34 -04003350 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3351 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
Martin K. Petersenea697a82020-03-24 11:16:15 -04003352 } else {
3353 q->limits.io_opt = 0;
Fam Zheng67804142017-03-28 12:41:26 +08003354 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3355 (sector_t)BLK_DEF_MAX_SECTORS);
Martin K. Petersenea697a82020-03-24 11:16:15 -04003356 }
Martin K. Petersenca369d52015-11-13 16:46:48 -05003357
Martin K. Petersen77082ca2017-09-27 21:38:59 -04003358 /* Do not exceed controller limit */
3359 rw_max = min(rw_max, queue_max_hw_sectors(q));
3360
3361 /*
3362 * Only update max_sectors if previously unset or if the current value
3363 * exceeds the capabilities of the hardware.
3364 */
3365 if (sdkp->first_scan ||
3366 q->limits.max_sectors > q->limits.max_dev_sectors ||
3367 q->limits.max_sectors > q->limits.max_hw_sectors)
3368 q->limits.max_sectors = rw_max;
3369
3370 sdkp->first_scan = 0;
Martin K. Petersen4f258a42015-06-23 12:13:59 -04003371
Christoph Hellwig449f4ec2020-11-16 15:56:56 +01003372 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
Martin K. Petersen5db44862012-09-18 12:19:32 -04003373 sd_config_write_same(sdkp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 kfree(buffer);
3375
Damien Le Moala3d8a2572020-07-31 14:49:28 +09003376 /*
3377 * For a zoned drive, revalidating the zones can be done only once
3378 * the gendisk capacity is set. So if this fails, set back the gendisk
3379 * capacity to 0.
3380 */
3381 if (sd_zbc_revalidate_zones(sdkp))
Christoph Hellwig449f4ec2020-11-16 15:56:56 +01003382 set_capacity_and_notify(disk, 0);
Damien Le Moala3d8a2572020-07-31 14:49:28 +09003383
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 out:
3385 return 0;
3386}
3387
3388/**
Tejun Heo72ec24b2010-05-15 20:09:32 +02003389 * sd_unlock_native_capacity - unlock native capacity
3390 * @disk: struct gendisk to set capacity for
3391 *
3392 * Block layer calls this function if it detects that partitions
3393 * on @disk reach beyond the end of the device. If the SCSI host
3394 * implements ->unlock_native_capacity() method, it's invoked to
3395 * give it a chance to adjust the device capacity.
3396 *
3397 * CONTEXT:
3398 * Defined by block layer. Might sleep.
3399 */
3400static void sd_unlock_native_capacity(struct gendisk *disk)
3401{
3402 struct scsi_device *sdev = scsi_disk(disk)->device;
3403
3404 if (sdev->host->hostt->unlock_native_capacity)
3405 sdev->host->hostt->unlock_native_capacity(sdev);
3406}
3407
3408/**
Tejun Heo3e1a7ff2008-08-25 19:56:17 +09003409 * sd_format_disk_name - format disk name
3410 * @prefix: name prefix - ie. "sd" for SCSI disks
3411 * @index: index of the disk to format name for
3412 * @buf: output buffer
3413 * @buflen: length of the output buffer
3414 *
3415 * SCSI disk names starts at sda. The 26th device is sdz and the
3416 * 27th is sdaa. The last one for two lettered suffix is sdzz
3417 * which is followed by sdaaa.
3418 *
3419 * This is basically 26 base counting with one extra 'nil' entry
Daniel Mack3ad2f3fb2010-02-03 08:01:28 +08003420 * at the beginning from the second digit on and can be
Tejun Heo3e1a7ff2008-08-25 19:56:17 +09003421 * determined using similar method as 26 base conversion with the
3422 * index shifted -1 after each digit is computed.
3423 *
3424 * CONTEXT:
3425 * Don't care.
3426 *
3427 * RETURNS:
3428 * 0 on success, -errno on failure.
3429 */
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	char *suffix_start = buf + strlen(prefix);
	char *limit = buf + buflen;
	char *cursor = limit - 1;

	/*
	 * Emit the suffix digits right-to-left at the tail of the buffer,
	 * then slide the result down next to the prefix.  The encoding is
	 * bijective base-26: after each digit the remaining index is
	 * decremented so that "z" is followed by "aa", not "ba".
	 */
	*cursor = '\0';
	for (;;) {
		if (cursor == suffix_start)
			return -EINVAL;	/* no room left for another digit */
		*--cursor = 'a' + (index % radix);
		index = index / radix - 1;
		if (index < 0)
			break;
	}

	memmove(suffix_start, cursor, limit - cursor);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
3453
3454/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455 * sd_probe - called during driver initialization and whenever a
3456 * new scsi device is attached to the system. It is called once
3457 * for each scsi device (not just disks) present.
3458 * @dev: pointer to device object
3459 *
3460 * Returns 0 if successful (or not interested in this scsi device
3461 * (e.g. scanner)); 1 when there is an error.
3462 *
3463 * Note: this function is invoked from the scsi mid-level.
3464 * This function sets up the mapping between a given
3465 * <host,channel,id,lun> (found in sdp) and new device name
3466 * (e.g. /dev/sda). More precisely it is the block device major
3467 * and minor number that is chosen here.
3468 *
Petr Uzel2db93ce2012-02-24 16:56:54 +01003469 * Assume sd_probe is not re-entrant (for time being)
3470 * Also think about sd_probe() and sd_remove() running coincidentally.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 **/
static int sd_probe(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);
	struct scsi_disk *sdkp;
	struct gendisk *gd;
	int index;
	int error;

	/* Keep the device runtime-resumed for the duration of the probe. */
	scsi_autopm_get_device(sdp);
	error = -ENODEV;
	/* sd only claims disk-like device types; reject everything else. */
	if (sdp->type != TYPE_DISK &&
	    sdp->type != TYPE_ZBC &&
	    sdp->type != TYPE_MOD &&
	    sdp->type != TYPE_RBC)
		goto out;

	/* Host-managed zoned disks need CONFIG_BLK_DEV_ZONED support. */
	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
		sdev_printk(KERN_WARNING, sdp,
			    "Unsupported ZBC host-managed device.\n");
		goto out;
	}

	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
					"sd_probe\n"));

	error = -ENOMEM;
	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
	if (!sdkp)
		goto out;

	gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
			       &sd_bio_compl_lkclass);
	if (!gd)
		goto out_free;

	/* Unique index used to build the sdXX name and the minor numbers. */
	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
	if (index < 0) {
		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
		goto out_put;
	}

	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
	if (error) {
		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
		goto out_free_index;
	}

	sdkp->device = sdp;
	sdkp->driver = &sd_template;
	sdkp->disk = gd;
	sdkp->index = index;
	sdkp->max_retries = SD_MAX_RETRIES;
	atomic_set(&sdkp->openers, 0);
	atomic_set(&sdkp->device->ioerr_cnt, 0);

	/* Apply default command timeouts unless the LLD already set one. */
	if (!sdp->request_queue->rq_timeout) {
		if (sdp->type != TYPE_MOD)
			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
		else
			blk_queue_rq_timeout(sdp->request_queue,
					     SD_MOD_TIMEOUT);
	}

	device_initialize(&sdkp->dev);
	sdkp->dev.parent = get_device(dev);
	sdkp->dev.class = &sd_disk_class;
	dev_set_name(&sdkp->dev, "%s", dev_name(dev));

	error = device_add(&sdkp->dev);
	if (error) {
		/*
		 * Once device_initialize() has run, cleanup must go through
		 * put_device() so the release callback frees everything; the
		 * goto labels below are only for pre-device_add() failures.
		 */
		put_device(&sdkp->dev);
		goto out;
	}

	dev_set_drvdata(dev, sdkp);

	/* Map the ida index onto one of the sd majors and its minor range. */
	gd->major = sd_major((index & 0xf0) >> 4);
	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
	gd->minors = SD_MINORS;

	gd->fops = &sd_fops;
	gd->private_data = &sdkp->driver;

	/* defaults, until the device tells us otherwise */
	sdp->sector_size = 512;
	sdkp->capacity = 0;
	sdkp->media_present = 1;
	sdkp->write_prot = 0;
	sdkp->cache_override = 0;
	sdkp->WCE = 0;
	sdkp->RCD = 0;
	sdkp->ATO = 0;
	sdkp->first_scan = 1;
	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;

	/* Query capacity, cache mode, limits etc. from the device. */
	sd_revalidate_disk(gd);

	gd->flags = GENHD_FL_EXT_DEVT;
	if (sdp->removable) {
		gd->flags |= GENHD_FL_REMOVABLE;
		gd->events |= DISK_EVENT_MEDIA_CHANGE;
		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
	}

	blk_pm_runtime_init(sdp->request_queue, dev);
	if (sdp->rpm_autosuspend) {
		pm_runtime_set_autosuspend_delay(dev,
			sdp->host->hostt->rpm_autosuspend_delay);
	}

	error = device_add_disk(dev, gd, NULL);
	if (error) {
		put_device(&sdkp->dev);
		goto out;
	}

	if (sdkp->capacity)
		sd_dif_config_host(sdkp);

	/* Revalidate again now that the disk is visible to the system. */
	sd_revalidate_disk(gd);

	if (sdkp->security) {
		sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
		if (sdkp->opal_dev)
			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
	}

	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
		  sdp->removable ? "removable " : "");
	scsi_autopm_put_device(sdp);

	return 0;

	/* Error unwinding, in reverse order of acquisition: */
 out_free_index:
	ida_free(&sd_index_ida, index);
 out_put:
	put_disk(gd);
 out_free:
	sd_zbc_release_disk(sdkp);
	kfree(sdkp);
 out:
	scsi_autopm_put_device(sdp);
	return error;
}
3616
3617/**
3618 * sd_remove - called whenever a scsi disk (previously recognized by
3619 * sd_probe) is detached from the system. It is called (potentially
3620 * multiple times) during sd module unload.
John Pittmanf2a33132017-01-12 16:17:20 -05003621 * @dev: pointer to device object
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622 *
3623 * Note: this function is invoked from the scsi mid-level.
3624 * This function potentially frees up a device name (e.g. /dev/sdc)
3625 * that could be re-used by a subsequent sd_probe().
3626 * This function is not called when the built-in sd driver is "exit-ed".
3627 **/
static int sd_remove(struct device *dev)
{
	struct scsi_disk *sdkp;

	sdkp = dev_get_drvdata(dev);
	/*
	 * Hold a runtime PM reference so the device stays powered while we
	 * tear it down; sd_shutdown() below may still issue commands
	 * (cache sync, stop unit).
	 */
	scsi_autopm_get_device(sdkp->device);

	device_del(&sdkp->dev);
	del_gendisk(sdkp->disk);
	sd_shutdown(dev);

	free_opal_dev(sdkp->opal_dev);

	/*
	 * sd_ref_mutex serializes the final put against concurrent
	 * lookups; scsi_disk_release() runs on the last put_device().
	 */
	mutex_lock(&sd_ref_mutex);
	dev_set_drvdata(dev, NULL);
	put_device(&sdkp->dev);
	mutex_unlock(&sd_ref_mutex);

	return 0;
}
3648
3649/**
3650 * scsi_disk_release - Called to free the scsi_disk structure
Tony Jonesee959b02008-02-22 00:13:36 +01003651 * @dev: pointer to embedded class device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652 *
Arjan van de Ven0b950672006-01-11 13:16:10 +01003653 * sd_ref_mutex must be held entering this routine. Because it is
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654 * called on last put, you should always use the scsi_disk_get()
3655 * scsi_disk_put() helpers which manipulate the semaphore directly
Tony Jonesee959b02008-02-22 00:13:36 +01003656 * and never do a direct put_device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657 **/
static void scsi_disk_release(struct device *dev)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;

	/* Return the index so a later sd_probe() can reuse the sdXX name. */
	ida_free(&sd_index_ida, sdkp->index);

	/*
	 * Wait until all requests that are in progress have completed.
	 * This is necessary to avoid that e.g. scsi_end_request() crashes
	 * due to clearing the disk->private_data pointer. Wait from inside
	 * scsi_disk_release() instead of from sd_release() to avoid that
	 * freezing and unfreezing the request queue affects user space I/O
	 * in case multiple processes open a /dev/sd... node concurrently.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_unfreeze_queue(q);

	disk->private_data = NULL;
	put_disk(disk);
	/* Drop our reference on the underlying SCSI device. */
	put_device(&sdkp->device->sdev_gendev);

	sd_zbc_release_disk(sdkp);

	kfree(sdkp);
}
3685
/*
 * sd_start_stop_device - issue START STOP UNIT to spin the drive up
 * (start != 0) or down (start == 0).  Returns 0 on success, -ENODEV if
 * the device is offline, -EIO if the command failed.
 */
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp = sdkp->device;
	int res;

	if (start)
		cmd[4] |= 1;	/* START */

	/* Devices honouring power conditions get an explicit state instead. */
	if (sdp->start_stop_pwr_cond)
		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */

	if (!scsi_device_online(sdp))
		return -ENODEV;

	res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			   SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
	if (res) {
		sd_print_result(sdkp, "Start/Stop Unit failed", res);
		if (res > 0 && scsi_sense_valid(&sshdr)) {
			sd_print_sense_hdr(sdkp, &sshdr);
			/* 0x3a is medium not present */
			if (sshdr.asc == 0x3a)
				res = 0;	/* not an error for start/stop */
		}
	}

	/* SCSI error codes must not go to the generic layer */
	if (res)
		return -EIO;

	return 0;
}
3720
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721/*
3722 * Send a SYNCHRONIZE CACHE instruction down to the device through
3723 * the normal SCSI command structure. Wait for the command to
3724 * complete.
3725 */
static void sd_shutdown(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	if (!sdkp)
		return;         /* this can happen */

	/* A runtime-suspended device needs no further quiescing here. */
	if (pm_runtime_suspended(dev))
		return;

	/* Flush the volatile write cache so no dirty data is lost. */
	if (sdkp->WCE && sdkp->media_present) {
		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		sd_sync_cache(sdkp, NULL);
	}

	/* Spin the disk down on halt/poweroff, but not across a restart. */
	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		sd_start_stop_device(sdkp, 0);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746
/*
 * sd_suspend_common - flush the write cache and optionally spin the drive
 * down ahead of a system or runtime suspend.
 * @ignore_stop_errors: when true, a failed STOP UNIT is not reported to
 * the caller (system sleep must not be aborted by it).
 */
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_sense_hdr sshdr;
	int ret = 0;

	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
		return 0;

	if (sdkp->WCE && sdkp->media_present) {
		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		ret = sd_sync_cache(sdkp, &sshdr);

		if (ret) {
			/* ignore OFFLINE device */
			if (ret == -ENODEV)
				return 0;

			if (!scsi_sense_valid(&sshdr) ||
			    sshdr.sense_key != ILLEGAL_REQUEST)
				return ret;

			/*
			 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
			 * doesn't support sync. There's not much to do and
			 * suspend shouldn't fail.
			 */
			ret = 0;
		}
	}

	if (sdkp->device->manage_start_stop) {
		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		/* an error is not worth aborting a system sleep */
		ret = sd_start_stop_device(sdkp, 0);
		if (ignore_stop_errors)
			ret = 0;
	}

	return ret;
}
3788
Oliver Neukum95897912013-09-16 13:28:15 +02003789static int sd_suspend_system(struct device *dev)
3790{
Bart Van Assche9131bff2021-10-06 14:54:53 -07003791 if (pm_runtime_suspended(dev))
3792 return 0;
3793
Oliver Neukum95897912013-09-16 13:28:15 +02003794 return sd_suspend_common(dev, true);
3795}
3796
/* Runtime PM suspend: unlike system sleep, stop-unit errors are reported. */
static int sd_suspend_runtime(struct device *dev)
{
	return sd_suspend_common(dev, false);
}
3801
static int sd_resume(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	int ret;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	/* Only disks we manage were stopped, so only those need a start. */
	if (!sdkp->device->manage_start_stop)
		return 0;

	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
	ret = sd_start_stop_device(sdkp, 1);
	/* An Opal-locked drive must be unlocked again after power-up. */
	if (!ret)
		opal_unlock_from_suspend(sdkp->opal_dev);
	return ret;
}
3819
static int sd_resume_system(struct device *dev)
{
	/* A device that is runtime suspended stays that way over resume. */
	return pm_runtime_suspended(dev) ? 0 : sd_resume(dev);
}
3827
static int sd_resume_runtime(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_device *sdp;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	sdp = sdkp->device;

	/*
	 * Quirky devices report spurious media-change sense after a power
	 * cycle; drain it so subsequent commands don't fail.
	 */
	if (sdp->ignore_media_change) {
		/* clear the device's sense data */
		static const u8 cmd[10] = { REQUEST_SENSE };

		if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
				 NULL, sdp->request_queue->rq_timeout, 1, 0,
				 RQF_PM, NULL))
			sd_printk(KERN_NOTICE, sdkp,
				  "Failed to clear sense data\n");
	}

	return sd_resume(dev);
}
3851
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852/**
3853 * init_sd - entry point for this driver (both when built in or when
3854 * a module).
3855 *
3856 * Note: this function registers this driver with the scsi mid-level.
3857 **/
static int __init init_sd(void)
{
	int majors = 0, i, err;

	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));

	/* Grab as many of the sd block majors as are still available. */
	for (i = 0; i < SD_MAJORS; i++) {
		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
			continue;
		majors++;
	}

	/* Without at least one major there is nothing we can drive. */
	if (!majors)
		return -ENODEV;

	err = class_register(&sd_disk_class);
	if (err)
		goto err_out;

	sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
					 0, 0, NULL);
	if (!sd_cdb_cache) {
		printk(KERN_ERR "sd: can't init extended cdb cache\n");
		err = -ENOMEM;
		goto err_out_class;
	}

	sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
	if (!sd_cdb_pool) {
		printk(KERN_ERR "sd: can't init extended cdb pool\n");
		err = -ENOMEM;
		goto err_out_cache;
	}

	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
	if (!sd_page_pool) {
		printk(KERN_ERR "sd: can't init discard page pool\n");
		err = -ENOMEM;
		goto err_out_ppool;
	}

	/* Register with the SCSI midlayer last, once everything is ready. */
	err = scsi_register_driver(&sd_template.gendrv);
	if (err)
		goto err_out_driver;

	return 0;

	/* Unwind everything set up above on failure. */
err_out_driver:
	mempool_destroy(sd_page_pool);

err_out_ppool:
	mempool_destroy(sd_cdb_pool);

err_out_cache:
	kmem_cache_destroy(sd_cdb_cache);

err_out_class:
	class_unregister(&sd_disk_class);
err_out:
	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
	return err;
}
3921
3922/**
3923 * exit_sd - exit point for this driver (when it is a module).
3924 *
3925 * Note: this function unregisters this driver from the scsi mid-level.
3926 **/
static void __exit exit_sd(void)
{
	int i;

	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));

	/* Tear down everything init_sd() set up, driver first. */
	scsi_unregister_driver(&sd_template.gendrv);
	mempool_destroy(sd_cdb_pool);
	mempool_destroy(sd_page_pool);
	kmem_cache_destroy(sd_cdb_cache);

	class_unregister(&sd_disk_class);

	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
}
3943
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944module_init(init_sd);
3945module_exit(exit_sd);
Martin K. Petersene73aec82007-02-27 22:40:55 -05003946
Damien Le Moala35989a2019-11-25 16:05:18 +09003947void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
Martin K. Petersene73aec82007-02-27 22:40:55 -05003948{
Hannes Reinecke21045512015-01-08 07:43:46 +01003949 scsi_print_sense_hdr(sdkp->device,
3950 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
Martin K. Petersene73aec82007-02-27 22:40:55 -05003951}
3952
Damien Le Moala35989a2019-11-25 16:05:18 +09003953void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
Martin K. Petersene73aec82007-02-27 22:40:55 -05003954{
Hannes Reineckeef613292014-10-24 14:27:00 +02003955 const char *hb_string = scsi_hostbyte_string(result);
Hannes Reineckeef613292014-10-24 14:27:00 +02003956
Hannes Reinecke54c29082021-04-27 10:30:20 +02003957 if (hb_string)
Hannes Reineckeef613292014-10-24 14:27:00 +02003958 sd_printk(KERN_INFO, sdkp,
3959 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3960 hb_string ? hb_string : "invalid",
Hannes Reinecke54c29082021-04-27 10:30:20 +02003961 "DRIVER_OK");
Hannes Reineckeef613292014-10-24 14:27:00 +02003962 else
3963 sd_printk(KERN_INFO, sdkp,
Hannes Reinecke54c29082021-04-27 10:30:20 +02003964 "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
3965 msg, host_byte(result), "DRIVER_OK");
Martin K. Petersene73aec82007-02-27 22:40:55 -05003966}