// SPDX-License-Identifier: GPL-1.0+
/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
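
/*
 * Illustrative sketch only (not part of this driver): ccw_bus_match() walks
 * the id table that a ccw driver publishes via cdrv->ids. Assuming the
 * CCW_DEVICE_DEVTYPE() helper from <asm/ccwdev.h>, such a table would
 * typically look roughly like this, with made-up type/model numbers:
 *
 *	static struct ccw_device_id example_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0), .driver_info = 0 },
 *		{ },	// end of list
 *	};
 *	MODULE_DEVICE_TABLE(ccw, example_ids);
 */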

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
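
/*
 * Example (illustrative only): for a hypothetical device with cu_type 0x3990,
 * cu_model 0xe9, dev_type 0x3390 and dev_model 0x0c, the format strings above
 * produce the alias "ccw:t3990mE9dt3390dm0C"; a device that reports no
 * distinct device type/model gets "ccw:t3990mE9dtdm" instead.
 */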

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static void io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				       atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
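
/*
 * Illustrative sketch only, not taken from this file: a caller that holds a
 * reference on a ccw device (e.g. from get_ccwdev_by_dev_id()) could drive
 * the transitions like this:
 *
 *	ret = ccw_device_set_online(cdev);
 *	if (ret)
 *		return ret;			// device remains offline
 *	...					// use the device
 *	ret = ccw_device_set_offline(cdev);
 *
 * Per the kernel-doc above, both calls require the ccw device lock not to be
 * held; they also sleep until the device FSM reaches a final state, so they
 * must not be used from interrupt context.
 */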

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);
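
/*
 * These attributes are exported through sysfs. As an illustration (the bus ID
 * 0.0.1234 is made up), an administrator toggles a device via the ccw
 * device's "online" attribute, handled by online_store() above:
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online	// boxed devices
 *
 * "devtype", "cutype", "modalias" and "availability" are read-only views of
 * the device state, while "logging" and "vpm" are grouped into
 * io_subchannel_attrs below and belong to the parent subchannel.
 */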

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
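
/*
 * Illustrative sketch only: the reference taken by get_ccwdev_by_dev_id()
 * must be dropped again with put_device() once the caller is done, e.g.:
 *
 *	struct ccw_dev_id id = { .ssid = 0, .devno = 0x1234 };	// made-up ID
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&id);
 *
 *	if (cdev) {
 *		...				// use cdev
 *		put_device(&cdev->dev);		// drop the lookup reference
 *	}
 */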
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 651 | |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 652 | static void ccw_device_do_unbind_bind(struct ccw_device *cdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 653 | { |
Cornelia Huck | eb32ae8 | 2009-03-26 15:24:05 +0100 | [diff] [blame] | 654 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 655 | |
Sebastian Ott | 7d253b9 | 2009-12-07 12:51:33 +0100 | [diff] [blame] | 656 | if (device_is_registered(&cdev->dev)) { |
Cornelia Huck | eb32ae8 | 2009-03-26 15:24:05 +0100 | [diff] [blame] | 657 | device_release_driver(&cdev->dev); |
| 658 | ret = device_attach(&cdev->dev); |
| 659 | WARN_ON(ret == -ENODEV); |
| 660 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 661 | } |
| 662 | |
| 663 | static void |
| 664 | ccw_device_release(struct device *dev) |
| 665 | { |
| 666 | struct ccw_device *cdev; |
| 667 | |
| 668 | cdev = to_ccwdev(dev); |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 669 | cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area, |
| 670 | sizeof(*cdev->private->dma_area)); |
| 671 | cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev); |
Cornelia Huck | 6eff208 | 2008-12-25 13:39:07 +0100 | [diff] [blame] | 672 | /* Release reference of parent subchannel. */ |
| 673 | put_device(cdev->dev.parent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 674 | kfree(cdev->private); |
| 675 | kfree(cdev); |
| 676 | } |
| 677 | |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 678 | static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) |
| 679 | { |
| 680 | struct ccw_device *cdev; |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 681 | struct gen_pool *dma_pool; |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 682 | int ret; |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 683 | |
| 684 | cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 685 | if (!cdev) { |
| 686 | ret = -ENOMEM; |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 687 | goto err_cdev; |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 688 | } |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 689 | cdev->private = kzalloc(sizeof(struct ccw_device_private), |
| 690 | GFP_KERNEL | GFP_DMA); |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 691 | if (!cdev->private) { |
| 692 | ret = -ENOMEM; |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 693 | goto err_priv; |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 694 | } |
| 695 | |
Halil Pasic | 05668e1 | 2019-09-30 17:38:02 +0200 | [diff] [blame] | 696 | cdev->dev.dma_mask = sch->dev.dma_mask; |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 697 | ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask); |
| 698 | if (ret) |
| 699 | goto err_coherent_mask; |
| 700 | |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 701 | dma_pool = cio_gp_dma_create(&cdev->dev, 1); |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 702 | if (!dma_pool) { |
| 703 | ret = -ENOMEM; |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 704 | goto err_dma_pool; |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 705 | } |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 706 | cdev->private->dma_pool = dma_pool; |
| 707 | cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev, |
| 708 | sizeof(*cdev->private->dma_area)); |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 709 | if (!cdev->private->dma_area) { |
| 710 | ret = -ENOMEM; |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 711 | goto err_dma_area; |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 712 | } |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 713 | return cdev; |
| 714 | err_dma_area: |
| 715 | cio_gp_dma_destroy(dma_pool, &cdev->dev); |
| 716 | err_dma_pool: |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 717 | err_coherent_mask: |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 718 | kfree(cdev->private); |
| 719 | err_priv: |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 720 | kfree(cdev); |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 721 | err_cdev: |
Julian Wiedmann | 4520a91 | 2020-12-09 11:24:13 +0100 | [diff] [blame] | 722 | return ERR_PTR(ret); |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 723 | } |
| 724 | |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 725 | static void ccw_device_todo(struct work_struct *work); |
| 726 | |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 727 | static int io_subchannel_initialize_dev(struct subchannel *sch, |
| 728 | struct ccw_device *cdev) |
| 729 | { |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 730 | struct ccw_device_private *priv = cdev->private; |
| 731 | int ret; |
| 732 | |
| 733 | priv->cdev = cdev; |
| 734 | priv->int_class = IRQIO_CIO; |
| 735 | priv->state = DEV_STATE_NOT_OPER; |
| 736 | priv->dev_id.devno = sch->schib.pmcw.dev; |
| 737 | priv->dev_id.ssid = sch->schid.ssid; |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 738 | |
| 739 | INIT_WORK(&priv->todo_work, ccw_device_todo); |
| 740 | INIT_LIST_HEAD(&priv->cmb_list); |
| 741 | init_waitqueue_head(&priv->wait_q); |
Kees Cook | 846d0c6 | 2017-10-16 16:43:25 -0700 | [diff] [blame] | 742 | timer_setup(&priv->timer, ccw_device_timeout, 0); |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 743 | |
| 744 | atomic_set(&priv->onoff, 0); |
| 745 | cdev->ccwlock = sch->lock; |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 746 | cdev->dev.parent = &sch->dev; |
| 747 | cdev->dev.release = ccw_device_release; |
Julian Wiedmann | 29c53de | 2020-11-30 10:19:57 +0200 | [diff] [blame] | 748 | cdev->dev.bus = &ccw_bus_type; |
Cornelia Huck | ef99516 | 2007-04-27 16:01:39 +0200 | [diff] [blame] | 749 | cdev->dev.groups = ccwdev_attr_groups; |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 750 | /* Do first half of device_register. */ |
| 751 | device_initialize(&cdev->dev); |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 752 | ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid, |
| 753 | cdev->private->dev_id.devno); |
| 754 | if (ret) |
| 755 | goto out_put; |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 756 | if (!get_device(&sch->dev)) { |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 757 | ret = -ENODEV; |
| 758 | goto out_put; |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 759 | } |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 760 | priv->flags.initialized = 1; |
| 761 | spin_lock_irq(sch->lock); |
| 762 | sch_set_cdev(sch, cdev); |
| 763 | spin_unlock_irq(sch->lock); |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 764 | return 0; |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 765 | |
| 766 | out_put: |
| 767 | /* Release reference from device_initialize(). */ |
| 768 | put_device(&cdev->dev); |
| 769 | return ret; |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 770 | } |
| 771 | |
| 772 | static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) |
| 773 | { |
| 774 | struct ccw_device *cdev; |
| 775 | int ret; |
| 776 | |
| 777 | cdev = io_subchannel_allocate_dev(sch); |
| 778 | if (!IS_ERR(cdev)) { |
| 779 | ret = io_subchannel_initialize_dev(sch, cdev); |
Sebastian Ott | 06739a8 | 2009-08-23 18:09:04 +0200 | [diff] [blame] | 780 | if (ret) |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 781 | cdev = ERR_PTR(ret); |
Cornelia Huck | 7674da7 | 2006-12-08 15:54:21 +0100 | [diff] [blame] | 782 | } |
| 783 | return cdev; |
| 784 | } |
| 785 | |
Peter Oberparleiter | 736b5db | 2009-12-07 12:51:21 +0100 | [diff] [blame] | 786 | static void io_subchannel_recog(struct ccw_device *, struct subchannel *); |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 787 | |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 788 | static void sch_create_and_recog_new_device(struct subchannel *sch) |
| 789 | { |
| 790 | struct ccw_device *cdev; |
| 791 | |
| 792 | /* Need to allocate a new ccw device. */ |
| 793 | cdev = io_subchannel_create_ccwdev(sch); |
| 794 | if (IS_ERR(cdev)) { |
| 795 | /* OK, we did everything we could... */ |
| 796 | css_sch_device_unregister(sch); |
| 797 | return; |
| 798 | } |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 799 | /* Start recognition for the new ccw device. */ |
Peter Oberparleiter | 736b5db | 2009-12-07 12:51:21 +0100 | [diff] [blame] | 800 | io_subchannel_recog(cdev, sch); |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 801 | } |
| 802 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 803 | /* |
| 804 | * Register recognized device. |
| 805 | */ |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 806 | static void io_subchannel_register(struct ccw_device *cdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 807 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 808 | struct subchannel *sch; |
Sebastian Ott | a290156 | 2010-03-08 12:25:17 +0100 | [diff] [blame] | 809 | int ret, adjust_init_count = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 810 | unsigned long flags; |
| 811 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 812 | sch = to_subchannel(cdev->dev.parent); |
Cornelia Huck | 5fb6b85 | 2008-12-25 13:39:08 +0100 | [diff] [blame] | 813 | /* |
| 814 | * Check if subchannel is still registered. It may have become |
| 815 | * unregistered if a machine check hit us after finishing |
| 816 | * device recognition but before the register work could be |
| 817 | * queued. |
| 818 | */ |
| 819 | if (!device_is_registered(&sch->dev)) |
| 820 | goto out_err; |
Cornelia Huck | 82b7ac0 | 2007-04-27 16:01:36 +0200 | [diff] [blame] | 821 | css_update_ssd_info(sch); |
Cornelia Huck | 47af551 | 2006-12-04 15:41:07 +0100 | [diff] [blame] | 822 | /* |
| 823 | * io_subchannel_register() will also be called after device |
| 824 | * recognition has been done for a boxed device (which will already |
| 825 | * be registered). We need to reprobe since we may now have sense id |
| 826 | * information. |
| 827 | */ |
Cornelia Huck | d6a3076 | 2008-12-25 13:39:11 +0100 | [diff] [blame] | 828 | if (device_is_registered(&cdev->dev)) { |
Cornelia Huck | 47af551 | 2006-12-04 15:41:07 +0100 | [diff] [blame] | 829 | if (!cdev->drv) { |
| 830 | ret = device_reprobe(&cdev->dev); |
| 831 | if (ret) |
| 832 | /* We can't do much here. */ |
Michael Ernst | 139b83dd | 2008-05-07 09:22:54 +0200 | [diff] [blame] | 833 | CIO_MSG_EVENT(0, "device_reprobe() returned" |
Cornelia Huck | e556bbb | 2007-07-27 12:29:19 +0200 | [diff] [blame] | 834 | " %d for 0.%x.%04x\n", ret, |
| 835 | cdev->private->dev_id.ssid, |
| 836 | cdev->private->dev_id.devno); |
Cornelia Huck | 47af551 | 2006-12-04 15:41:07 +0100 | [diff] [blame] | 837 | } |
Sebastian Ott | a290156 | 2010-03-08 12:25:17 +0100 | [diff] [blame] | 838 | adjust_init_count = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 | goto out; |
| 840 | } |
| 841 | /* make it known to the system */ |
Julian Wiedmann | 29c53de | 2020-11-30 10:19:57 +0200 | [diff] [blame] | 842 | ret = device_add(&cdev->dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 843 | if (ret) { |
Cornelia Huck | e556bbb | 2007-07-27 12:29:19 +0200 | [diff] [blame] | 844 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", |
| 845 | cdev->private->dev_id.ssid, |
| 846 | cdev->private->dev_id.devno, ret); |
Cornelia Huck | 2ec2298 | 2006-12-08 15:54:26 +0100 | [diff] [blame] | 847 | spin_lock_irqsave(sch->lock, flags); |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 848 | sch_set_cdev(sch, NULL); |
Cornelia Huck | 2ec2298 | 2006-12-08 15:54:26 +0100 | [diff] [blame] | 849 | spin_unlock_irqrestore(sch->lock, flags); |
Cornelia Huck | 6eff208 | 2008-12-25 13:39:07 +0100 | [diff] [blame] | 850 | /* Release initial device reference. */ |
| 851 | put_device(&cdev->dev); |
Cornelia Huck | 5fb6b85 | 2008-12-25 13:39:08 +0100 | [diff] [blame] | 852 | goto out_err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 854 | out: |
| 855 | cdev->private->flags.recog_done = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 856 | wake_up(&cdev->private->wait_q); |
Cornelia Huck | 5fb6b85 | 2008-12-25 13:39:08 +0100 | [diff] [blame] | 857 | out_err: |
Sebastian Ott | a290156 | 2010-03-08 12:25:17 +0100 | [diff] [blame] | 858 | if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 859 | wake_up(&ccw_device_init_wq); |
| 860 | } |
| 861 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 862 | /* |
| 863 | * subchannel recognition done. Called from the state machine. |
| 864 | */ |
| 865 | void |
| 866 | io_subchannel_recog_done(struct ccw_device *cdev) |
| 867 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | if (css_init_done == 0) { |
| 869 | cdev->private->flags.recog_done = 1; |
| 870 | return; |
| 871 | } |
| 872 | switch (cdev->private->state) { |
Sebastian Ott | 47593bf | 2009-03-31 19:16:05 +0200 | [diff] [blame] | 873 | case DEV_STATE_BOXED: |
| 874 | /* Device did not respond in time. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 875 | case DEV_STATE_NOT_OPER: |
| 876 | cdev->private->flags.recog_done = 1; |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 877 | /* Remove device found not operational. */ |
| 878 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 879 | if (atomic_dec_and_test(&ccw_device_init_count)) |
| 880 | wake_up(&ccw_device_init_wq); |
| 881 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 882 | case DEV_STATE_OFFLINE: |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 883 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | * We can't register the device in interrupt context so |
| 885 | * we schedule a work item. |
| 886 | */ |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 887 | ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 888 | break; |
| 889 | } |
| 890 | } |
| 891 | |
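/*
 * Kick off device recognition for a newly created ccw_device: bump the
 * count of devices currently being recognized and start the asynchronous
 * sensing under the subchannel lock. The counter is dropped again once
 * recognition has finished (see io_subchannel_recog_done() and the
 * registration path above).
 */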
Peter Oberparleiter | 736b5db | 2009-12-07 12:51:21 +0100 | [diff] [blame] | 892 | static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 894 | /* Increase counter of devices currently in recognition. */ |
| 895 | atomic_inc(&ccw_device_init_count); |
| 896 | |
| 897 | /* Start async. device sensing. */ |
Cornelia Huck | 2ec2298 | 2006-12-08 15:54:26 +0100 | [diff] [blame] | 898 | spin_lock_irq(sch->lock); |
Peter Oberparleiter | 736b5db | 2009-12-07 12:51:21 +0100 | [diff] [blame] | 899 | ccw_device_recognition(cdev); |
Cornelia Huck | 2ec2298 | 2006-12-08 15:54:26 +0100 | [diff] [blame] | 900 | spin_unlock_irq(sch->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | } |
| 902 | |
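/*
 * Re-parent a ccw_device to a different subchannel: disable the old
 * subchannel (unless it is the pseudo subchannel), move the device with
 * device_move(), and only then detach it from the old subchannel and
 * attach it to the new one. If the move fails, the old subchannel is
 * re-enabled and the child reference for the new parent is dropped again.
 */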
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 903 | static int ccw_device_move_to_sch(struct ccw_device *cdev, |
| 904 | struct subchannel *sch) |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 905 | { |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 906 | struct subchannel *old_sch; |
Sebastian Ott | 0c609fc | 2009-12-07 12:51:37 +0100 | [diff] [blame] | 907 | int rc, old_enabled = 0; |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 908 | |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 909 | old_sch = to_subchannel(cdev->dev.parent); |
| 910 | /* Obtain child reference for new parent. */ |
Cornelia Huck | 6eff208 | 2008-12-25 13:39:07 +0100 | [diff] [blame] | 911 | if (!get_device(&sch->dev)) |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 912 | return -ENODEV; |
Sebastian Ott | 0c609fc | 2009-12-07 12:51:37 +0100 | [diff] [blame] | 913 | |
| 914 | if (!sch_is_pseudo_sch(old_sch)) { |
| 915 | spin_lock_irq(old_sch->lock); |
| 916 | old_enabled = old_sch->schib.pmcw.ena; |
| 917 | rc = 0; |
| 918 | if (old_enabled) |
| 919 | rc = cio_disable_subchannel(old_sch); |
| 920 | spin_unlock_irq(old_sch->lock); |
| 921 | if (rc == -EBUSY) { |
| 922 | /* Release child reference for new parent. */ |
| 923 | put_device(&sch->dev); |
| 924 | return rc; |
| 925 | } |
| 926 | } |
| 927 | |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 928 | mutex_lock(&sch->reg_mutex); |
Cornelia Huck | ffa6a70 | 2009-03-04 12:44:00 +0100 | [diff] [blame] | 929 | rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 930 | mutex_unlock(&sch->reg_mutex); |
| 931 | if (rc) { |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 932 | CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n", |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 933 | cdev->private->dev_id.ssid, |
| 934 | cdev->private->dev_id.devno, sch->schid.ssid, |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 935 | sch->schib.pmcw.dev, rc); |
Sebastian Ott | 0c609fc | 2009-12-07 12:51:37 +0100 | [diff] [blame] | 936 | if (old_enabled) { |
| 937 | /* Try to reenable the old subchannel. */ |
| 938 | spin_lock_irq(old_sch->lock); |
| 939 | cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch); |
| 940 | spin_unlock_irq(old_sch->lock); |
| 941 | } |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 942 | /* Release child reference for new parent. */ |
Cornelia Huck | 6eff208 | 2008-12-25 13:39:07 +0100 | [diff] [blame] | 943 | put_device(&sch->dev); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 944 | return rc; |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 945 | } |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 946 | /* Clean up old subchannel. */ |
| 947 | if (!sch_is_pseudo_sch(old_sch)) { |
| 948 | spin_lock_irq(old_sch->lock); |
| 949 | sch_set_cdev(old_sch, NULL); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 950 | spin_unlock_irq(old_sch->lock); |
| 951 | css_schedule_eval(old_sch->schid); |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 952 | } |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 953 | /* Release child reference for old parent. */ |
| 954 | put_device(&old_sch->dev); |
| 955 | /* Initialize new subchannel. */ |
| 956 | spin_lock_irq(sch->lock); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 957 | cdev->ccwlock = sch->lock; |
| 958 | if (!sch_is_pseudo_sch(sch)) |
| 959 | sch_set_cdev(sch, cdev); |
| 960 | spin_unlock_irq(sch->lock); |
| 961 | if (!sch_is_pseudo_sch(sch)) |
| 962 | css_update_ssd_info(sch); |
| 963 | return 0; |
| 964 | } |
| 965 | |
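/*
 * The "orphanage" is the channel subsystem's pseudo subchannel. Moving a
 * device there keeps the struct ccw_device (and its driver binding) alive
 * while no real I/O subchannel is available for it, so that it can later
 * be re-attached from io_subchannel_sch_event().
 */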
| 966 | static int ccw_device_move_to_orph(struct ccw_device *cdev) |
| 967 | { |
| 968 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
| 969 | struct channel_subsystem *css = to_css(sch->dev.parent); |
| 970 | |
| 971 | return ccw_device_move_to_sch(cdev, css->pseudo_subchannel); |
Cornelia Huck | d7b5a4c9 | 2006-12-08 15:54:28 +0100 | [diff] [blame] | 972 | } |
| 973 | |
Cornelia Huck | 602b20f | 2008-01-26 14:10:39 +0100 | [diff] [blame] | 974 | static void io_subchannel_irq(struct subchannel *sch) |
| 975 | { |
| 976 | struct ccw_device *cdev; |
| 977 | |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 978 | cdev = sch_get_cdev(sch); |
Cornelia Huck | 602b20f | 2008-01-26 14:10:39 +0100 | [diff] [blame] | 979 | |
Sebastian Ott | efd986d | 2009-09-11 10:28:18 +0200 | [diff] [blame] | 980 | CIO_TRACE_EVENT(6, "IRQ"); |
| 981 | CIO_TRACE_EVENT(6, dev_name(&sch->dev)); |
Cornelia Huck | 602b20f | 2008-01-26 14:10:39 +0100 | [diff] [blame] | 982 | if (cdev) |
| 983 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
Peter Oberparleiter | de400d6 | 2011-10-30 15:16:04 +0100 | [diff] [blame] | 984 | else |
Heiko Carstens | 420f42e | 2013-01-02 15:18:18 +0100 | [diff] [blame] | 985 | inc_irq_stat(IRQIO_CIO); |
Cornelia Huck | 602b20f | 2008-01-26 14:10:39 +0100 | [diff] [blame] | 986 | } |
| 987 | |
Sebastian Ott | 13952ec | 2008-12-25 13:39:13 +0100 | [diff] [blame] | 988 | void io_subchannel_init_config(struct subchannel *sch) |
| 989 | { |
| 990 | memset(&sch->config, 0, sizeof(sch->config)); |
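	/*
	 * csense: request concurrent sense, so sense data is presented
	 * together with a unit check rather than via a separate SENSE
	 * channel program.
	 */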
| 991 | sch->config.csense = 1; |
Sebastian Ott | 13952ec | 2008-12-25 13:39:13 +0100 | [diff] [blame] | 992 | } |
| 993 | |
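/*
 * Seed the subchannel's path masks and interruption subclass: opm comes
 * from the channel-path state (all paths for the console subchannel),
 * lpm restricts it to the physically available paths (PAM), and the ISC
 * selects the interruption subclass the common I/O layer listens on.
 */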
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 994 | static void io_subchannel_init_fields(struct subchannel *sch) |
| 995 | { |
| 996 | if (cio_is_console(sch->schid)) |
| 997 | sch->opm = 0xff; |
| 998 | else |
| 999 | sch->opm = chp_get_sch_opm(sch); |
| 1000 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
Cornelia Huck | 3a3fc29 | 2008-07-14 09:58:58 +0200 | [diff] [blame] | 1001 | sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; |
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 1002 | |
| 1003 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" |
| 1004 | " - PIM = %02X, PAM = %02X, POM = %02X\n", |
| 1005 | sch->schib.pmcw.dev, sch->schid.ssid, |
| 1006 | sch->schid.sch_no, sch->schib.pmcw.pim, |
| 1007 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); |
Sebastian Ott | 13952ec | 2008-12-25 13:39:13 +0100 | [diff] [blame] | 1008 | |
| 1009 | io_subchannel_init_config(sch); |
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 1010 | } |
| 1011 | |
Cornelia Huck | 90ed2b6 | 2008-12-25 13:39:09 +0100 | [diff] [blame] | 1012 | /* |
| 1013 | * Note: We always return 0 so that we bind to the device even on error. |
| 1014 | * This is needed so that our remove function is called on unregister. |
| 1015 | */ |
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 1016 | static int io_subchannel_probe(struct subchannel *sch) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | { |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1018 | struct io_subchannel_private *io_priv; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | struct ccw_device *cdev; |
| 1020 | int rc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | |
Peter Oberparleiter | 6d7c5af | 2009-10-14 12:43:50 +0200 | [diff] [blame] | 1022 | if (cio_is_console(sch->schid)) { |
Cornelia Huck | 7e9db9e | 2008-07-14 09:58:44 +0200 | [diff] [blame] | 1023 | rc = sysfs_create_group(&sch->dev.kobj, |
| 1024 | &io_subchannel_attr_group); |
| 1025 | if (rc) |
| 1026 | CIO_MSG_EVENT(0, "Failed to create io subchannel " |
| 1027 | "attributes for subchannel " |
| 1028 | "0.%x.%04x (rc=%d)\n", |
| 1029 | sch->schid.ssid, sch->schid.sch_no, rc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | /* |
Vineeth Vijayan | a84d1c5 | 2021-06-29 09:38:22 +0200 | [diff] [blame] | 1031 | * The console subchannel already has an associated ccw_device. |
| 1032 | * Register it and exit. |
| 1033 | */ |
Peter Oberparleiter | 6d7c5af | 2009-10-14 12:43:50 +0200 | [diff] [blame] | 1034 | cdev = sch_get_cdev(sch); |
Julian Wiedmann | 29c53de | 2020-11-30 10:19:57 +0200 | [diff] [blame] | 1035 | rc = device_add(&cdev->dev); |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1036 | if (rc) { |
| 1037 | /* Release online reference. */ |
| 1038 | put_device(&cdev->dev); |
| 1039 | goto out_schedule; |
| 1040 | } |
Sebastian Ott | 0ad8f714a | 2013-04-13 13:06:27 +0200 | [diff] [blame] | 1041 | if (atomic_dec_and_test(&ccw_device_init_count)) |
| 1042 | wake_up(&ccw_device_init_wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | return 0; |
| 1044 | } |
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 1045 | io_subchannel_init_fields(sch); |
Sebastian Ott | f444cc0 | 2008-12-25 13:39:14 +0100 | [diff] [blame] | 1046 | rc = cio_commit_config(sch); |
| 1047 | if (rc) |
| 1048 | goto out_schedule; |
Cornelia Huck | 7e9db9e | 2008-07-14 09:58:44 +0200 | [diff] [blame] | 1049 | rc = sysfs_create_group(&sch->dev.kobj, |
| 1050 | &io_subchannel_attr_group); |
| 1051 | if (rc) |
Cornelia Huck | 90ed2b6 | 2008-12-25 13:39:09 +0100 | [diff] [blame] | 1052 | goto out_schedule; |
Cornelia Huck | cd6b4f2 | 2008-01-26 14:10:43 +0100 | [diff] [blame] | 1053 | /* Allocate I/O subchannel private data. */ |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1054 | io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); |
| 1055 | if (!io_priv) |
Peter Oberparleiter | 48e4c38 | 2009-12-07 12:51:15 +0100 | [diff] [blame] | 1056 | goto out_schedule; |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1057 | |
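	/*
	 * The dma_area is accessed by the channel subsystem itself, not
	 * only by the CPU, hence the coherent DMA allocation against the
	 * subchannel device.
	 */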
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 1058 | io_priv->dma_area = dma_alloc_coherent(&sch->dev, |
| 1059 | sizeof(*io_priv->dma_area), |
| 1060 | &io_priv->dma_area_dma, GFP_KERNEL); |
| 1061 | if (!io_priv->dma_area) { |
| 1062 | kfree(io_priv); |
| 1063 | goto out_schedule; |
| 1064 | } |
| 1065 | |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1066 | set_io_private(sch, io_priv); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1067 | css_schedule_eval(sch->schid); |
Cornelia Huck | 7e9db9e | 2008-07-14 09:58:44 +0200 | [diff] [blame] | 1068 | return 0; |
Peter Oberparleiter | 48e4c38 | 2009-12-07 12:51:15 +0100 | [diff] [blame] | 1069 | |
Cornelia Huck | 90ed2b6 | 2008-12-25 13:39:09 +0100 | [diff] [blame] | 1070 | out_schedule: |
Peter Oberparleiter | 390935a | 2009-12-07 12:51:18 +0100 | [diff] [blame] | 1071 | spin_lock_irq(sch->lock); |
| 1072 | css_sched_sch_todo(sch, SCH_TODO_UNREG); |
| 1073 | spin_unlock_irq(sch->lock); |
Cornelia Huck | 90ed2b6 | 2008-12-25 13:39:09 +0100 | [diff] [blame] | 1074 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1075 | } |
| 1076 | |
Uwe Kleine-König | a7bdb9a | 2021-07-13 21:35:19 +0200 | [diff] [blame] | 1077 | static void io_subchannel_remove(struct subchannel *sch) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | { |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1079 | struct io_subchannel_private *io_priv = to_io_private(sch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1080 | struct ccw_device *cdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 1082 | cdev = sch_get_cdev(sch); |
| 1083 | if (!cdev) |
Peter Oberparleiter | 48e4c38 | 2009-12-07 12:51:15 +0100 | [diff] [blame] | 1084 | goto out_free; |
Sebastian Ott | 135a8b4 | 2018-03-15 15:03:43 +0100 | [diff] [blame] | 1085 | |
| 1086 | ccw_device_unregister(cdev); |
| 1087 | spin_lock_irq(sch->lock); |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 1088 | sch_set_cdev(sch, NULL); |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1089 | set_io_private(sch, NULL); |
Sebastian Ott | 135a8b4 | 2018-03-15 15:03:43 +0100 | [diff] [blame] | 1090 | spin_unlock_irq(sch->lock); |
Peter Oberparleiter | 48e4c38 | 2009-12-07 12:51:15 +0100 | [diff] [blame] | 1091 | out_free: |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 1092 | dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), |
| 1093 | io_priv->dma_area, io_priv->dma_area_dma); |
Sebastian Ott | f92519e | 2011-03-15 17:08:27 +0100 | [diff] [blame] | 1094 | kfree(io_priv); |
Cornelia Huck | 7e9db9e | 2008-07-14 09:58:44 +0200 | [diff] [blame] | 1095 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | } |
| 1097 | |
Cornelia Huck | 602b20f | 2008-01-26 14:10:39 +0100 | [diff] [blame] | 1098 | static void io_subchannel_verify(struct subchannel *sch) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | { |
| 1100 | struct ccw_device *cdev; |
| 1101 | |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 1102 | cdev = sch_get_cdev(sch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | if (cdev) |
| 1104 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
| 1105 | } |
| 1106 | |
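/*
 * A channel path is being taken away from this subchannel: if I/O is
 * currently running on exactly that path, kill it (online devices) or
 * clear the subchannel, then trigger path verification. If the subchannel
 * itself turns out to be gone, tell the state machine the device is no
 * longer operational.
 */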
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1107 | static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | { |
| 1109 | struct ccw_device *cdev; |
| 1110 | |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 1111 | cdev = sch_get_cdev(sch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1112 | if (!cdev) |
| 1113 | return; |
Peter Oberparleiter | 4257aae | 2009-12-07 12:51:29 +0100 | [diff] [blame] | 1114 | if (cio_update_schib(sch)) |
| 1115 | goto err; |
| 1116 | /* Check for I/O on path. */ |
| 1117 | if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask) |
| 1118 | goto out; |
| 1119 | if (cdev->private->state == DEV_STATE_ONLINE) { |
| 1120 | ccw_device_kill_io(cdev); |
| 1121 | goto out; |
| 1122 | } |
| 1123 | if (cio_clear(sch)) |
| 1124 | goto err; |
| 1125 | out: |
| 1126 | /* Trigger path verification. */ |
| 1127 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
| 1128 | return; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1129 | |
Peter Oberparleiter | 4257aae | 2009-12-07 12:51:29 +0100 | [diff] [blame] | 1130 | err: |
| 1131 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1132 | } |
| 1133 | |
Cornelia Huck | 99611f8 | 2008-07-14 09:59:02 +0200 | [diff] [blame] | 1134 | static int io_subchannel_chp_event(struct subchannel *sch, |
| 1135 | struct chp_link *link, int event) |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1136 | { |
Sebastian Ott | 585b954 | 2010-10-25 16:10:34 +0200 | [diff] [blame] | 1137 | struct ccw_device *cdev = sch_get_cdev(sch); |
Vineeth Vijayan | 32ef938 | 2020-10-08 15:13:29 +0200 | [diff] [blame] | 1138 | int mask, chpid, valid_bit; |
| 1139 | int path_event[8]; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1140 | |
Cornelia Huck | 99611f8 | 2008-07-14 09:59:02 +0200 | [diff] [blame] | 1141 | mask = chp_ssd_get_mask(&sch->ssd_info, link); |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1142 | if (!mask) |
| 1143 | return 0; |
| 1144 | switch (event) { |
| 1145 | case CHP_VARY_OFF: |
| 1146 | sch->opm &= ~mask; |
| 1147 | sch->lpm &= ~mask; |
Sebastian Ott | 585b954 | 2010-10-25 16:10:34 +0200 | [diff] [blame] | 1148 | if (cdev) |
| 1149 | cdev->private->path_gone_mask |= mask; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1150 | io_subchannel_terminate_path(sch, mask); |
| 1151 | break; |
| 1152 | case CHP_VARY_ON: |
| 1153 | sch->opm |= mask; |
| 1154 | sch->lpm |= mask; |
Sebastian Ott | 585b954 | 2010-10-25 16:10:34 +0200 | [diff] [blame] | 1155 | if (cdev) |
| 1156 | cdev->private->path_new_mask |= mask; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1157 | io_subchannel_verify(sch); |
| 1158 | break; |
| 1159 | case CHP_OFFLINE: |
Sebastian Ott | cdb912a | 2008-12-25 13:39:12 +0100 | [diff] [blame] | 1160 | if (cio_update_schib(sch)) |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1161 | return -ENODEV; |
Sebastian Ott | 585b954 | 2010-10-25 16:10:34 +0200 | [diff] [blame] | 1162 | if (cdev) |
| 1163 | cdev->private->path_gone_mask |= mask; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1164 | io_subchannel_terminate_path(sch, mask); |
| 1165 | break; |
| 1166 | case CHP_ONLINE: |
Sebastian Ott | cdb912a | 2008-12-25 13:39:12 +0100 | [diff] [blame] | 1167 | if (cio_update_schib(sch)) |
| 1168 | return -ENODEV; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1169 | sch->lpm |= mask & sch->opm; |
Sebastian Ott | 585b954 | 2010-10-25 16:10:34 +0200 | [diff] [blame] | 1170 | if (cdev) |
| 1171 | cdev->private->path_new_mask |= mask; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1172 | io_subchannel_verify(sch); |
| 1173 | break; |
Vineeth Vijayan | 32ef938 | 2020-10-08 15:13:29 +0200 | [diff] [blame] | 1174 | case CHP_FCES_EVENT: |
| 1175 | /* Forward Endpoint Security event */ |
| 1176 | for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++, |
| 1177 | valid_bit >>= 1) { |
| 1178 | if (mask & valid_bit) |
| 1179 | path_event[chpid] = PE_PATH_FCES_EVENT; |
| 1180 | else |
| 1181 | path_event[chpid] = PE_NONE; |
| 1182 | } |
Vineeth Vijayan | dd9cb84 | 2022-02-02 21:45:56 +0100 | [diff] [blame] | 1183 | if (cdev && cdev->drv && cdev->drv->path_event) |
Vineeth Vijayan | 32ef938 | 2020-10-08 15:13:29 +0200 | [diff] [blame] | 1184 | cdev->drv->path_event(cdev, path_event); |
| 1185 | break; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1186 | } |
| 1187 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | } |
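/*
 * Drivers that care about endpoint-security changes see the per-CHPID
 * array built above through their ->path_event() callback. A minimal
 * sketch (the driver and handler names are made up for illustration):
 *
 *	static void foo_path_event(struct ccw_device *cdev, int *path_event)
 *	{
 *		int chp;
 *
 *		for (chp = 0; chp < 8; chp++)
 *			if (path_event[chp] & PE_PATH_FCES_EVENT)
 *				dev_info(&cdev->dev,
 *					 "FCES notification for CHPID position %d\n",
 *					 chp);
 *	}
 *
 *	static struct ccw_driver foo_driver = {
 *		...
 *		.path_event	= foo_path_event,
 *	};
 */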
| 1189 | |
Sebastian Ott | 6e9a0f6 | 2009-12-07 12:51:38 +0100 | [diff] [blame] | 1190 | static void io_subchannel_quiesce(struct subchannel *sch) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | struct ccw_device *cdev; |
| 1193 | int ret; |
| 1194 | |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1195 | spin_lock_irq(sch->lock); |
Cornelia Huck | db6a642 | 2008-01-26 14:10:46 +0100 | [diff] [blame] | 1196 | cdev = sch_get_cdev(sch); |
Cornelia Huck | a8237fc | 2006-01-06 00:19:21 -0800 | [diff] [blame] | 1197 | if (cio_is_console(sch->schid)) |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1198 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | if (!sch->schib.pmcw.ena) |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1200 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | ret = cio_disable_subchannel(sch); |
| 1202 | if (ret != -EBUSY) |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1203 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 | if (cdev->handler) |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1205 | cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); |
| 1206 | while (ret == -EBUSY) { |
| 1207 | cdev->private->state = DEV_STATE_QUIESCE; |
Peter Oberparleiter | 376ae47 | 2010-10-25 16:10:44 +0200 | [diff] [blame] | 1208 | cdev->private->iretry = 255; |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1209 | ret = ccw_device_cancel_halt_clear(cdev); |
| 1210 | if (ret == -EBUSY) { |
| 1211 | ccw_device_set_timeout(cdev, HZ/10); |
| 1212 | spin_unlock_irq(sch->lock); |
| 1213 | wait_event(cdev->private->wait_q, |
| 1214 | cdev->private->state != DEV_STATE_QUIESCE); |
| 1215 | spin_lock_irq(sch->lock); |
| 1216 | } |
| 1217 | ret = cio_disable_subchannel(sch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | } |
Sebastian Ott | 56e6b79 | 2009-12-07 12:51:35 +0100 | [diff] [blame] | 1219 | out_unlock: |
| 1220 | spin_unlock_irq(sch->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | } |
| 1222 | |
Sebastian Ott | 6e9a0f6 | 2009-12-07 12:51:38 +0100 | [diff] [blame] | 1223 | static void io_subchannel_shutdown(struct subchannel *sch) |
| 1224 | { |
| 1225 | io_subchannel_quiesce(sch); |
| 1226 | } |
| 1227 | |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1228 | static int device_is_disconnected(struct ccw_device *cdev) |
| 1229 | { |
| 1230 | if (!cdev) |
| 1231 | return 0; |
| 1232 | return (cdev->private->state == DEV_STATE_DISCONNECTED || |
| 1233 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); |
| 1234 | } |
| 1235 | |
| 1236 | static int recovery_check(struct device *dev, void *data) |
| 1237 | { |
| 1238 | struct ccw_device *cdev = to_ccwdev(dev); |
Sebastian Ott | 55fb734 | 2017-09-14 13:55:22 +0200 | [diff] [blame] | 1239 | struct subchannel *sch; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1240 | int *redo = data; |
| 1241 | |
| 1242 | spin_lock_irq(cdev->ccwlock); |
| 1243 | switch (cdev->private->state) { |
Sebastian Ott | 55fb734 | 2017-09-14 13:55:22 +0200 | [diff] [blame] | 1244 | case DEV_STATE_ONLINE: |
| 1245 | sch = to_subchannel(cdev->dev.parent); |
| 1246 | if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) |
| 1247 | break; |
Joe Perches | b09fcec | 2020-03-10 13:39:50 -0700 | [diff] [blame] | 1248 | fallthrough; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1249 | case DEV_STATE_DISCONNECTED: |
| 1250 | CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", |
| 1251 | cdev->private->dev_id.ssid, |
| 1252 | cdev->private->dev_id.devno); |
| 1253 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
| 1254 | *redo = 1; |
| 1255 | break; |
| 1256 | case DEV_STATE_DISCONNECTED_SENSE_ID: |
| 1257 | *redo = 1; |
| 1258 | break; |
| 1259 | } |
| 1260 | spin_unlock_irq(cdev->ccwlock); |
| 1261 | |
| 1262 | return 0; |
| 1263 | } |
| 1264 | |
| 1265 | static void recovery_work_func(struct work_struct *unused) |
| 1266 | { |
| 1267 | int redo = 0; |
| 1268 | |
| 1269 | bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); |
| 1270 | if (redo) { |
| 1271 | spin_lock_irq(&recovery_lock); |
| 1272 | if (!timer_pending(&recovery_timer)) { |
| 1273 | if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) |
| 1274 | recovery_phase++; |
| 1275 | mod_timer(&recovery_timer, jiffies + |
| 1276 | recovery_delay[recovery_phase] * HZ); |
| 1277 | } |
| 1278 | spin_unlock_irq(&recovery_lock); |
| 1279 | } else |
Sebastian Ott | 55fb734 | 2017-09-14 13:55:22 +0200 | [diff] [blame] | 1280 | CIO_MSG_EVENT(3, "recovery: end\n"); |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1281 | } |
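/*
 * Each pass that still finds devices to recover re-arms the timer with
 * the next, larger entry of recovery_delay[], so retries back off until
 * the last entry of the table is reached.
 */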
| 1282 | |
| 1283 | static DECLARE_WORK(recovery_work, recovery_work_func); |
| 1284 | |
Kees Cook | 846d0c6 | 2017-10-16 16:43:25 -0700 | [diff] [blame] | 1285 | static void recovery_func(struct timer_list *unused) |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1286 | { |
| 1287 | /* |
| 1288 | * We can't do our recovery in softirq context and it's not |
| 1289 | * performance critical, so we schedule it. |
| 1290 | */ |
| 1291 | schedule_work(&recovery_work); |
| 1292 | } |
| 1293 | |
Sebastian Ott | 55fb734 | 2017-09-14 13:55:22 +0200 | [diff] [blame] | 1294 | void ccw_device_schedule_recovery(void) |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1295 | { |
| 1296 | unsigned long flags; |
| 1297 | |
Sebastian Ott | 55fb734 | 2017-09-14 13:55:22 +0200 | [diff] [blame] | 1298 | CIO_MSG_EVENT(3, "recovery: schedule\n"); |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1299 | spin_lock_irqsave(&recovery_lock, flags); |
| 1300 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { |
| 1301 | recovery_phase = 0; |
| 1302 | mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); |
| 1303 | } |
| 1304 | spin_unlock_irqrestore(&recovery_lock, flags); |
| 1305 | } |
| 1306 | |
Peter Oberparleiter | ecf5d9e | 2008-10-10 21:33:06 +0200 | [diff] [blame] | 1307 | static int purge_fn(struct device *dev, void *data) |
| 1308 | { |
| 1309 | struct ccw_device *cdev = to_ccwdev(dev); |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1310 | struct ccw_dev_id *id = &cdev->private->dev_id; |
Vineeth Vijayan | fa172f0 | 2021-09-15 13:39:16 +0200 | [diff] [blame] | 1311 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
Peter Oberparleiter | ecf5d9e | 2008-10-10 21:33:06 +0200 | [diff] [blame] | 1312 | |
| 1313 | spin_lock_irq(cdev->ccwlock); |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1314 | if (is_blacklisted(id->ssid, id->devno) && |
Peter Oberparleiter | a2fc848 | 2011-04-04 09:43:32 +0200 | [diff] [blame] | 1315 | (cdev->private->state == DEV_STATE_OFFLINE) && |
| 1316 | (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1317 | CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, |
| 1318 | id->devno); |
| 1319 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
Vineeth Vijayan | fa172f0 | 2021-09-15 13:39:16 +0200 | [diff] [blame] | 1320 | css_sched_sch_todo(sch, SCH_TODO_UNREG); |
Peter Oberparleiter | a2fc848 | 2011-04-04 09:43:32 +0200 | [diff] [blame] | 1321 | atomic_set(&cdev->private->onoff, 0); |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1322 | } |
Peter Oberparleiter | ecf5d9e | 2008-10-10 21:33:06 +0200 | [diff] [blame] | 1323 | spin_unlock_irq(cdev->ccwlock); |
Peter Oberparleiter | ecf5d9e | 2008-10-10 21:33:06 +0200 | [diff] [blame] | 1324 | /* Abort loop in case of pending signal. */ |
| 1325 | if (signal_pending(current)) |
| 1326 | return -EINTR; |
| 1327 | |
| 1328 | return 0; |
| 1329 | } |
| 1330 | |
| 1331 | /** |
| 1332 | * ccw_purge_blacklisted - purge unused, blacklisted devices |
| 1333 | * |
| 1334 | * Unregister all ccw devices that are offline and on the blacklist. |
| 1335 | */ |
| 1336 | int ccw_purge_blacklisted(void) |
| 1337 | { |
| 1338 | CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); |
| 1339 | bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); |
| 1340 | return 0; |
| 1341 | } |
| 1342 | |
Peter Oberparleiter | 6afcc77 | 2009-10-06 10:34:02 +0200 | [diff] [blame] | 1343 | void ccw_device_set_disconnected(struct ccw_device *cdev) |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1344 | { |
| 1345 | if (!cdev) |
| 1346 | return; |
| 1347 | ccw_device_set_timeout(cdev, 0); |
| 1348 | cdev->private->flags.fake_irb = 0; |
| 1349 | cdev->private->state = DEV_STATE_DISCONNECTED; |
| 1350 | if (cdev->online) |
| 1351 | ccw_device_schedule_recovery(); |
| 1352 | } |
| 1353 | |
Peter Oberparleiter | 91c3691 | 2008-08-21 19:46:39 +0200 | [diff] [blame] | 1354 | void ccw_device_set_notoper(struct ccw_device *cdev) |
| 1355 | { |
| 1356 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
| 1357 | |
| 1358 | CIO_TRACE_EVENT(2, "notoper"); |
Cornelia Huck | b9d3aed | 2008-10-10 21:33:11 +0200 | [diff] [blame] | 1359 | CIO_TRACE_EVENT(2, dev_name(&sch->dev)); |
Peter Oberparleiter | 91c3691 | 2008-08-21 19:46:39 +0200 | [diff] [blame] | 1360 | ccw_device_set_timeout(cdev, 0); |
| 1361 | cio_disable_subchannel(sch); |
| 1362 | cdev->private->state = DEV_STATE_NOT_OPER; |
| 1363 | } |
| 1364 | |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1365 | enum io_sch_action { |
| 1366 | IO_SCH_UNREG, |
| 1367 | IO_SCH_ORPH_UNREG, |
| 1368 | IO_SCH_ATTACH, |
| 1369 | IO_SCH_UNREG_ATTACH, |
| 1370 | IO_SCH_ORPH_ATTACH, |
| 1371 | IO_SCH_REPROBE, |
| 1372 | IO_SCH_VERIFY, |
| 1373 | IO_SCH_DISC, |
| 1374 | IO_SCH_NOP, |
| 1375 | }; |
| 1376 | |
| 1377 | static enum io_sch_action sch_get_action(struct subchannel *sch) |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1378 | { |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1379 | struct ccw_device *cdev; |
| 1380 | |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1381 | cdev = sch_get_cdev(sch); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1382 | if (cio_update_schib(sch)) { |
| 1383 | /* Not operational. */ |
| 1384 | if (!cdev) |
| 1385 | return IO_SCH_UNREG; |
Sebastian Ott | 76e6fb4 | 2010-02-26 22:37:28 +0100 | [diff] [blame] | 1386 | if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1387 | return IO_SCH_UNREG; |
| 1388 | return IO_SCH_ORPH_UNREG; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1389 | } |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1390 | /* Operational. */ |
| 1391 | if (!cdev) |
| 1392 | return IO_SCH_ATTACH; |
| 1393 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
Sebastian Ott | 76e6fb4 | 2010-02-26 22:37:28 +0100 | [diff] [blame] | 1394 | if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1395 | return IO_SCH_UNREG_ATTACH; |
| 1396 | return IO_SCH_ORPH_ATTACH; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1397 | } |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1398 | if ((sch->schib.pmcw.pam & sch->opm) == 0) { |
Sebastian Ott | 76e6fb4 | 2010-02-26 22:37:28 +0100 | [diff] [blame] | 1399 | if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1400 | return IO_SCH_UNREG; |
| 1401 | return IO_SCH_DISC; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1402 | } |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1403 | if (device_is_disconnected(cdev)) |
| 1404 | return IO_SCH_REPROBE; |
Vineeth Vijayan | 8cc0dcf | 2020-11-20 09:36:38 +0100 | [diff] [blame] | 1405 | if (cdev->online) |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1406 | return IO_SCH_VERIFY; |
Sebastian Ott | 43d0be7 | 2012-09-05 14:19:42 +0200 | [diff] [blame] | 1407 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
| 1408 | return IO_SCH_UNREG_ATTACH; |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1409 | return IO_SCH_NOP; |
| 1410 | } |
| 1411 | |
| 1412 | /** |
| 1413 | * io_subchannel_sch_event - process subchannel event |
| 1414 | * @sch: subchannel |
| 1415 | * @process: non-zero if function is called in process context |
| 1416 | * |
| 1417 | * An unspecified event occurred for this subchannel. Adjust data according |
| 1418 | * to the current operational state of the subchannel and device. Return |
| 1419 | * zero when the event has been handled sufficiently or -EAGAIN when this |
| 1420 | * function should be called again in process context. |
| 1421 | */ |
| 1422 | static int io_subchannel_sch_event(struct subchannel *sch, int process) |
| 1423 | { |
| 1424 | unsigned long flags; |
| 1425 | struct ccw_device *cdev; |
| 1426 | struct ccw_dev_id dev_id; |
| 1427 | enum io_sch_action action; |
| 1428 | int rc = -EAGAIN; |
| 1429 | |
| 1430 | spin_lock_irqsave(sch->lock, flags); |
| 1431 | if (!device_is_registered(&sch->dev)) |
| 1432 | goto out_unlock; |
Peter Oberparleiter | 390935a | 2009-12-07 12:51:18 +0100 | [diff] [blame] | 1433 | if (work_pending(&sch->todo_work)) |
| 1434 | goto out_unlock; |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1435 | cdev = sch_get_cdev(sch); |
| 1436 | if (cdev && work_pending(&cdev->private->todo_work)) |
| 1437 | goto out_unlock; |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1438 | action = sch_get_action(sch); |
| 1439 | CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n", |
| 1440 | sch->schid.ssid, sch->schid.sch_no, process, |
| 1441 | action); |
| 1442 | /* Perform immediate actions while holding the lock. */ |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1443 | switch (action) { |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1444 | case IO_SCH_REPROBE: |
| 1445 | /* Trigger device recognition. */ |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1446 | ccw_device_trigger_reprobe(cdev); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1447 | rc = 0; |
| 1448 | goto out_unlock; |
| 1449 | case IO_SCH_VERIFY: |
| 1450 | /* Trigger path verification. */ |
| 1451 | io_subchannel_verify(sch); |
| 1452 | rc = 0; |
| 1453 | goto out_unlock; |
| 1454 | case IO_SCH_DISC: |
| 1455 | ccw_device_set_disconnected(cdev); |
| 1456 | rc = 0; |
| 1457 | goto out_unlock; |
| 1458 | case IO_SCH_ORPH_UNREG: |
| 1459 | case IO_SCH_ORPH_ATTACH: |
Peter Oberparleiter | 6afcc77 | 2009-10-06 10:34:02 +0200 | [diff] [blame] | 1460 | ccw_device_set_disconnected(cdev); |
Peter Oberparleiter | 91c3691 | 2008-08-21 19:46:39 +0200 | [diff] [blame] | 1461 | break; |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1462 | case IO_SCH_UNREG_ATTACH: |
| 1463 | case IO_SCH_UNREG: |
Sebastian Ott | 16d2ce2 | 2010-11-10 10:05:53 +0100 | [diff] [blame] | 1464 | if (!cdev) |
| 1465 | break; |
| 1466 | if (cdev->private->state == DEV_STATE_SENSE_ID) { |
| 1467 | /* |
| 1468 | * Note: delayed work triggered by this event |
| 1469 | * and repeated calls to sch_event are synchronized |
| 1470 | * by the above check for work_pending(cdev). |
| 1471 | */ |
| 1472 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
| 1473 | } else |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1474 | ccw_device_set_notoper(cdev); |
| 1475 | break; |
| 1476 | case IO_SCH_NOP: |
| 1477 | rc = 0; |
| 1478 | goto out_unlock; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1479 | default: |
| 1480 | break; |
| 1481 | } |
| 1482 | spin_unlock_irqrestore(sch->lock, flags); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1483 | /* All other actions require process context. */ |
| 1484 | if (!process) |
| 1485 | goto out; |
| 1486 | /* Handle attached ccw device. */ |
| 1487 | switch (action) { |
| 1488 | case IO_SCH_ORPH_UNREG: |
| 1489 | case IO_SCH_ORPH_ATTACH: |
| 1490 | /* Move ccw device to orphanage. */ |
| 1491 | rc = ccw_device_move_to_orph(cdev); |
| 1492 | if (rc) |
| 1493 | goto out; |
| 1494 | break; |
| 1495 | case IO_SCH_UNREG_ATTACH: |
Sebastian Ott | 3368ba2 | 2012-09-05 14:20:41 +0200 | [diff] [blame] | 1496 | spin_lock_irqsave(sch->lock, flags); |
Sebastian Ott | 3368ba2 | 2012-09-05 14:20:41 +0200 | [diff] [blame] | 1497 | sch_set_cdev(sch, NULL); |
| 1498 | spin_unlock_irqrestore(sch->lock, flags); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1499 | /* Unregister ccw device. */ |
Sebastian Ott | 74b6127 | 2010-10-25 16:10:26 +0200 | [diff] [blame] | 1500 | ccw_device_unregister(cdev); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1501 | break; |
| 1502 | default: |
| 1503 | break; |
| 1504 | } |
| 1505 | /* Handle subchannel. */ |
| 1506 | switch (action) { |
| 1507 | case IO_SCH_ORPH_UNREG: |
| 1508 | case IO_SCH_UNREG: |
Vineeth Vijayan | 2f7484f | 2021-04-23 12:08:43 +0200 | [diff] [blame] | 1509 | css_sch_device_unregister(sch); |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1510 | break; |
| 1511 | case IO_SCH_ORPH_ATTACH: |
| 1512 | case IO_SCH_UNREG_ATTACH: |
| 1513 | case IO_SCH_ATTACH: |
| 1514 | dev_id.ssid = sch->schid.ssid; |
| 1515 | dev_id.devno = sch->schib.pmcw.dev; |
| 1516 | cdev = get_ccwdev_by_dev_id(&dev_id); |
| 1517 | if (!cdev) { |
| 1518 | sch_create_and_recog_new_device(sch); |
| 1519 | break; |
| 1520 | } |
| 1521 | rc = ccw_device_move_to_sch(cdev, sch); |
| 1522 | if (rc) { |
| 1523 | /* Release reference from get_ccwdev_by_dev_id() */ |
| 1524 | put_device(&cdev->dev); |
| 1525 | goto out; |
| 1526 | } |
| 1527 | spin_lock_irqsave(sch->lock, flags); |
| 1528 | ccw_device_trigger_reprobe(cdev); |
| 1529 | spin_unlock_irqrestore(sch->lock, flags); |
| 1530 | /* Release reference from get_ccwdev_by_dev_id() */ |
| 1531 | put_device(&cdev->dev); |
| 1532 | break; |
| 1533 | default: |
| 1534 | break; |
| 1535 | } |
| 1536 | return 0; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1537 | |
Peter Oberparleiter | 5d6e6b6 | 2009-12-07 12:51:17 +0100 | [diff] [blame] | 1538 | out_unlock: |
| 1539 | spin_unlock_irqrestore(sch->lock, flags); |
| 1540 | out: |
| 1541 | return rc; |
Cornelia Huck | c820de3 | 2008-07-14 09:58:45 +0200 | [diff] [blame] | 1542 | } |
| 1543 | |
Sebastian Ott | 137a14f | 2014-01-27 13:29:15 +0100 | [diff] [blame] | 1544 | static void ccw_device_set_int_class(struct ccw_device *cdev) |
| 1545 | { |
| 1546 | struct ccw_driver *cdrv = cdev->drv; |
| 1547 | |
| 1548 | /* Note: we interpret class 0 in this context as an uninitialized |
| 1549 | * field since it translates to a non-I/O interrupt class. */ |
| 1550 | if (cdrv->int_class != 0) |
| 1551 | cdev->private->int_class = cdrv->int_class; |
| 1552 | else |
| 1553 | cdev->private->int_class = IRQIO_CIO; |
| 1554 | } |
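/*
 * Drivers that want their interrupts accounted separately set .int_class
 * in their struct ccw_driver (for example, the DASD driver uses
 * IRQIO_DAS); interrupts for everything else are counted under IRQIO_CIO.
 */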
| 1555 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | #ifdef CONFIG_CCW_CONSOLE |
Sebastian Ott | 1e53209 | 2014-01-27 13:28:10 +0100 | [diff] [blame] | 1557 | int __init ccw_device_enable_console(struct ccw_device *cdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1558 | { |
Sebastian Ott | 1e53209 | 2014-01-27 13:28:10 +0100 | [diff] [blame] | 1559 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 | int rc; |
| 1561 | |
Sebastian Ott | 1e53209 | 2014-01-27 13:28:10 +0100 | [diff] [blame] | 1562 | if (!cdev->drv || !cdev->handler) |
| 1563 | return -EINVAL; |
| 1564 | |
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 1565 | io_subchannel_init_fields(sch); |
Sebastian Ott | f444cc0 | 2008-12-25 13:39:14 +0100 | [diff] [blame] | 1566 | rc = cio_commit_config(sch); |
| 1567 | if (rc) |
| 1568 | return rc; |
Cornelia Huck | 0ae7a7b | 2008-07-14 09:58:43 +0200 | [diff] [blame] | 1569 | sch->driver = &io_subchannel_driver; |
Peter Oberparleiter | 736b5db | 2009-12-07 12:51:21 +0100 | [diff] [blame] | 1570 | io_subchannel_recog(cdev, sch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1571 | /* Now wait for the async. recognition to come to an end. */ |
| 1572 | spin_lock_irq(cdev->ccwlock); |
| 1573 | while (!dev_fsm_final_state(cdev)) |
Sebastian Ott | 188561a | 2013-04-13 12:53:21 +0200 | [diff] [blame] | 1574 | ccw_device_wait_idle(cdev); |
| 1575 | |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1576 | /* Hold on to an extra reference while device is online. */ |
| 1577 | get_device(&cdev->dev); |
| 1578 | rc = ccw_device_online(cdev); |
| 1579 | if (rc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | goto out_unlock; |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1581 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | while (!dev_fsm_final_state(cdev)) |
Sebastian Ott | 188561a | 2013-04-13 12:53:21 +0200 | [diff] [blame] | 1583 | ccw_device_wait_idle(cdev); |
| 1584 | |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1585 | if (cdev->private->state == DEV_STATE_ONLINE) |
| 1586 | cdev->online = 1; |
| 1587 | else |
| 1588 | rc = -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | out_unlock: |
| 1590 | spin_unlock_irq(cdev->ccwlock); |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1591 | if (rc) /* Give up online reference since onlining failed. */ |
| 1592 | put_device(&cdev->dev); |
Peter Oberparleiter | 736b5db | 2009-12-07 12:51:21 +0100 | [diff] [blame] | 1593 | return rc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1594 | } |
| 1595 | |
Sebastian Ott | 1e53209 | 2014-01-27 13:28:10 +0100 | [diff] [blame] | 1596 | struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | { |
Sebastian Ott | 863fc84 | 2013-04-13 13:01:50 +0200 | [diff] [blame] | 1598 | struct io_subchannel_private *io_priv; |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1599 | struct ccw_device *cdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1600 | struct subchannel *sch; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 | sch = cio_probe_console(); |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1603 | if (IS_ERR(sch)) |
| 1604 | return ERR_CAST(sch); |
Sebastian Ott | 863fc84 | 2013-04-13 13:01:50 +0200 | [diff] [blame] | 1605 | |
| 1606 | io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 1607 | if (!io_priv) |
| 1608 | goto err_priv; |
| 1609 | io_priv->dma_area = dma_alloc_coherent(&sch->dev, |
| 1610 | sizeof(*io_priv->dma_area), |
| 1611 | &io_priv->dma_area_dma, GFP_KERNEL); |
| 1612 | if (!io_priv->dma_area) |
| 1613 | goto err_dma_area; |
Sebastian Ott | 2c3e7e1 | 2014-06-11 13:06:57 +0200 | [diff] [blame] | 1614 | set_io_private(sch, io_priv); |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1615 | cdev = io_subchannel_create_ccwdev(sch); |
| 1616 | if (IS_ERR(cdev)) { |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 1617 | dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), |
| 1618 | io_priv->dma_area, io_priv->dma_area_dma); |
| 1619 | set_io_private(sch, NULL); |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1620 | put_device(&sch->dev); |
| 1621 | kfree(io_priv); |
| 1622 | return cdev; |
| 1623 | } |
Sebastian Ott | 2253e8d | 2014-01-27 13:26:10 +0100 | [diff] [blame] | 1624 | cdev->drv = drv; |
Sebastian Ott | 137a14f | 2014-01-27 13:29:15 +0100 | [diff] [blame] | 1625 | ccw_device_set_int_class(cdev); |
Sebastian Ott | afdfed0 | 2013-04-13 13:03:03 +0200 | [diff] [blame] | 1626 | return cdev; |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 1627 | |
| 1628 | err_dma_area: |
| 1629 | kfree(io_priv); |
| 1630 | err_priv: |
| 1631 | put_device(&sch->dev); |
| 1632 | return ERR_PTR(-ENOMEM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1633 | } |
Cornelia Huck | 1f4e7ed | 2008-10-10 21:33:14 +0200 | [diff] [blame] | 1634 | |
Sebastian Ott | 1e53209 | 2014-01-27 13:28:10 +0100 | [diff] [blame] | 1635 | void __init ccw_device_destroy_console(struct ccw_device *cdev) |
| 1636 | { |
| 1637 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
| 1638 | struct io_subchannel_private *io_priv = to_io_private(sch); |
| 1639 | |
| 1640 | set_io_private(sch, NULL); |
Halil Pasic | 37db898 | 2019-03-26 12:41:09 +0100 | [diff] [blame] | 1641 | dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), |
| 1642 | io_priv->dma_area, io_priv->dma_area_dma); |
Qinglang Miao | 14d4c4f | 2020-12-01 14:31:50 +0800 | [diff] [blame] | 1643 | put_device(&sch->dev); |
| 1644 | put_device(&cdev->dev); |
Sebastian Ott | 1e53209 | 2014-01-27 13:28:10 +0100 | [diff] [blame] | 1645 | kfree(io_priv); |
| 1646 | } |
| 1647 | |
Sebastian Ott | 188561a | 2013-04-13 12:53:21 +0200 | [diff] [blame] | 1648 | /** |
| 1649 | * ccw_device_wait_idle() - busy wait for device to become idle |
| 1650 | * @cdev: ccw device |
| 1651 | * |
| 1652 | * Poll until activity control is zero, that is, no function or data |
| 1653 | * transfer is pending/active. |
| 1654 | * Called with device lock being held. |
| 1655 | */ |
| 1656 | void ccw_device_wait_idle(struct ccw_device *cdev) |
| 1657 | { |
| 1658 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
| 1659 | |
| 1660 | while (1) { |
| 1661 | cio_tsch(sch); |
| 1662 | if (sch->schib.scsw.cmd.actl == 0) |
| 1663 | break; |
Heiko Carstens | e0d62dc | 2020-12-14 21:44:39 +0100 | [diff] [blame] | 1664 | udelay(100); |
Sebastian Ott | 188561a | 2013-04-13 12:53:21 +0200 | [diff] [blame] | 1665 | } |
| 1666 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | #endif |
| 1668 | |
Cornelia Huck | b2ffd8e | 2007-10-12 16:11:17 +0200 | [diff] [blame] | 1669 | /** |
| 1670 | * get_ccwdev_by_busid() - obtain device from a bus id |
| 1671 | * @cdrv: driver the device is owned by |
| 1672 | * @bus_id: bus id of the device to be searched |
| 1673 | * |
| 1674 | * This function searches all devices owned by @cdrv for a device with a bus |
| 1675 | * id matching @bus_id. |
| 1676 | * Returns: |
| 1677 | * If a match is found, the reference count of the found device is
| 1678 | * increased and the device is returned; else %NULL is returned.
| 1679 | */ |
| 1680 | struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, |
| 1681 | const char *bus_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1682 | { |
Cornelia Huck | b0744bd | 2005-06-25 14:55:27 -0700 | [diff] [blame] | 1683 | struct device *dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1684 | |
Suzuki K Poulose | 6cda08a | 2019-07-23 23:18:32 +0100 | [diff] [blame] | 1685 | dev = driver_find_device_by_name(&cdrv->driver, bus_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | |
Heiko Carstens | d2c993d | 2006-07-12 16:41:55 +0200 | [diff] [blame] | 1687 | return dev ? to_ccwdev(dev) : NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | } |
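/*
 * Typical use (sketch only; the driver and bus id below are made up):
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(&foo_driver, "0.0.1234");
 *	if (cdev) {
 *		... use the device ...
 *		put_device(&cdev->dev);	to drop the reference taken by the lookup
 *	}
 */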
| 1689 | |
| 1690 | /************************** device driver handling ************************/ |
| 1691 | |
| 1692 | /* This is the implementation of the ccw_driver class. The probe, remove
| 1693 | * and release methods are very similar to the device_driver
| 1694 | * implementations, with the difference that they take ccw_device
| 1695 | * arguments.
| 1696 | * |
| 1697 | * A ccw driver also contains the information that is needed for |
| 1698 | * device matching. |
| 1699 | */ |
| 1700 | static int |
| 1701 | ccw_device_probe(struct device *dev)
| 1702 | { |
| 1703 | struct ccw_device *cdev = to_ccwdev(dev); |
| 1704 | struct ccw_driver *cdrv = to_ccwdrv(dev->driver); |
| 1705 | int ret; |
| 1706 | |
| 1707 | cdev->drv = cdrv; /* to let the driver call _set_online */ |
Sebastian Ott | 137a14f | 2014-01-27 13:29:15 +0100 | [diff] [blame] | 1708 | ccw_device_set_int_class(cdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1710 | if (ret) { |
Heiko Carstens | d2c993d | 2006-07-12 16:41:55 +0200 | [diff] [blame] | 1711 | cdev->drv = NULL; |
Heiko Carstens | 420f42e | 2013-01-02 15:18:18 +0100 | [diff] [blame] | 1712 | cdev->private->int_class = IRQIO_CIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1713 | return ret; |
| 1714 | } |
| 1715 | |
| 1716 | return 0; |
| 1717 | } |
| 1718 | |
Uwe Kleine-König | fc7a620 | 2021-07-13 21:35:22 +0200 | [diff] [blame] | 1719 | static void ccw_device_remove(struct device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | { |
| 1721 | struct ccw_device *cdev = to_ccwdev(dev); |
| 1722 | struct ccw_driver *cdrv = cdev->drv; |
Sebastian Ott | 135a8b4 | 2018-03-15 15:03:43 +0100 | [diff] [blame] | 1723 | struct subchannel *sch; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | int ret; |
| 1725 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1726 | if (cdrv->remove) |
| 1727 | cdrv->remove(cdev); |
Sebastian Ott | 74bd0d8 | 2013-12-16 10:51:54 +0100 | [diff] [blame] | 1728 | |
| 1729 | spin_lock_irq(cdev->ccwlock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1730 | if (cdev->online) { |
| 1731 | cdev->online = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 | ret = ccw_device_offline(cdev); |
| 1733 | spin_unlock_irq(cdev->ccwlock); |
| 1734 | if (ret == 0) |
| 1735 | wait_event(cdev->private->wait_q, |
| 1736 | dev_fsm_final_state(cdev)); |
| 1737 | else |
Michael Ernst | 139b83dd | 2008-05-07 09:22:54 +0200 | [diff] [blame] | 1738 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " |
Cornelia Huck | e556bbb | 2007-07-27 12:29:19 +0200 | [diff] [blame] | 1739 | "device 0.%x.%04x\n", |
| 1740 | ret, cdev->private->dev_id.ssid, |
| 1741 | cdev->private->dev_id.devno); |
Cornelia Huck | 9cd6742 | 2008-12-25 13:39:06 +0100 | [diff] [blame] | 1742 | /* Give up reference obtained in ccw_device_set_online(). */ |
| 1743 | put_device(&cdev->dev); |
Sebastian Ott | 74bd0d8 | 2013-12-16 10:51:54 +0100 | [diff] [blame] | 1744 | spin_lock_irq(cdev->ccwlock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | } |
| 1746 | ccw_device_set_timeout(cdev, 0); |
Heiko Carstens | d2c993d | 2006-07-12 16:41:55 +0200 | [diff] [blame] | 1747 | cdev->drv = NULL; |
Heiko Carstens | 420f42e | 2013-01-02 15:18:18 +0100 | [diff] [blame] | 1748 | cdev->private->int_class = IRQIO_CIO; |
Sebastian Ott | 135a8b4 | 2018-03-15 15:03:43 +0100 | [diff] [blame] | 1749 | sch = to_subchannel(cdev->dev.parent); |
Sebastian Ott | 74bd0d8 | 2013-12-16 10:51:54 +0100 | [diff] [blame] | 1750 | spin_unlock_irq(cdev->ccwlock); |
Sebastian Ott | 135a8b4 | 2018-03-15 15:03:43 +0100 | [diff] [blame] | 1751 | io_subchannel_quiesce(sch); |
Sebastian Ott | a6ef156 | 2015-09-07 19:51:39 +0200 | [diff] [blame] | 1752 | __disable_cmf(cdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 | } |
| 1754 | |
Cornelia Huck | 958974fb | 2007-10-12 16:11:21 +0200 | [diff] [blame] | 1755 | static void ccw_device_shutdown(struct device *dev) |
| 1756 | { |
| 1757 | struct ccw_device *cdev; |
| 1758 | |
| 1759 | cdev = to_ccwdev(dev); |
| 1760 | if (cdev->drv && cdev->drv->shutdown) |
| 1761 | cdev->drv->shutdown(cdev); |
Sebastian Ott | 1bc6664 | 2015-09-15 13:11:42 +0200 | [diff] [blame] | 1762 | __disable_cmf(cdev); |
Cornelia Huck | 958974fb | 2007-10-12 16:11:21 +0200 | [diff] [blame] | 1763 | } |
| 1764 | |
Sebastian Ott | d5ab527 | 2011-03-23 10:16:03 +0100 | [diff] [blame] | 1765 | static struct bus_type ccw_bus_type = { |
Cornelia Huck | 8bbace7 | 2006-01-11 10:56:22 +0100 | [diff] [blame] | 1766 | .name = "ccw", |
| 1767 | .match = ccw_bus_match, |
| 1768 | .uevent = ccw_uevent, |
| 1769 | .probe = ccw_device_probe, |
| 1770 | .remove = ccw_device_remove, |
Cornelia Huck | 958974fb | 2007-10-12 16:11:21 +0200 | [diff] [blame] | 1771 | .shutdown = ccw_device_shutdown, |
Cornelia Huck | 8bbace7 | 2006-01-11 10:56:22 +0100 | [diff] [blame] | 1772 | }; |
| 1773 | |
Cornelia Huck | b2ffd8e | 2007-10-12 16:11:17 +0200 | [diff] [blame] | 1774 | /** |
| 1775 | * ccw_driver_register() - register a ccw driver |
| 1776 | * @cdriver: driver to be registered |
| 1777 | * |
| 1778 | * This function is mainly a wrapper around driver_register(). |
| 1779 | * Returns: |
| 1780 | * %0 on success and a negative error value on failure. |
| 1781 | */ |
| 1782 | int ccw_driver_register(struct ccw_driver *cdriver) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 | { |
| 1784 | struct device_driver *drv = &cdriver->driver; |
| 1785 | |
| 1786 | drv->bus = &ccw_bus_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | |
| 1788 | return driver_register(drv); |
| 1789 | } |
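/*
 * Registration sketch for a minimal ccw driver; all names and device IDs
 * below are illustrative only:
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids		= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.set_online	= foo_set_online,
 *		.set_offline	= foo_set_offline,
 *	};
 *
 * and then, typically from the module init function:
 *
 *	return ccw_driver_register(&foo_driver);
 */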
| 1790 | |
Cornelia Huck | b2ffd8e | 2007-10-12 16:11:17 +0200 | [diff] [blame] | 1791 | /** |
| 1792 | * ccw_driver_unregister() - deregister a ccw driver |
| 1793 | * @cdriver: driver to be deregistered |
| 1794 | * |
| 1795 | * This function is mainly a wrapper around driver_unregister(). |
| 1796 | */ |
| 1797 | void ccw_driver_unregister(struct ccw_driver *cdriver) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | { |
| 1799 | driver_unregister(&cdriver->driver); |
| 1800 | } |
| 1801 | |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1802 | static void ccw_device_todo(struct work_struct *work) |
| 1803 | { |
| 1804 | struct ccw_device_private *priv; |
| 1805 | struct ccw_device *cdev; |
| 1806 | struct subchannel *sch; |
| 1807 | enum cdev_todo todo; |
| 1808 | |
| 1809 | priv = container_of(work, struct ccw_device_private, todo_work); |
| 1810 | cdev = priv->cdev; |
| 1811 | sch = to_subchannel(cdev->dev.parent); |
| 1812 | /* Find out todo. */ |
| 1813 | spin_lock_irq(cdev->ccwlock); |
| 1814 | todo = priv->todo; |
| 1815 | priv->todo = CDEV_TODO_NOTHING; |
| 1816 | CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n", |
| 1817 | priv->dev_id.ssid, priv->dev_id.devno, todo); |
| 1818 | spin_unlock_irq(cdev->ccwlock); |
| 1819 | /* Perform todo. */ |
| 1820 | switch (todo) { |
| 1821 | case CDEV_TODO_ENABLE_CMF: |
| 1822 | cmf_reenable(cdev); |
| 1823 | break; |
| 1824 | case CDEV_TODO_REBIND: |
| 1825 | ccw_device_do_unbind_bind(cdev); |
| 1826 | break; |
| 1827 | case CDEV_TODO_REGISTER: |
| 1828 | io_subchannel_register(cdev); |
| 1829 | break; |
| 1830 | case CDEV_TODO_UNREG_EVAL: |
| 1831 | if (!sch_is_pseudo_sch(sch)) |
| 1832 | css_schedule_eval(sch->schid); |
Joe Perches | b09fcec | 2020-03-10 13:39:50 -0700 | [diff] [blame] | 1833 | fallthrough; |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1834 | case CDEV_TODO_UNREG: |
Vineeth Vijayan | 2297791 | 2021-04-25 10:41:59 +0200 | [diff] [blame] | 1835 | spin_lock_irq(sch->lock); |
| 1836 | sch_set_cdev(sch, NULL); |
| 1837 | spin_unlock_irq(sch->lock); |
| 1838 | ccw_device_unregister(cdev); |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1839 | break; |
| 1840 | default: |
| 1841 | break; |
| 1842 | } |
| 1843 | /* Release workqueue ref. */ |
| 1844 | put_device(&cdev->dev); |
| 1845 | } |
| 1846 | |
| 1847 | /** |
| 1848 | * ccw_device_sched_todo - schedule ccw device operation |
| 1849 | * @cdev: ccw device |
| 1850 | * @todo: todo |
| 1851 | * |
| 1852 | * Schedule the operation identified by @todo to be performed on the slow path |
| 1853 | * workqueue. Do nothing if another operation with higher priority is already |
| 1854 | * scheduled. Needs to be called with ccwdev lock held. |
| 1855 | */ |
| 1856 | void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) |
| 1857 | { |
| 1858 | CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n", |
| 1859 | cdev->private->dev_id.ssid, cdev->private->dev_id.devno, |
| 1860 | todo); |
| 1861 | if (cdev->private->todo >= todo) |
| 1862 | return; |
| 1863 | cdev->private->todo = todo; |
| 1864 | /* Get workqueue ref. */ |
| 1865 | if (!get_device(&cdev->dev)) |
| 1866 | return; |
Sebastian Ott | be5d382 | 2010-02-26 22:37:24 +0100 | [diff] [blame] | 1867 | if (!queue_work(cio_work_q, &cdev->private->todo_work)) { |
Peter Oberparleiter | 37de53b | 2009-12-07 12:51:19 +0100 | [diff] [blame] | 1868 | /* Already queued, release workqueue ref. */ |
| 1869 | put_device(&cdev->dev); |
| 1870 | } |
| 1871 | } |
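/*
 * The "cdev->private->todo >= todo" check above works because enum
 * cdev_todo is declared in ascending order of priority, with
 * CDEV_TODO_UNREG as the highest value, so a pending unregister is never
 * overwritten by a lesser operation.
 */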
| 1872 | |
Michael Ernst | fd0457a | 2010-08-09 18:12:50 +0200 | [diff] [blame] | 1873 | /** |
| 1874 | * ccw_device_siosl() - initiate logging |
| 1875 | * @cdev: ccw device |
| 1876 | * |
| 1877 | * This function is used to invoke model-dependent logging within the channel |
| 1878 | * subsystem. |
| 1879 | */ |
| 1880 | int ccw_device_siosl(struct ccw_device *cdev) |
| 1881 | { |
| 1882 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
| 1883 | |
| 1884 | return chsc_siosl(sch->schid); |
| 1885 | } |
| 1886 | EXPORT_SYMBOL_GPL(ccw_device_siosl); |
| 1887 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | EXPORT_SYMBOL(ccw_device_set_online); |
| 1889 | EXPORT_SYMBOL(ccw_device_set_offline); |
| 1890 | EXPORT_SYMBOL(ccw_driver_register); |
| 1891 | EXPORT_SYMBOL(ccw_driver_unregister); |
| 1892 | EXPORT_SYMBOL(get_ccwdev_by_busid); |