// SPDX-License-Identifier: GPL-1.0+
/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
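
/*
 * Illustrative sketch (not part of this file): a ccw driver advertises the
 * devices it handles through a ccw_device_id table; ccw_bus_match() above
 * copies the matching entry's driver_info into cdev->id before the driver
 * is probed. The table below is hypothetical.
 *
 *	static struct ccw_device_id example_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0), .driver_info = 0 },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, example_ids);
 */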

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
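
/*
 * For example (hypothetical IDs), a device with cu_type/cu_model 3990/e9 and
 * dev_type/dev_model 3390/0a yields "ccw:t3990mE9dt3390dm0A", while a device
 * reporting dev_type 0 yields "ccw:t3990mE9dtdm".
 */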

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
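
/*
 * With the hypothetical IDs from the snprint_alias() example above, the
 * resulting uevent environment would contain:
 *
 *	CU_TYPE=3990
 *	CU_MODEL=E9
 *	DEV_TYPE=3390
 *	DEV_MODEL=0A
 *	MODALIAS=ccw:t3990mE9dt3390dm0A
 */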

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				       atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *  %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
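
/*
 * Minimal usage sketch (hypothetical caller): set_offline is meant to be
 * called from enabled context with no ccw device lock held, e.g.
 *
 *	if (ccw_device_set_offline(cdev))
 *		pr_warn("could not take %s offline\n", dev_name(&cdev->dev));
 */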

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *  %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
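
/*
 * Minimal usage sketch (hypothetical caller), mirroring the offline case
 * above: on success the device keeps the extra reference taken here, which
 * is given up again by ccw_device_set_offline().
 *
 *	ret = ccw_device_set_online(cdev);
 *	if (ret)
 *		return ret;	(the driver's set_online() failed or the
 *				 device did not become operational)
 */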

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing.*/
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
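
/*
 * The attribute is written from user space; with a hypothetical bus id
 * 0.0.4711 the handler above corresponds to
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.4711/online       (set online)
 *	echo 0 > /sys/bus/ccw/devices/0.0.4711/online       (set offline)
 *	echo force > /sys/bus/ccw/devices/0.0.4711/online   (steal the lock of
 *							     a boxed device and
 *							     then set it online)
 */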

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
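
/*
 * Usage sketch (hypothetical device number): the caller must drop the
 * reference obtained by the lookup when it is done with the device.
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x4711 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		...
 *		put_device(&cdev->dev);
 *	}
 */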

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int
io_subchannel_remove (struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		/* fall through */
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}
1272
1273static DECLARE_WORK(recovery_work, recovery_work_func);
1274
Kees Cook846d0c62017-10-16 16:43:25 -07001275static void recovery_func(struct timer_list *unused)
Cornelia Huckc820de32008-07-14 09:58:45 +02001276{
1277 /*
1278 * We can't do our recovery in softirq context and it's not
1279 * performance critical, so we schedule it.
1280 */
1281 schedule_work(&recovery_work);
1282}
1283
Sebastian Ott55fb7342017-09-14 13:55:22 +02001284void ccw_device_schedule_recovery(void)
Cornelia Huckc820de32008-07-14 09:58:45 +02001285{
1286 unsigned long flags;
1287
Sebastian Ott55fb7342017-09-14 13:55:22 +02001288 CIO_MSG_EVENT(3, "recovery: schedule\n");
Cornelia Huckc820de32008-07-14 09:58:45 +02001289 spin_lock_irqsave(&recovery_lock, flags);
1290 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1291 recovery_phase = 0;
1292 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1293 }
1294 spin_unlock_irqrestore(&recovery_lock, flags);
1295}
1296
Peter Oberparleiterecf5d9e2008-10-10 21:33:06 +02001297static int purge_fn(struct device *dev, void *data)
1298{
1299 struct ccw_device *cdev = to_ccwdev(dev);
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001300 struct ccw_dev_id *id = &cdev->private->dev_id;
Peter Oberparleiterecf5d9e2008-10-10 21:33:06 +02001301
1302 spin_lock_irq(cdev->ccwlock);
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001303 if (is_blacklisted(id->ssid, id->devno) &&
Peter Oberparleitera2fc8482011-04-04 09:43:32 +02001304 (cdev->private->state == DEV_STATE_OFFLINE) &&
1305 (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001306 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1307 id->devno);
1308 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
Peter Oberparleitera2fc8482011-04-04 09:43:32 +02001309 atomic_set(&cdev->private->onoff, 0);
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001310 }
Peter Oberparleiterecf5d9e2008-10-10 21:33:06 +02001311 spin_unlock_irq(cdev->ccwlock);
Peter Oberparleiterecf5d9e2008-10-10 21:33:06 +02001312 /* Abort loop in case of pending signal. */
1313 if (signal_pending(current))
1314 return -EINTR;
1315
1316 return 0;
1317}
1318
1319/**
1320 * ccw_purge_blacklisted - purge unused, blacklisted devices
1321 *
1322 * Unregister all ccw devices that are offline and on the blacklist.
1323 */
1324int ccw_purge_blacklisted(void)
1325{
1326 CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1327 bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1328 return 0;
1329}
1330
Peter Oberparleiter6afcc772009-10-06 10:34:02 +02001331void ccw_device_set_disconnected(struct ccw_device *cdev)
Cornelia Huckc820de32008-07-14 09:58:45 +02001332{
1333 if (!cdev)
1334 return;
1335 ccw_device_set_timeout(cdev, 0);
1336 cdev->private->flags.fake_irb = 0;
1337 cdev->private->state = DEV_STATE_DISCONNECTED;
1338 if (cdev->online)
1339 ccw_device_schedule_recovery();
1340}
1341
Peter Oberparleiter91c36912008-08-21 19:46:39 +02001342void ccw_device_set_notoper(struct ccw_device *cdev)
1343{
1344 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1345
1346 CIO_TRACE_EVENT(2, "notoper");
Cornelia Huckb9d3aed2008-10-10 21:33:11 +02001347 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
Peter Oberparleiter91c36912008-08-21 19:46:39 +02001348 ccw_device_set_timeout(cdev, 0);
1349 cio_disable_subchannel(sch);
1350 cdev->private->state = DEV_STATE_NOT_OPER;
1351}
1352
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001353enum io_sch_action {
1354 IO_SCH_UNREG,
1355 IO_SCH_ORPH_UNREG,
1356 IO_SCH_ATTACH,
1357 IO_SCH_UNREG_ATTACH,
1358 IO_SCH_ORPH_ATTACH,
1359 IO_SCH_REPROBE,
1360 IO_SCH_VERIFY,
1361 IO_SCH_DISC,
1362 IO_SCH_NOP,
1363};
1364
1365static enum io_sch_action sch_get_action(struct subchannel *sch)
Cornelia Huckc820de32008-07-14 09:58:45 +02001366{
Cornelia Huckc820de32008-07-14 09:58:45 +02001367 struct ccw_device *cdev;
1368
Cornelia Huckc820de32008-07-14 09:58:45 +02001369 cdev = sch_get_cdev(sch);
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001370 if (cio_update_schib(sch)) {
1371 /* Not operational. */
1372 if (!cdev)
1373 return IO_SCH_UNREG;
Sebastian Ott76e6fb42010-02-26 22:37:28 +01001374 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001375 return IO_SCH_UNREG;
1376 return IO_SCH_ORPH_UNREG;
Cornelia Huckc820de32008-07-14 09:58:45 +02001377 }
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001378 /* Operational. */
1379 if (!cdev)
1380 return IO_SCH_ATTACH;
1381 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
Sebastian Ott76e6fb42010-02-26 22:37:28 +01001382 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001383 return IO_SCH_UNREG_ATTACH;
1384 return IO_SCH_ORPH_ATTACH;
Cornelia Huckc820de32008-07-14 09:58:45 +02001385 }
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001386 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
Sebastian Ott76e6fb42010-02-26 22:37:28 +01001387 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001388 return IO_SCH_UNREG;
1389 return IO_SCH_DISC;
Cornelia Huckc820de32008-07-14 09:58:45 +02001390 }
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001391 if (device_is_disconnected(cdev))
1392 return IO_SCH_REPROBE;
Sebastian Ott31370f752012-10-24 11:22:52 +02001393 if (cdev->online && !cdev->private->flags.resuming)
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001394 return IO_SCH_VERIFY;
Sebastian Ott43d0be72012-09-05 14:19:42 +02001395 if (cdev->private->state == DEV_STATE_NOT_OPER)
1396 return IO_SCH_UNREG_ATTACH;
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001397 return IO_SCH_NOP;
1398}
1399
1400/**
1401 * io_subchannel_sch_event - process subchannel event
1402 * @sch: subchannel
1403 * @process: non-zero if function is called in process context
1404 *
1405 * An unspecified event occurred for this subchannel. Adjust data according
1406 * to the current operational state of the subchannel and device. Return
1407 * zero when the event has been handled sufficiently or -EAGAIN when this
1408 * function should be called again in process context.
1409 */
1410static int io_subchannel_sch_event(struct subchannel *sch, int process)
1411{
1412 unsigned long flags;
1413 struct ccw_device *cdev;
1414 struct ccw_dev_id dev_id;
1415 enum io_sch_action action;
1416 int rc = -EAGAIN;
1417
1418 spin_lock_irqsave(sch->lock, flags);
1419 if (!device_is_registered(&sch->dev))
1420 goto out_unlock;
Peter Oberparleiter390935a2009-12-07 12:51:18 +01001421 if (work_pending(&sch->todo_work))
1422 goto out_unlock;
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001423 cdev = sch_get_cdev(sch);
1424 if (cdev && work_pending(&cdev->private->todo_work))
1425 goto out_unlock;
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001426 action = sch_get_action(sch);
1427 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1428 sch->schid.ssid, sch->schid.sch_no, process,
1429 action);
1430 /* Perform immediate actions while holding the lock. */
Cornelia Huckc820de32008-07-14 09:58:45 +02001431 switch (action) {
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001432 case IO_SCH_REPROBE:
1433 /* Trigger device recognition. */
Cornelia Huckc820de32008-07-14 09:58:45 +02001434 ccw_device_trigger_reprobe(cdev);
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001435 rc = 0;
1436 goto out_unlock;
1437 case IO_SCH_VERIFY:
1438 /* Trigger path verification. */
1439 io_subchannel_verify(sch);
1440 rc = 0;
1441 goto out_unlock;
1442 case IO_SCH_DISC:
1443 ccw_device_set_disconnected(cdev);
1444 rc = 0;
1445 goto out_unlock;
1446 case IO_SCH_ORPH_UNREG:
1447 case IO_SCH_ORPH_ATTACH:
Peter Oberparleiter6afcc772009-10-06 10:34:02 +02001448 ccw_device_set_disconnected(cdev);
Peter Oberparleiter91c36912008-08-21 19:46:39 +02001449 break;
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001450 case IO_SCH_UNREG_ATTACH:
1451 case IO_SCH_UNREG:
Sebastian Ott16d2ce22010-11-10 10:05:53 +01001452 if (!cdev)
1453 break;
1454 if (cdev->private->state == DEV_STATE_SENSE_ID) {
1455 /*
1456 * Note: delayed work triggered by this event
1457 * and repeated calls to sch_event are synchronized
1458 * by the above check for work_pending(cdev).
1459 */
1460 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1461 } else
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001462 ccw_device_set_notoper(cdev);
1463 break;
1464 case IO_SCH_NOP:
1465 rc = 0;
1466 goto out_unlock;
Cornelia Huckc820de32008-07-14 09:58:45 +02001467 default:
1468 break;
1469 }
1470 spin_unlock_irqrestore(sch->lock, flags);
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001471 /* All other actions require process context. */
1472 if (!process)
1473 goto out;
1474 /* Handle attached ccw device. */
1475 switch (action) {
1476 case IO_SCH_ORPH_UNREG:
1477 case IO_SCH_ORPH_ATTACH:
1478 /* Move ccw device to orphanage. */
1479 rc = ccw_device_move_to_orph(cdev);
1480 if (rc)
1481 goto out;
1482 break;
1483 case IO_SCH_UNREG_ATTACH:
Sebastian Ott3368ba22012-09-05 14:20:41 +02001484 spin_lock_irqsave(sch->lock, flags);
Sebastian Ott74b61272010-10-25 16:10:26 +02001485 if (cdev->private->flags.resuming) {
1486 /* Device will be handled later. */
1487 rc = 0;
Sebastian Ott3368ba22012-09-05 14:20:41 +02001488 goto out_unlock;
Sebastian Ott74b61272010-10-25 16:10:26 +02001489 }
Sebastian Ott3368ba22012-09-05 14:20:41 +02001490 sch_set_cdev(sch, NULL);
1491 spin_unlock_irqrestore(sch->lock, flags);
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001492 /* Unregister ccw device. */
Sebastian Ott74b61272010-10-25 16:10:26 +02001493 ccw_device_unregister(cdev);
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001494 break;
1495 default:
1496 break;
1497 }
1498 /* Handle subchannel. */
1499 switch (action) {
1500 case IO_SCH_ORPH_UNREG:
1501 case IO_SCH_UNREG:
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001502 if (!cdev || !cdev->private->flags.resuming)
1503 css_sch_device_unregister(sch);
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001504 break;
1505 case IO_SCH_ORPH_ATTACH:
1506 case IO_SCH_UNREG_ATTACH:
1507 case IO_SCH_ATTACH:
1508 dev_id.ssid = sch->schid.ssid;
1509 dev_id.devno = sch->schib.pmcw.dev;
1510 cdev = get_ccwdev_by_dev_id(&dev_id);
1511 if (!cdev) {
1512 sch_create_and_recog_new_device(sch);
1513 break;
1514 }
1515 rc = ccw_device_move_to_sch(cdev, sch);
1516 if (rc) {
1517 /* Release reference from get_ccwdev_by_dev_id() */
1518 put_device(&cdev->dev);
1519 goto out;
1520 }
1521 spin_lock_irqsave(sch->lock, flags);
1522 ccw_device_trigger_reprobe(cdev);
1523 spin_unlock_irqrestore(sch->lock, flags);
1524 /* Release reference from get_ccwdev_by_dev_id() */
1525 put_device(&cdev->dev);
1526 break;
1527 default:
1528 break;
1529 }
1530 return 0;
Cornelia Huckc820de32008-07-14 09:58:45 +02001531
Peter Oberparleiter5d6e6b62009-12-07 12:51:17 +01001532out_unlock:
1533 spin_unlock_irqrestore(sch->lock, flags);
1534out:
1535 return rc;
Cornelia Huckc820de32008-07-14 09:58:45 +02001536}
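/*
 * Editorial sketch (not part of the original file): the -EAGAIN contract
 * documented above.  In the real code this callback is reached through the
 * subchannel driver's sch_event hook; the helper name below is hypothetical.
 */
static void sample_handle_sch_event(struct subchannel *sch, int process)
{
	if (io_subchannel_sch_event(sch, process) == -EAGAIN)
		/* Could not finish without sleeping - retry from process context. */
		css_schedule_eval(sch->schid);
}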
1537
Sebastian Ott137a14f2014-01-27 13:29:15 +01001538static void ccw_device_set_int_class(struct ccw_device *cdev)
1539{
1540 struct ccw_driver *cdrv = cdev->drv;
1541
1542 /* Note: we interpret class 0 in this context as an uninitialized
1543 * field since it translates to a non-I/O interrupt class. */
1544 if (cdrv->int_class != 0)
1545 cdev->private->int_class = cdrv->int_class;
1546 else
1547 cdev->private->int_class = IRQIO_CIO;
1548}
1549
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550#ifdef CONFIG_CCW_CONSOLE
Sebastian Ott1e532092014-01-27 13:28:10 +01001551int __init ccw_device_enable_console(struct ccw_device *cdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552{
Sebastian Ott1e532092014-01-27 13:28:10 +01001553 struct subchannel *sch = to_subchannel(cdev->dev.parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 int rc;
1555
Sebastian Ott1e532092014-01-27 13:28:10 +01001556 if (!cdev->drv || !cdev->handler)
1557 return -EINVAL;
1558
Cornelia Huck0ae7a7b2008-07-14 09:58:43 +02001559 io_subchannel_init_fields(sch);
Sebastian Ottf444cc02008-12-25 13:39:14 +01001560 rc = cio_commit_config(sch);
1561 if (rc)
1562 return rc;
Cornelia Huck0ae7a7b2008-07-14 09:58:43 +02001563 sch->driver = &io_subchannel_driver;
Peter Oberparleiter736b5db2009-12-07 12:51:21 +01001564 io_subchannel_recog(cdev, sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 /* Now wait for the async. recognition to come to an end. */
1566 spin_lock_irq(cdev->ccwlock);
1567 while (!dev_fsm_final_state(cdev))
Sebastian Ott188561a2013-04-13 12:53:21 +02001568 ccw_device_wait_idle(cdev);
1569
Sebastian Ottafdfed02013-04-13 13:03:03 +02001570 /* Hold on to an extra reference while device is online. */
1571 get_device(&cdev->dev);
1572 rc = ccw_device_online(cdev);
1573 if (rc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 goto out_unlock;
Sebastian Ottafdfed02013-04-13 13:03:03 +02001575
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 while (!dev_fsm_final_state(cdev))
Sebastian Ott188561a2013-04-13 12:53:21 +02001577 ccw_device_wait_idle(cdev);
1578
Sebastian Ottafdfed02013-04-13 13:03:03 +02001579 if (cdev->private->state == DEV_STATE_ONLINE)
1580 cdev->online = 1;
1581 else
1582 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583out_unlock:
1584 spin_unlock_irq(cdev->ccwlock);
Sebastian Ottafdfed02013-04-13 13:03:03 +02001585 if (rc) /* Give up online reference since onlining failed. */
1586 put_device(&cdev->dev);
Peter Oberparleiter736b5db2009-12-07 12:51:21 +01001587 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588}
1589
Sebastian Ott1e532092014-01-27 13:28:10 +01001590struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
Sebastian Ott863fc842013-04-13 13:01:50 +02001592 struct io_subchannel_private *io_priv;
Sebastian Ottafdfed02013-04-13 13:03:03 +02001593 struct ccw_device *cdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 struct subchannel *sch;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 sch = cio_probe_console();
Sebastian Ottafdfed02013-04-13 13:03:03 +02001597 if (IS_ERR(sch))
1598 return ERR_CAST(sch);
Sebastian Ott863fc842013-04-13 13:01:50 +02001599
1600 io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1601 if (!io_priv) {
1602 put_device(&sch->dev);
1603 return ERR_PTR(-ENOMEM);
1604 }
Sebastian Ott2c3e7e12014-06-11 13:06:57 +02001605 set_io_private(sch, io_priv);
Sebastian Ottafdfed02013-04-13 13:03:03 +02001606 cdev = io_subchannel_create_ccwdev(sch);
1607 if (IS_ERR(cdev)) {
1608 put_device(&sch->dev);
1609 kfree(io_priv);
1610 return cdev;
1611 }
Sebastian Ott2253e8d2014-01-27 13:26:10 +01001612 cdev->drv = drv;
Sebastian Ott137a14f2014-01-27 13:29:15 +01001613 ccw_device_set_int_class(cdev);
Sebastian Ottafdfed02013-04-13 13:03:03 +02001614 return cdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615}
Cornelia Huck1f4e7ed2008-10-10 21:33:14 +02001616
Sebastian Ott1e532092014-01-27 13:28:10 +01001617void __init ccw_device_destroy_console(struct ccw_device *cdev)
1618{
1619 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1620 struct io_subchannel_private *io_priv = to_io_private(sch);
1621
1622 set_io_private(sch, NULL);
1623 put_device(&sch->dev);
1624 put_device(&cdev->dev);
1625 kfree(io_priv);
1626}
1627
Sebastian Ott188561a2013-04-13 12:53:21 +02001628/**
1629 * ccw_device_wait_idle() - busy wait for device to become idle
1630 * @cdev: ccw device
1631 *
1632 * Poll until activity control is zero, that is, no function or data
1633 * transfer is pending/active.
1634 * Must be called with the device lock held.
1635 */
1636void ccw_device_wait_idle(struct ccw_device *cdev)
1637{
1638 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1639
1640 while (1) {
1641 cio_tsch(sch);
1642 if (sch->schib.scsw.cmd.actl == 0)
1643 break;
1644 udelay_simple(100);
1645 }
1646}
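/*
 * Editorial sketch: the calling convention mirrors the early console path in
 * ccw_device_enable_console() above - the ccwlock is held and the poll is
 * repeated until the device FSM reaches a final state.  The helper name is
 * hypothetical.
 */
static void sample_wait_for_final_state(struct ccw_device *cdev)
{
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);
	spin_unlock_irq(cdev->ccwlock);
}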
1647
Martin Schwidefsky66648452009-06-16 10:30:28 +02001648static int ccw_device_pm_restore(struct device *dev);
1649
Sebastian Ottf10ccca2013-04-13 12:56:51 +02001650int ccw_device_force_console(struct ccw_device *cdev)
Martin Schwidefsky66648452009-06-16 10:30:28 +02001651{
Sebastian Ottf10ccca2013-04-13 12:56:51 +02001652 return ccw_device_pm_restore(&cdev->dev);
Martin Schwidefsky66648452009-06-16 10:30:28 +02001653}
1654EXPORT_SYMBOL_GPL(ccw_device_force_console);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655#endif
1656
1657/*
1658 * get ccw_device matching the busid, but only if owned by cdrv
1659 */
Cornelia Huckb0744bd2005-06-25 14:55:27 -07001660static int
1661__ccwdev_check_busid(struct device *dev, void *id)
1662{
1663 char *bus_id;
1664
Cornelia Huck12975ae2006-10-11 15:31:47 +02001665 bus_id = id;
Cornelia Huckb0744bd2005-06-25 14:55:27 -07001666
Kay Sievers98df67b2008-12-25 13:38:55 +01001667 return (strcmp(bus_id, dev_name(dev)) == 0);
Cornelia Huckb0744bd2005-06-25 14:55:27 -07001668}
1669
1670
Cornelia Huckb2ffd8e2007-10-12 16:11:17 +02001671/**
1672 * get_ccwdev_by_busid() - obtain device from a bus id
1673 * @cdrv: driver the device is owned by
1674 * @bus_id: bus id of the device to be searched
1675 *
1676 * This function searches all devices owned by @cdrv for a device with a bus
1677 * id matching @bus_id.
1678 * Returns:
1679 * If a match is found, the reference count of the found device is increased
1680 * and the device is returned; else %NULL is returned.
1681 */
1682struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1683 const char *bus_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684{
Cornelia Huckb0744bd2005-06-25 14:55:27 -07001685 struct device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
Sebastian Ott9f30ea92012-01-24 13:35:02 -05001687 dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
Cornelia Huckb0744bd2005-06-25 14:55:27 -07001688 __ccwdev_check_busid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
Heiko Carstensd2c993d2006-07-12 16:41:55 +02001690 return dev ? to_ccwdev(dev) : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691}
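/*
 * Editorial sketch: looking up a device by bus id and dropping the reference
 * taken by get_ccwdev_by_busid().  The bus id string and the helper name are
 * illustrative only.
 */
static void sample_find_by_busid(struct ccw_driver *cdrv)
{
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_busid(cdrv, "0.0.1234");
	if (!cdev)
		return;
	dev_info(&cdev->dev, "found by bus id\n");
	put_device(&cdev->dev);	/* release lookup reference */
}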
1692
1693/************************** device driver handling ************************/
1694
1695/* This is the implementation of the ccw_driver class. The probe, remove
1696 * and release methods are initially very similar to the device_driver
1697 * implementations, with the difference that they have ccw_device
1698 * arguments.
1699 *
1700 * A ccw driver also contains the information that is needed for
1701 * device matching.
1702 */
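/*
 * Editorial sketch: how a client of this bus typically fills in a ccw_driver.
 * Device IDs, callback and driver names are illustrative; the callback
 * signatures follow <asm/ccwdev.h>.
 */
static struct ccw_device_id sample_ccw_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, sample_ccw_ids);

static int sample_ccw_probe(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "sample driver bound\n");
	return 0;
}

static void sample_ccw_remove(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "sample driver unbound\n");
}

static struct ccw_driver sample_ccw_driver = {
	.driver = {
		.name	= "sample_ccw",
		.owner	= THIS_MODULE,
	},
	.ids	= sample_ccw_ids,
	.probe	= sample_ccw_probe,
	.remove	= sample_ccw_remove,
	.int_class = IRQIO_CIO,
};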
1703static int
1704ccw_device_probe (struct device *dev)
1705{
1706 struct ccw_device *cdev = to_ccwdev(dev);
1707 struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1708 int ret;
1709
1710 cdev->drv = cdrv; /* to let the driver call _set_online */
Sebastian Ott137a14f2014-01-27 13:29:15 +01001711 ccw_device_set_int_class(cdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 if (ret) {
Heiko Carstensd2c993d2006-07-12 16:41:55 +02001714 cdev->drv = NULL;
Heiko Carstens420f42e2013-01-02 15:18:18 +01001715 cdev->private->int_class = IRQIO_CIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 return ret;
1717 }
1718
1719 return 0;
1720}
1721
Sebastian Ott74bd0d82013-12-16 10:51:54 +01001722static int ccw_device_remove(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723{
1724 struct ccw_device *cdev = to_ccwdev(dev);
1725 struct ccw_driver *cdrv = cdev->drv;
1726 int ret;
1727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 if (cdrv->remove)
1729 cdrv->remove(cdev);
Sebastian Ott74bd0d82013-12-16 10:51:54 +01001730
1731 spin_lock_irq(cdev->ccwlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 if (cdev->online) {
1733 cdev->online = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 ret = ccw_device_offline(cdev);
1735 spin_unlock_irq(cdev->ccwlock);
1736 if (ret == 0)
1737 wait_event(cdev->private->wait_q,
1738 dev_fsm_final_state(cdev));
1739 else
Michael Ernst139b83dd2008-05-07 09:22:54 +02001740 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
Cornelia Hucke556bbb2007-07-27 12:29:19 +02001741 "device 0.%x.%04x\n",
1742 ret, cdev->private->dev_id.ssid,
1743 cdev->private->dev_id.devno);
Cornelia Huck9cd67422008-12-25 13:39:06 +01001744 /* Give up reference obtained in ccw_device_set_online(). */
1745 put_device(&cdev->dev);
Sebastian Ott74bd0d82013-12-16 10:51:54 +01001746 spin_lock_irq(cdev->ccwlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 }
1748 ccw_device_set_timeout(cdev, 0);
Heiko Carstensd2c993d2006-07-12 16:41:55 +02001749 cdev->drv = NULL;
Heiko Carstens420f42e2013-01-02 15:18:18 +01001750 cdev->private->int_class = IRQIO_CIO;
Sebastian Ott74bd0d82013-12-16 10:51:54 +01001751 spin_unlock_irq(cdev->ccwlock);
Sebastian Otta6ef1562015-09-07 19:51:39 +02001752 __disable_cmf(cdev);
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 return 0;
1755}
1756
Cornelia Huck958974fb2007-10-12 16:11:21 +02001757static void ccw_device_shutdown(struct device *dev)
1758{
1759 struct ccw_device *cdev;
1760
1761 cdev = to_ccwdev(dev);
1762 if (cdev->drv && cdev->drv->shutdown)
1763 cdev->drv->shutdown(cdev);
Sebastian Ott1bc66642015-09-15 13:11:42 +02001764 __disable_cmf(cdev);
Cornelia Huck958974fb2007-10-12 16:11:21 +02001765}
1766
Sebastian Ott823d4942009-06-16 10:30:20 +02001767static int ccw_device_pm_prepare(struct device *dev)
1768{
1769 struct ccw_device *cdev = to_ccwdev(dev);
1770
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001771 if (work_pending(&cdev->private->todo_work))
Sebastian Ott823d4942009-06-16 10:30:20 +02001772 return -EAGAIN;
1773 /* Fail while device is being set online/offline. */
1774 if (atomic_read(&cdev->private->onoff))
1775 return -EAGAIN;
1776
1777 if (cdev->online && cdev->drv && cdev->drv->prepare)
1778 return cdev->drv->prepare(cdev);
1779
1780 return 0;
1781}
1782
1783static void ccw_device_pm_complete(struct device *dev)
1784{
1785 struct ccw_device *cdev = to_ccwdev(dev);
1786
1787 if (cdev->online && cdev->drv && cdev->drv->complete)
1788 cdev->drv->complete(cdev);
1789}
1790
1791static int ccw_device_pm_freeze(struct device *dev)
1792{
1793 struct ccw_device *cdev = to_ccwdev(dev);
1794 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1795 int ret, cm_enabled;
1796
1797	/* Fail suspend while device is in transitional state. */
1798 if (!dev_fsm_final_state(cdev))
1799 return -EAGAIN;
1800 if (!cdev->online)
1801 return 0;
1802 if (cdev->drv && cdev->drv->freeze) {
1803 ret = cdev->drv->freeze(cdev);
1804 if (ret)
1805 return ret;
1806 }
1807
1808 spin_lock_irq(sch->lock);
1809 cm_enabled = cdev->private->cmb != NULL;
1810 spin_unlock_irq(sch->lock);
1811 if (cm_enabled) {
1812		/* Keep the css from writing measurement data to memory. */
1813 ret = ccw_set_cmf(cdev, 0);
1814 if (ret)
1815 return ret;
1816 }
1817 /* From here on, disallow device driver I/O. */
1818 spin_lock_irq(sch->lock);
1819 ret = cio_disable_subchannel(sch);
1820 spin_unlock_irq(sch->lock);
1821
1822 return ret;
1823}
1824
1825static int ccw_device_pm_thaw(struct device *dev)
1826{
1827 struct ccw_device *cdev = to_ccwdev(dev);
1828 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1829 int ret, cm_enabled;
1830
1831 if (!cdev->online)
1832 return 0;
1833
1834 spin_lock_irq(sch->lock);
1835 /* Allow device driver I/O again. */
1836 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
1837 cm_enabled = cdev->private->cmb != NULL;
1838 spin_unlock_irq(sch->lock);
1839 if (ret)
1840 return ret;
1841
1842 if (cm_enabled) {
1843 ret = ccw_set_cmf(cdev, 1);
1844 if (ret)
1845 return ret;
1846 }
1847
1848 if (cdev->drv && cdev->drv->thaw)
1849 ret = cdev->drv->thaw(cdev);
1850
1851 return ret;
1852}
1853
1854static void __ccw_device_pm_restore(struct ccw_device *cdev)
1855{
1856 struct subchannel *sch = to_subchannel(cdev->dev.parent);
Sebastian Ott823d4942009-06-16 10:30:20 +02001857
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001858 spin_lock_irq(sch->lock);
1859 if (cio_is_console(sch->schid)) {
1860 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1861 goto out_unlock;
1862 }
Sebastian Ott823d4942009-06-16 10:30:20 +02001863 /*
1864 * While we were sleeping, devices may have gone or become
1865 * available again. Kick re-detection.
1866 */
Sebastian Ott823d4942009-06-16 10:30:20 +02001867 cdev->private->flags.resuming = 1;
Sebastian Ottb17295e2011-01-12 09:55:10 +01001868 cdev->private->path_new_mask = LPM_ANYPATH;
Sebastian Ott817e5002011-12-01 13:32:19 +01001869 css_sched_sch_todo(sch, SCH_TODO_EVAL);
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001870 spin_unlock_irq(sch->lock);
Sebastian Ott817e5002011-12-01 13:32:19 +01001871 css_wait_for_slow_path();
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001872
1873 /* cdev may have been moved to a different subchannel. */
1874 sch = to_subchannel(cdev->dev.parent);
1875 spin_lock_irq(sch->lock);
1876 if (cdev->private->state != DEV_STATE_ONLINE &&
1877 cdev->private->state != DEV_STATE_OFFLINE)
1878 goto out_unlock;
1879
Peter Oberparleiter736b5db2009-12-07 12:51:21 +01001880 ccw_device_recognition(cdev);
Sebastian Ott823d4942009-06-16 10:30:20 +02001881 spin_unlock_irq(sch->lock);
Sebastian Ott823d4942009-06-16 10:30:20 +02001882 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
1883 cdev->private->state == DEV_STATE_DISCONNECTED);
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001884 spin_lock_irq(sch->lock);
1885
1886out_unlock:
Sebastian Ott823d4942009-06-16 10:30:20 +02001887 cdev->private->flags.resuming = 0;
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001888 spin_unlock_irq(sch->lock);
Sebastian Ott823d4942009-06-16 10:30:20 +02001889}
1890
1891static int resume_handle_boxed(struct ccw_device *cdev)
1892{
1893 cdev->private->state = DEV_STATE_BOXED;
Sebastian Ott76e6fb42010-02-26 22:37:28 +01001894 if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
Sebastian Ott823d4942009-06-16 10:30:20 +02001895 return 0;
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001896 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
Sebastian Ott823d4942009-06-16 10:30:20 +02001897 return -ENODEV;
1898}
1899
1900static int resume_handle_disc(struct ccw_device *cdev)
1901{
1902 cdev->private->state = DEV_STATE_DISCONNECTED;
Sebastian Ott76e6fb42010-02-26 22:37:28 +01001903 if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
Sebastian Ott823d4942009-06-16 10:30:20 +02001904 return 0;
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001905 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
Sebastian Ott823d4942009-06-16 10:30:20 +02001906 return -ENODEV;
1907}
1908
1909static int ccw_device_pm_restore(struct device *dev)
1910{
1911 struct ccw_device *cdev = to_ccwdev(dev);
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001912 struct subchannel *sch;
1913 int ret = 0;
Sebastian Ott823d4942009-06-16 10:30:20 +02001914
1915 __ccw_device_pm_restore(cdev);
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001916 sch = to_subchannel(cdev->dev.parent);
Sebastian Ott823d4942009-06-16 10:30:20 +02001917 spin_lock_irq(sch->lock);
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001918 if (cio_is_console(sch->schid))
Sebastian Ott823d4942009-06-16 10:30:20 +02001919 goto out_restore;
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001920
Sebastian Ott823d4942009-06-16 10:30:20 +02001921 /* check recognition results */
1922 switch (cdev->private->state) {
1923 case DEV_STATE_OFFLINE:
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001924 case DEV_STATE_ONLINE:
1925 cdev->private->flags.donotify = 0;
Sebastian Ott823d4942009-06-16 10:30:20 +02001926 break;
1927 case DEV_STATE_BOXED:
1928 ret = resume_handle_boxed(cdev);
Sebastian Ott823d4942009-06-16 10:30:20 +02001929 if (ret)
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001930 goto out_unlock;
Sebastian Ott823d4942009-06-16 10:30:20 +02001931 goto out_restore;
Sebastian Ott823d4942009-06-16 10:30:20 +02001932 default:
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001933 ret = resume_handle_disc(cdev);
1934 if (ret)
1935 goto out_unlock;
1936 goto out_restore;
Sebastian Ott823d4942009-06-16 10:30:20 +02001937 }
1938 /* check if the device type has changed */
1939 if (!ccw_device_test_sense_data(cdev)) {
1940 ccw_device_update_sense_data(cdev);
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01001941 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
Sebastian Ott823d4942009-06-16 10:30:20 +02001942 ret = -ENODEV;
1943 goto out_unlock;
1944 }
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001945 if (!cdev->online)
1946 goto out_unlock;
1947
1948 if (ccw_device_online(cdev)) {
1949 ret = resume_handle_disc(cdev);
1950 if (ret)
1951 goto out_unlock;
1952 goto out_restore;
1953 }
1954 spin_unlock_irq(sch->lock);
1955 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
1956 spin_lock_irq(sch->lock);
1957
1958 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
1959 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1960 ret = -ENODEV;
Sebastian Ott823d4942009-06-16 10:30:20 +02001961 goto out_unlock;
1962 }
Sebastian Ott823d4942009-06-16 10:30:20 +02001963
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001964 /* reenable cmf, if needed */
1965 if (cdev->private->cmb) {
1966 spin_unlock_irq(sch->lock);
Sebastian Ott823d4942009-06-16 10:30:20 +02001967 ret = ccw_set_cmf(cdev, 1);
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001968 spin_lock_irq(sch->lock);
Sebastian Ott823d4942009-06-16 10:30:20 +02001969 if (ret) {
Sebastian Ottf0148242009-09-11 10:28:23 +02001970 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
1971 "(rc=%d)\n", cdev->private->dev_id.ssid,
1972 cdev->private->dev_id.devno, ret);
Sebastian Ott823d4942009-06-16 10:30:20 +02001973 ret = 0;
1974 }
1975 }
1976
1977out_restore:
Sebastian Ott0d01bb82010-02-26 22:37:29 +01001978 spin_unlock_irq(sch->lock);
Sebastian Ott823d4942009-06-16 10:30:20 +02001979 if (cdev->online && cdev->drv && cdev->drv->restore)
1980 ret = cdev->drv->restore(cdev);
Sebastian Ott823d4942009-06-16 10:30:20 +02001981 return ret;
1982
Sebastian Ott823d4942009-06-16 10:30:20 +02001983out_unlock:
1984 spin_unlock_irq(sch->lock);
1985 return ret;
1986}
1987
Alexey Dobriyan47145212009-12-14 18:00:08 -08001988static const struct dev_pm_ops ccw_pm_ops = {
Sebastian Ott823d4942009-06-16 10:30:20 +02001989 .prepare = ccw_device_pm_prepare,
1990 .complete = ccw_device_pm_complete,
1991 .freeze = ccw_device_pm_freeze,
1992 .thaw = ccw_device_pm_thaw,
1993 .restore = ccw_device_pm_restore,
1994};
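/*
 * Editorial sketch: the dev_pm_ops above simply forward to the ccw driver's
 * own optional callbacks (prepare/complete/freeze/thaw/restore).  A client
 * driver opts in like this; the names and bodies are illustrative.
 */
static int sample_ccw_freeze(struct ccw_device *cdev)
{
	/* Quiesce driver-initiated I/O before the subchannel is disabled. */
	return 0;
}

static int sample_ccw_restore(struct ccw_device *cdev)
{
	/* Re-establish device state after resume. */
	return 0;
}

/* In the ccw_driver definition: .freeze = sample_ccw_freeze, .restore = sample_ccw_restore */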
1995
Sebastian Ottd5ab5272011-03-23 10:16:03 +01001996static struct bus_type ccw_bus_type = {
Cornelia Huck8bbace72006-01-11 10:56:22 +01001997 .name = "ccw",
1998 .match = ccw_bus_match,
1999 .uevent = ccw_uevent,
2000 .probe = ccw_device_probe,
2001 .remove = ccw_device_remove,
Cornelia Huck958974fb2007-10-12 16:11:21 +02002002 .shutdown = ccw_device_shutdown,
Sebastian Ott823d4942009-06-16 10:30:20 +02002003 .pm = &ccw_pm_ops,
Cornelia Huck8bbace72006-01-11 10:56:22 +01002004};
2005
Cornelia Huckb2ffd8e2007-10-12 16:11:17 +02002006/**
2007 * ccw_driver_register() - register a ccw driver
2008 * @cdriver: driver to be registered
2009 *
2010 * This function is mainly a wrapper around driver_register().
2011 * Returns:
2012 * %0 on success and a negative error value on failure.
2013 */
2014int ccw_driver_register(struct ccw_driver *cdriver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015{
2016 struct device_driver *drv = &cdriver->driver;
2017
2018 drv->bus = &ccw_bus_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
2020 return driver_register(drv);
2021}
2022
Cornelia Huckb2ffd8e2007-10-12 16:11:17 +02002023/**
2024 * ccw_driver_unregister() - deregister a ccw driver
2025 * @cdriver: driver to be deregistered
2026 *
2027 * This function is mainly a wrapper around driver_unregister().
2028 */
2029void ccw_driver_unregister(struct ccw_driver *cdriver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030{
2031 driver_unregister(&cdriver->driver);
2032}
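/*
 * Editorial sketch: the usual registration pairing for a client driver such
 * as the sample_ccw_driver sketched earlier; the init/exit names are
 * illustrative.
 */
static int __init sample_ccw_init(void)
{
	return ccw_driver_register(&sample_ccw_driver);
}

static void __exit sample_ccw_exit(void)
{
	ccw_driver_unregister(&sample_ccw_driver);
}

module_init(sample_ccw_init);
module_exit(sample_ccw_exit);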
2033
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01002034static void ccw_device_todo(struct work_struct *work)
2035{
2036 struct ccw_device_private *priv;
2037 struct ccw_device *cdev;
2038 struct subchannel *sch;
2039 enum cdev_todo todo;
2040
2041 priv = container_of(work, struct ccw_device_private, todo_work);
2042 cdev = priv->cdev;
2043 sch = to_subchannel(cdev->dev.parent);
2044 /* Find out todo. */
2045 spin_lock_irq(cdev->ccwlock);
2046 todo = priv->todo;
2047 priv->todo = CDEV_TODO_NOTHING;
2048 CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
2049 priv->dev_id.ssid, priv->dev_id.devno, todo);
2050 spin_unlock_irq(cdev->ccwlock);
2051 /* Perform todo. */
2052 switch (todo) {
2053 case CDEV_TODO_ENABLE_CMF:
2054 cmf_reenable(cdev);
2055 break;
2056 case CDEV_TODO_REBIND:
2057 ccw_device_do_unbind_bind(cdev);
2058 break;
2059 case CDEV_TODO_REGISTER:
2060 io_subchannel_register(cdev);
2061 break;
2062 case CDEV_TODO_UNREG_EVAL:
2063 if (!sch_is_pseudo_sch(sch))
2064 css_schedule_eval(sch->schid);
2065 /* fall-through */
2066 case CDEV_TODO_UNREG:
2067 if (sch_is_pseudo_sch(sch))
2068 ccw_device_unregister(cdev);
2069 else
2070 ccw_device_call_sch_unregister(cdev);
2071 break;
2072 default:
2073 break;
2074 }
2075 /* Release workqueue ref. */
2076 put_device(&cdev->dev);
2077}
2078
2079/**
2080 * ccw_device_sched_todo - schedule ccw device operation
2081 * @cdev: ccw device
2082 * @todo: todo
2083 *
2084 * Schedule the operation identified by @todo to be performed on the slow path
2085 * workqueue. Do nothing if another operation with higher priority is already
2086 * scheduled. Needs to be called with ccwdev lock held.
2087 */
2088void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2089{
2090 CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2091 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2092 todo);
2093 if (cdev->private->todo >= todo)
2094 return;
2095 cdev->private->todo = todo;
2096 /* Get workqueue ref. */
2097 if (!get_device(&cdev->dev))
2098 return;
Sebastian Ottbe5d3822010-02-26 22:37:24 +01002099 if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
Peter Oberparleiter37de53b2009-12-07 12:51:19 +01002100 /* Already queued, release workqueue ref. */
2101 put_device(&cdev->dev);
2102 }
2103}
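/*
 * Editorial sketch of the locking rule stated above: take the ccwdev lock
 * around the call.  CDEV_TODO_REBIND is just one possible todo; the helper
 * name is hypothetical.
 */
static void sample_schedule_rebind(struct ccw_device *cdev)
{
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
	spin_unlock_irqrestore(cdev->ccwlock, flags);
}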
2104
Michael Ernstfd0457a2010-08-09 18:12:50 +02002105/**
2106 * ccw_device_siosl() - initiate logging
2107 * @cdev: ccw device
2108 *
2109 * This function is used to invoke model-dependent logging within the channel
2110 * subsystem.
2111 */
2112int ccw_device_siosl(struct ccw_device *cdev)
2113{
2114 struct subchannel *sch = to_subchannel(cdev->dev.parent);
2115
2116 return chsc_siosl(sch->schid);
2117}
2118EXPORT_SYMBOL_GPL(ccw_device_siosl);
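/*
 * Editorial sketch: a driver error path asking the channel subsystem for
 * model-dependent logging; the message text is illustrative.
 */
static void sample_request_logging(struct ccw_device *cdev)
{
	int rc = ccw_device_siosl(cdev);

	if (rc)
		dev_warn(&cdev->dev, "logging not activated, rc=%d\n", rc);
}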
2119
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120EXPORT_SYMBOL(ccw_device_set_online);
2121EXPORT_SYMBOL(ccw_device_set_offline);
2122EXPORT_SYMBOL(ccw_driver_register);
2123EXPORT_SYMBOL(ccw_driver_unregister);
2124EXPORT_SYMBOL(get_ccwdev_by_busid);