blob: 5e963fe0e38d4c2125c43ae801ca7e9b28d98d07 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002/*
3 * PAV alias management for the DASD ECKD discipline
4 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02005 * Copyright IBM Corp. 2007
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01006 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7 */
8
Stefan Haberlandca99dab2009-09-11 10:28:30 +02009#define KMSG_COMPONENT "dasd-eckd"
Stefan Haberlandfc19f382009-03-26 15:23:49 +010010
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010011#include <linux/list.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090012#include <linux/slab.h>
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010013#include <asm/ebcdic.h>
14#include "dasd_int.h"
15#include "dasd_eckd.h"
16
17#ifdef PRINTK_HEADER
18#undef PRINTK_HEADER
19#endif /* PRINTK_HEADER */
20#define PRINTK_HEADER "dasd(eckd):"
21
22
23/*
24 * General concept of alias management:
25 * - PAV and DASD alias management is specific to the eckd discipline.
26 * - A device is connected to an lcu as long as the device exists.
27 * dasd_alias_make_device_known_to_lcu will be called wenn the
28 * device is checked by the eckd discipline and
29 * dasd_alias_disconnect_device_from_lcu will be called
30 * before the device is deleted.
31 * - The dasd_alias_add_device / dasd_alias_remove_device
32 * functions mark the point when a device is 'ready for service'.
33 * - A summary unit check is a rare occasion, but it is mandatory to
34 * support it. It requires some complex recovery actions before the
35 * devices can be used again (see dasd_alias_handle_summary_unit_check).
36 * - dasd_alias_get_start_dev will find an alias device that can be used
37 * instead of the base device and does some (very simple) load balancing.
38 * This is the function that gets called for each I/O, so when improving
39 * something, this function should get faster or better, the rest has just
40 * to be correct.
41 */
42
43
44static void summary_unit_check_handling_work(struct work_struct *);
45static void lcu_update_work(struct work_struct *);
46static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
47
48static struct alias_root aliastree = {
49 .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
50 .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
51};
52
53static struct alias_server *_find_server(struct dasd_uid *uid)
54{
55 struct alias_server *pos;
56 list_for_each_entry(pos, &aliastree.serverlist, server) {
57 if (!strncmp(pos->uid.vendor, uid->vendor,
58 sizeof(uid->vendor))
59 && !strncmp(pos->uid.serial, uid->serial,
60 sizeof(uid->serial)))
61 return pos;
Peter Senna Tschudin3b974872015-08-04 17:11:15 +020062 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010063 return NULL;
64}
65
66static struct alias_lcu *_find_lcu(struct alias_server *server,
67 struct dasd_uid *uid)
68{
69 struct alias_lcu *pos;
70 list_for_each_entry(pos, &server->lculist, lcu) {
71 if (pos->uid.ssid == uid->ssid)
72 return pos;
Peter Senna Tschudin3b974872015-08-04 17:11:15 +020073 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010074 return NULL;
75}
76
77static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
78 struct dasd_uid *uid)
79{
80 struct alias_pav_group *pos;
81 __u8 search_unit_addr;
82
83 /* for hyper pav there is only one group */
84 if (lcu->pav == HYPER_PAV) {
85 if (list_empty(&lcu->grouplist))
86 return NULL;
87 else
88 return list_first_entry(&lcu->grouplist,
89 struct alias_pav_group, group);
90 }
91
92 /* for base pav we have to find the group that matches the base */
93 if (uid->type == UA_BASE_DEVICE)
94 search_unit_addr = uid->real_unit_addr;
95 else
96 search_unit_addr = uid->base_unit_addr;
97 list_for_each_entry(pos, &lcu->grouplist, group) {
Stefan Weinhuber4abb08c2008-08-01 16:39:09 +020098 if (pos->uid.base_unit_addr == search_unit_addr &&
99 !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100100 return pos;
Peter Senna Tschudin3b974872015-08-04 17:11:15 +0200101 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100102 return NULL;
103}
104
105static struct alias_server *_allocate_server(struct dasd_uid *uid)
106{
107 struct alias_server *server;
108
109 server = kzalloc(sizeof(*server), GFP_KERNEL);
110 if (!server)
111 return ERR_PTR(-ENOMEM);
112 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
113 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
114 INIT_LIST_HEAD(&server->server);
115 INIT_LIST_HEAD(&server->lculist);
116 return server;
117}
118
/* Release an alias_server allocated by _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
123
/*
 * Allocate and initialize an alias_lcu for the lcu identified by @uid.
 * The uac buffer and the reset-summary-unit-check request (rsu_cqr plus
 * its channel program and data) are preallocated with GFP_DMA here;
 * rsu_cqr is later reused by reset_summary_unit_check(), presumably so
 * that path never has to allocate memory -- TODO confirm.
 * Returns the new lcu or ERR_PTR(-ENOMEM).
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a fresh lcu must read its unit address configuration first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

/* unwind partial allocations in reverse order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
170
/* Release an alias_lcu and all buffers allocated by _allocate_lcu(). */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
179
180/*
181 * This is the function that will allocate all the server and lcu data,
182 * so this function must be called first for a new device.
 * The return value is 0 on success in either case - whether the lcu was
 * already known before or had to be newly created.
185 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
186 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/*
		 * Allocation may sleep, so drop the lock for it and
		 * re-check afterwards whether another caller created the
		 * server in the meantime.
		 */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same drop-lock / re-check pattern as for the server */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	/* new devices start out on the lcu's inactive list */
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
238
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep (and the worker takes
		 * lcu->lock itself), so drop the lock around it and
		 * re-check the pointer afterwards - it may have been
		 * cleared or reassigned in the meantime.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same drop-lock / re-check pattern for the ruac worker */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			/* drop the ref taken by _schedule_lcu_update */
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/*
	 * Free the lcu and/or server when this was their last device;
	 * otherwise reschedule the update that was cancelled above.
	 */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
305
/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 * Called with lcu->lock held by all callers in this file (hence the
 * GFP_ATOMIC allocation below).
 */

static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{

	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;
	struct dasd_uid uid;

	/* refresh the cached uid type/base address from the uac data */
	spin_lock(get_ccwdev_lock(device->cdev));
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	spin_unlock(get_ccwdev_lock(device->cdev));
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		/* first device of this group: create it */
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	/* base devices and aliases are kept on separate per-group lists */
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
};
357
358static void _remove_device_from_lcu(struct alias_lcu *lcu,
359 struct dasd_device *device)
360{
Sebastian Ott543691a42016-03-04 10:34:05 +0100361 struct dasd_eckd_private *private = device->private;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100362 struct alias_pav_group *group;
363
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100364 list_move(&device->alias_list, &lcu->inactive_devices);
365 group = private->pavgroup;
366 if (!group)
367 return;
368 private->pavgroup = NULL;
369 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
370 list_del(&group->group);
371 kfree(group);
372 return;
373 }
374 if (group->next == device)
375 group->next = NULL;
376};
377
Stefan Haberland03429f32012-09-11 17:19:12 +0200378static int
379suborder_not_supported(struct dasd_ccw_req *cqr)
380{
381 char *sense;
382 char reason;
383 char msg_format;
384 char msg_no;
385
386 sense = dasd_get_sense(&cqr->irb);
387 if (!sense)
388 return 0;
389
390 reason = sense[0];
391 msg_format = (sense[7] & 0xF0);
392 msg_no = (sense[7] & 0x0F);
393
394 /* command reject, Format 0 MSG 4 - invalid parameter */
395 if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
396 return 1;
397
398 return 0;
399}
400
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100401static int read_unit_address_configuration(struct dasd_device *device,
402 struct alias_lcu *lcu)
403{
404 struct dasd_psf_prssd_data *prssdp;
405 struct dasd_ccw_req *cqr;
406 struct ccw1 *ccw;
407 int rc;
408 unsigned long flags;
409
Stefan Haberland68b781f2009-09-11 10:28:29 +0200410 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100411 (sizeof(struct dasd_psf_prssd_data)),
412 device);
413 if (IS_ERR(cqr))
414 return PTR_ERR(cqr);
415 cqr->startdev = device;
416 cqr->memdev = device;
417 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
418 cqr->retries = 10;
419 cqr->expires = 20 * HZ;
420
421 /* Prepare for Read Subsystem Data */
422 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
423 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
424 prssdp->order = PSF_ORDER_PRSSD;
425 prssdp->suborder = 0x0e; /* Read unit address configuration */
426 /* all other bytes of prssdp must be zero */
427
428 ccw = cqr->cpaddr;
429 ccw->cmd_code = DASD_ECKD_CCW_PSF;
430 ccw->count = sizeof(struct dasd_psf_prssd_data);
431 ccw->flags |= CCW_FLAG_CC;
432 ccw->cda = (__u32)(addr_t) prssdp;
433
434 /* Read Subsystem Data - feature codes */
435 memset(lcu->uac, 0, sizeof(*(lcu->uac)));
436
437 ccw++;
438 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
439 ccw->count = sizeof(*(lcu->uac));
440 ccw->cda = (__u32)(addr_t) lcu->uac;
441
Heiko Carstens1aae0562013-01-30 09:49:40 +0100442 cqr->buildclk = get_tod_clock();
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100443 cqr->status = DASD_CQR_FILLED;
444
445 /* need to unset flag here to detect race with summary unit check */
446 spin_lock_irqsave(&lcu->lock, flags);
447 lcu->flags &= ~NEED_UAC_UPDATE;
448 spin_unlock_irqrestore(&lcu->lock, flags);
449
450 do {
451 rc = dasd_sleep_on(cqr);
Stefan Haberland03429f32012-09-11 17:19:12 +0200452 if (rc && suborder_not_supported(cqr))
453 return -EOPNOTSUPP;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100454 } while (rc && (cqr->retries > 0));
455 if (rc) {
456 spin_lock_irqsave(&lcu->lock, flags);
457 lcu->flags |= NEED_UAC_UPDATE;
458 spin_unlock_irqrestore(&lcu->lock, flags);
459 }
460 dasd_kfree_request(cqr, cqr->memdev);
461 return rc;
462}
463
/*
 * Rebuild the PAV state of @lcu from scratch: dissolve all existing
 * groups (moving their devices to the active list), re-read the unit
 * address configuration via @refdev (unlocked, as it performs I/O),
 * derive the lcu's PAV mode and re-group all active devices.
 * Returns 0 on success or the error from read_unit_address_configuration.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* move every grouped device back to the active list, drop groups */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/* the first alias entry found determines the lcu's PAV mode */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
517
/*
 * Delayed-work handler for the ruac (read unit address configuration)
 * update.  Runs _lcu_update on the device recorded by
 * _schedule_lcu_update; on failure (other than -EOPNOTSUPP) or if a
 * new update was requested meanwhile, reschedules itself, otherwise
 * clears the pending state and drops the device reference taken when
 * the work was scheduled.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		/* reference travels with the rescheduled work; drop it
		 * only if scheduling failed (work already pending) */
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
548
/*
 * Flag the lcu as needing a uac update and schedule lcu_update_work on
 * a suitable device: prefer @device if it is still connected, then any
 * grouped device, then any device from the active list.  A reference
 * on the chosen device is held until the worker has run.
 * Called with lcu->lock held by all callers in this file.
 * Returns 0 on success or when an update is already scheduled/running;
 * -EINVAL when no usable device was found.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	/* the worker drops this reference when it is done */
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
591
/*
 * Mark @device 'ready for service': assign it to its PAV group, or -
 * when the cached unit address configuration looks outdated - keep it
 * on the active list and trigger an lcu update instead.
 * Returns 0 or a negative error code from _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	__u8 uaddr = private->uid.real_unit_addr;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Check if device and lcu type differ. If so, the uac data may be
	 * outdated and needs to be updated.
	 */
	if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
		lcu->flags |= UPDATE_PENDING;
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "uid type mismatch - trigger rescan");
	}
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		/* group assignment is deferred until the update has run */
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
623
Stefan Haberland501183f2010-05-17 10:00:10 +0200624int dasd_alias_update_add_device(struct dasd_device *device)
625{
Sebastian Ott543691a42016-03-04 10:34:05 +0100626 struct dasd_eckd_private *private = device->private;
627
Stefan Haberland501183f2010-05-17 10:00:10 +0200628 private->lcu->flags |= UPDATE_PENDING;
629 return dasd_alias_add_device(device);
630}
631
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100632int dasd_alias_remove_device(struct dasd_device *device)
633{
Sebastian Ott543691a42016-03-04 10:34:05 +0100634 struct dasd_eckd_private *private = device->private;
635 struct alias_lcu *lcu = private->lcu;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100636 unsigned long flags;
637
Stefan Haberlandf602f6d62011-01-31 11:30:03 +0100638 /* nothing to do if already removed */
639 if (!lcu)
640 return 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100641 spin_lock_irqsave(&lcu->lock, flags);
642 _remove_device_from_lcu(lcu, device);
643 spin_unlock_irqrestore(&lcu->lock, flags);
644 return 0;
645}
646
/*
 * Pick an alias device of @base_device's PAV group for the next I/O,
 * using a simple round-robin over the group's alias list (group->next
 * remembers where the rotation stands) as very simple load balancing.
 * Returns NULL when the base device should be used instead: no PAV,
 * an lcu update is pending, prefix is not supported, the candidate is
 * stopped or going offline, or its request count is not lower than the
 * base device's.
 * This runs once per I/O, so it must stay cheap.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_pav_group *group = private->pavgroup;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	unsigned long flags;

	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin cursor, wrapping at the end of the list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
697
698/*
699 * Summary unit check handling depends on the way alias devices
700 * are handled so it is done here rather then in dasd_eckd.c
701 */
702static int reset_summary_unit_check(struct alias_lcu *lcu,
703 struct dasd_device *device,
704 char reason)
705{
706 struct dasd_ccw_req *cqr;
707 int rc = 0;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +0100708 struct ccw1 *ccw;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100709
710 cqr = lcu->rsu_cqr;
711 strncpy((char *) &cqr->magic, "ECKD", 4);
712 ASCEBC((char *) &cqr->magic, 4);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +0100713 ccw = cqr->cpaddr;
714 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
Stefan Haberland020bf042015-12-15 10:16:43 +0100715 ccw->flags = CCW_FLAG_SLI;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +0100716 ccw->count = 16;
717 ccw->cda = (__u32)(addr_t) cqr->data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100718 ((char *)cqr->data)[0] = reason;
719
720 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
721 cqr->retries = 255; /* set retry counter to enable basic ERP */
722 cqr->startdev = device;
723 cqr->memdev = device;
724 cqr->block = NULL;
725 cqr->expires = 5 * HZ;
Heiko Carstens1aae0562013-01-30 09:49:40 +0100726 cqr->buildclk = get_tod_clock();
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100727 cqr->status = DASD_CQR_FILLED;
728
729 rc = dasd_sleep_on_immediatly(cqr);
730 return rc;
731}
732
/*
 * Kick the block layer and device tasklets of every base device on the
 * lcu so that queued I/O gets (re)started - presumably after summary
 * unit check recovery; the caller is not visible here.  Alias devices
 * on the active/inactive lists are skipped; a pavgroup's baselist
 * contains only base devices, so no check is needed there.
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
761
/*
 * Flush the request queues of all alias devices on the lcu and move
 * them to the active_devices list (detached from their groups).
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here ist that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* flushing may sleep - drop the lock around it */
		spin_unlock_irqrestore(&lcu->lock, flags);
		dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
812
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100813static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100814{
815 struct alias_pav_group *pavgroup;
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100816 struct dasd_device *device;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100817
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100818 list_for_each_entry(device, &lcu->active_devices, alias_list) {
819 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100820 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100821 spin_unlock(get_ccwdev_lock(device->cdev));
822 }
823 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
824 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100825 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100826 spin_unlock(get_ccwdev_lock(device->cdev));
827 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100828 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100829 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
830 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100831 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100832 spin_unlock(get_ccwdev_lock(device->cdev));
833 }
834 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
835 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100836 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100837 spin_unlock(get_ccwdev_lock(device->cdev));
838 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100839 }
840}
841
842static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
843{
844 struct alias_pav_group *pavgroup;
845 struct dasd_device *device;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100846
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100847 list_for_each_entry(device, &lcu->active_devices, alias_list) {
848 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100849 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100850 spin_unlock(get_ccwdev_lock(device->cdev));
851 }
852 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
853 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100854 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100855 spin_unlock(get_ccwdev_lock(device->cdev));
856 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100857 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100858 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
859 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100860 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100861 spin_unlock(get_ccwdev_lock(device->cdev));
862 }
863 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
864 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100865 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100866 spin_unlock(get_ccwdev_lock(device->cdev));
867 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100868 }
869}
870
/*
 * Worker that performs the actual summary unit check recovery:
 *  1. flush all requests from the alias devices of the lcu
 *  2. reset the summary unit check on the device that received the
 *     interrupt, after removing its SU/PENDING stop bits so the reset
 *     request can be started
 *  3. unstop all devices, restart the base devices and schedule a
 *     re-read of the alias configuration
 * The device reference taken when this work was scheduled (see
 * dasd_alias_handle_summary_unit_check) is dropped here.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* mark the worker as idle so a new summary unit check can be queued */
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
902
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100903void dasd_alias_handle_summary_unit_check(struct work_struct *work)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100904{
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100905 struct dasd_device *device = container_of(work, struct dasd_device,
906 suc_work);
Sebastian Ott543691a42016-03-04 10:34:05 +0100907 struct dasd_eckd_private *private = device->private;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100908 struct alias_lcu *lcu;
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100909 unsigned long flags;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100910
911 lcu = private->lcu;
912 if (!lcu) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100913 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100914 "device not ready to handle summary"
915 " unit check (no lcu structure)");
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100916 goto out;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100917 }
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100918 spin_lock_irqsave(&lcu->lock, flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100919 /* If this device is about to be removed just return and wait for
920 * the next interrupt on a different device
921 */
922 if (list_empty(&device->alias_list)) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100923 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100924 "device is in offline processing,"
925 " don't do summary unit check handling");
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100926 goto out_unlock;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100927 }
928 if (lcu->suc_data.device) {
929 /* already scheduled or running */
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100930 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100931 "previous instance of summary unit check worker"
932 " still pending");
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100933 goto out_unlock;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100934 }
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100935 _stop_all_devices_on_lcu(lcu);
936 /* prepare for lcu_update */
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100937 lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
938 lcu->suc_data.reason = private->suc_reason;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100939 lcu->suc_data.device = device;
Stefan Haberland9d862ab2015-12-15 10:45:05 +0100940 dasd_get_device(device);
Stefan Haberland9d862ab2015-12-15 10:45:05 +0100941 if (!schedule_work(&lcu->suc_data.worker))
942 dasd_put_device(device);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100943out_unlock:
944 spin_unlock_irqrestore(&lcu->lock, flags);
945out:
946 clear_bit(DASD_FLAG_SUC, &device->flags);
947 dasd_put_device(device);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100948};