// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

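/* Callback for the suspend request: signals that the request queue has been
 * flushed. */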
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

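/* Handle the "sclp_con_pages=" kernel parameter; values below the built-in
 * default SCLP_CONSOLE_PAGES are ignored. */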
static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

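/* Handle the "sclp_con_drop=" kernel parameter (non-zero: drop buffer pages
 * on buffer full condition). */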
static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

452/* Prepare read event data request. Called while sclp_lock is locked. */
Heiko Carstens364c8552007-10-12 16:11:35 +0200453static void __sclp_make_read_req(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454{
455 struct sccb_header *sccb;
456
457 sccb = (struct sccb_header *) sclp_read_sccb;
458 clear_page(sccb);
459 memset(&sclp_read_req, 0, sizeof(struct sclp_req));
Heiko Carstensab14de62007-02-05 21:18:37 +0100460 sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461 sclp_read_req.status = SCLP_REQ_QUEUED;
462 sclp_read_req.start_count = 0;
463 sclp_read_req.callback = sclp_read_cb;
464 sclp_read_req.sccb = sccb;
465 sccb->length = PAGE_SIZE;
466 sccb->function_code = 0;
467 sccb->control_mask[2] = 0x80;
468}
469
470/* Search request list for request with matching sccb. Return request if found,
471 * NULL otherwise. Called while sclp_lock is locked. */
472static inline struct sclp_req *
473__sclp_find_req(u32 sccb)
474{
475 struct list_head *l;
476 struct sclp_req *req;
477
478 list_for_each(l, &sclp_req_queue) {
479 req = list_entry(l, struct sclp_req, list);
480 if (sccb == (u32) (addr_t) req->sccb)
481 return req;
482 }
483 return NULL;
484}
485
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
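	/* The interruption parameter encodes the address of the finished
	 * SCCB in its upper bits (low three bits masked off) and
	 * pending-event flags in the low bits. */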
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
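/*
 * One second is roughly 2^32 TOD clock units (TOD bit 51 is incremented
 * every microsecond), so shifting whole seconds left by 32 yields a
 * slightly generous approximation of the interval.
 */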
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
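	/* Leave only the service-signal subclass (CR0 bit 54) enabled */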
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

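/*
 * Usage sketch (illustrative, not part of this file): an event consumer
 * fills in the event masks and callbacks, then registers itself, e.g.:
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn  = my_receiver_fn,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 *
 * "my_listener" and "my_receiver_fn" are hypothetical names.
 */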
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
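		/* Flag 0x80 marks an event buffer as processed: shrink the
		 * SCCB and copy the remaining buffers over it */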
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
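	/* Event masks are narrower (SCLP_MASK_SIZE_COMPAT) in compat mode,
	 * otherwise sizeof(sccb_mask_t) wide; see the fallback in
	 * sclp_check_interface() */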
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
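				/* 0x74f0: the SCLP did not accept the wide
				 * event masks; fall back to compat-size masks
				 * and restart the retry loop */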
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

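/*
 * Deliver the power management event to each registered listener exactly
 * once. sclp_lock is dropped around the callback, so pm_event_posted is
 * used to track progress and the list walk is restarted after each
 * notification. With rollback set, previously notified listeners are
 * notified again and their posted flag is cleared.
 */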
1017static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
1018{
1019 struct sclp_register *reg;
1020 unsigned long flags;
1021
1022 if (!rollback) {
1023 spin_lock_irqsave(&sclp_lock, flags);
1024 list_for_each_entry(reg, &sclp_reg_list, list)
1025 reg->pm_event_posted = 0;
1026 spin_unlock_irqrestore(&sclp_lock, flags);
1027 }
1028 do {
1029 spin_lock_irqsave(&sclp_lock, flags);
1030 list_for_each_entry(reg, &sclp_reg_list, list) {
1031 if (rollback && reg->pm_event_posted)
1032 goto found;
1033 if (!rollback && !reg->pm_event_posted)
1034 goto found;
1035 }
1036 spin_unlock_irqrestore(&sclp_lock, flags);
1037 return;
1038found:
1039 spin_unlock_irqrestore(&sclp_lock, flags);
1040 if (reg->pm_event_fn)
1041 reg->pm_event_fn(reg, sclp_pm_event);
1042 reg->pm_event_posted = rollback ? 0 : 1;
1043 } while (1);
1044}
1045
/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.pm = &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);