/*
* Filename: cregs.c
*
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
*	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
24
#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

/* How long the hardware is given to finish one creg command. */
#define CREG_TIMEOUT_MSEC	10000

/*
 * Completion callback for a creg command. @st is 0 on success or a
 * negative errno (-ETIMEDOUT, -ECANCELED, -EIO) on failure.
 */
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
				struct creg_cmd *cmd,
				int st);

/* One queued card-register (creg) command. */
struct creg_cmd {
	struct list_head list;	/* link in creg_ctrl.queue */
	creg_cmd_cb cb;		/* completion callback, may be NULL */
	void *cb_private;	/* opaque cookie handed back to cb */
	unsigned int op;	/* CREG_OP_READ or CREG_OP_WRITE */
	unsigned int addr;	/* creg address to access */
	int cnt8;		/* transfer length in bytes */
	void *buf;		/* payload buffer for the transfer */
	unsigned int stream;	/* non-zero: byte-stream mode (big-endian
				 * accessors used on LE hosts, see
				 * copy_to/from_creg_data) */
	unsigned int status;	/* raw hardware status after completion */
};

/* Slab cache backing all creg_cmd allocations. */
static struct kmem_cache *creg_cmd_pool;


/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianess!!! Aborting...
#endif
60
61static void copy_to_creg_data(struct rsxx_cardinfo *card,
62 int cnt8,
63 void *buf,
64 unsigned int stream)
65{
66 int i = 0;
67 u32 *data = buf;
68
69 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
70 /*
71 * Firmware implementation makes it necessary to byte swap on
72 * little endian processors.
73 */
74 if (LITTLE_ENDIAN && stream)
75 iowrite32be(data[i], card->regmap + CREG_DATA(i));
76 else
77 iowrite32(data[i], card->regmap + CREG_DATA(i));
78 }
79}
80
81
82static void copy_from_creg_data(struct rsxx_cardinfo *card,
83 int cnt8,
84 void *buf,
85 unsigned int stream)
86{
87 int i = 0;
88 u32 *data = buf;
89
90 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
91 /*
92 * Firmware implementation makes it necessary to byte swap on
93 * little endian processors.
94 */
95 if (LITTLE_ENDIAN && stream)
96 data[i] = ioread32be(card->regmap + CREG_DATA(i));
97 else
98 data[i] = ioread32(card->regmap + CREG_DATA(i));
99 }
100}
101
102static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
103{
104 struct creg_cmd *cmd;
105
106 /*
107 * Spin lock is needed because this can be called in atomic/interrupt
108 * context.
109 */
Philip J Kelleherc206c702013-02-18 21:35:59 +0100110 spin_lock_bh(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100111 cmd = card->creg_ctrl.active_cmd;
112 card->creg_ctrl.active_cmd = NULL;
Philip J Kelleherc206c702013-02-18 21:35:59 +0100113 spin_unlock_bh(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100114
115 return cmd;
116}
117
118static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
119{
120 iowrite32(cmd->addr, card->regmap + CREG_ADD);
121 iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
122
123 if (cmd->op == CREG_OP_WRITE) {
124 if (cmd->buf)
125 copy_to_creg_data(card, cmd->cnt8,
126 cmd->buf, cmd->stream);
127 }
128
Philip J Kelleherc206c702013-02-18 21:35:59 +0100129 /*
130 * Data copy must complete before initiating the command. This is
131 * needed for weakly ordered processors (i.e. PowerPC), so that all
132 * neccessary registers are written before we kick the hardware.
133 */
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100134 wmb();
135
136 /* Setting the valid bit will kick off the command. */
137 iowrite32(cmd->op, card->regmap + CREG_CMD);
138}
139
140static void creg_kick_queue(struct rsxx_cardinfo *card)
141{
142 if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
143 return;
144
145 card->creg_ctrl.active = 1;
146 card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
147 struct creg_cmd, list);
148 list_del(&card->creg_ctrl.active_cmd->list);
149 card->creg_ctrl.q_depth--;
150
151 /*
152 * We have to set the timer before we push the new command. Otherwise,
153 * we could create a race condition that would occur if the timer
154 * was not canceled, and expired after the new command was pushed,
155 * but before the command was issued to hardware.
156 */
157 mod_timer(&card->creg_ctrl.cmd_timer,
158 jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
159
160 creg_issue_cmd(card, card->creg_ctrl.active_cmd);
161}
162
163static int creg_queue_cmd(struct rsxx_cardinfo *card,
164 unsigned int op,
165 unsigned int addr,
166 unsigned int cnt8,
167 void *buf,
168 int stream,
169 creg_cmd_cb callback,
170 void *cb_private)
171{
172 struct creg_cmd *cmd;
173
174 /* Don't queue stuff up if we're halted. */
175 if (unlikely(card->halt))
176 return -EINVAL;
177
178 if (card->creg_ctrl.reset)
179 return -EAGAIN;
180
181 if (cnt8 > MAX_CREG_DATA8)
182 return -EINVAL;
183
184 cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
185 if (!cmd)
186 return -ENOMEM;
187
188 INIT_LIST_HEAD(&cmd->list);
189
190 cmd->op = op;
191 cmd->addr = addr;
192 cmd->cnt8 = cnt8;
193 cmd->buf = buf;
194 cmd->stream = stream;
195 cmd->cb = callback;
196 cmd->cb_private = cb_private;
197 cmd->status = 0;
198
Philip J Kelleherc206c702013-02-18 21:35:59 +0100199 spin_lock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100200 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
201 card->creg_ctrl.q_depth++;
202 creg_kick_queue(card);
Philip J Kelleherc206c702013-02-18 21:35:59 +0100203 spin_unlock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100204
205 return 0;
206}
207
208static void creg_cmd_timed_out(unsigned long data)
209{
210 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
211 struct creg_cmd *cmd;
212
213 cmd = pop_active_cmd(card);
214 if (cmd == NULL) {
215 card->creg_ctrl.creg_stats.creg_timeout++;
216 dev_warn(CARD_TO_DEV(card),
217 "No active command associated with timeout!\n");
218 return;
219 }
220
221 if (cmd->cb)
222 cmd->cb(card, cmd, -ETIMEDOUT);
223
224 kmem_cache_free(creg_cmd_pool, cmd);
225
Philip J Kelleherc206c702013-02-18 21:35:59 +0100226
227 spin_lock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100228 card->creg_ctrl.active = 0;
229 creg_kick_queue(card);
Philip J Kelleherc206c702013-02-18 21:35:59 +0100230 spin_unlock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100231}
232
233
234static void creg_cmd_done(struct work_struct *work)
235{
236 struct rsxx_cardinfo *card;
237 struct creg_cmd *cmd;
238 int st = 0;
239
240 card = container_of(work, struct rsxx_cardinfo,
241 creg_ctrl.done_work);
242
243 /*
244 * The timer could not be cancelled for some reason,
245 * race to pop the active command.
246 */
247 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
248 card->creg_ctrl.creg_stats.failed_cancel_timer++;
249
250 cmd = pop_active_cmd(card);
251 if (cmd == NULL) {
252 dev_err(CARD_TO_DEV(card),
253 "Spurious creg interrupt!\n");
254 return;
255 }
256
257 card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
258 cmd->status = card->creg_ctrl.creg_stats.stat;
259 if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
260 dev_err(CARD_TO_DEV(card),
261 "Invalid status on creg command\n");
262 /*
263 * At this point we're probably reading garbage from HW. Don't
264 * do anything else that could mess up the system and let
265 * the sync function return an error.
266 */
267 st = -EIO;
268 goto creg_done;
269 } else if (cmd->status & CREG_STAT_ERROR) {
270 st = -EIO;
271 }
272
273 if ((cmd->op == CREG_OP_READ)) {
274 unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
275
276 /* Paranoid Sanity Checks */
277 if (!cmd->buf) {
278 dev_err(CARD_TO_DEV(card),
279 "Buffer not given for read.\n");
280 st = -EIO;
281 goto creg_done;
282 }
283 if (cnt8 != cmd->cnt8) {
284 dev_err(CARD_TO_DEV(card),
285 "count mismatch\n");
286 st = -EIO;
287 goto creg_done;
288 }
289
290 copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
291 }
292
293creg_done:
294 if (cmd->cb)
295 cmd->cb(card, cmd, st);
296
297 kmem_cache_free(creg_cmd_pool, cmd);
298
Philip J Kelleherc206c702013-02-18 21:35:59 +0100299 spin_lock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100300 card->creg_ctrl.active = 0;
301 creg_kick_queue(card);
Philip J Kelleherc206c702013-02-18 21:35:59 +0100302 spin_unlock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100303}
304
305static void creg_reset(struct rsxx_cardinfo *card)
306{
307 struct creg_cmd *cmd = NULL;
308 struct creg_cmd *tmp;
309 unsigned long flags;
310
Philip J Kelleherc206c702013-02-18 21:35:59 +0100311 /*
312 * mutex_trylock is used here because if reset_lock is taken then a
313 * reset is already happening. So, we can just go ahead and return.
314 */
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100315 if (!mutex_trylock(&card->creg_ctrl.reset_lock))
316 return;
317
318 card->creg_ctrl.reset = 1;
319 spin_lock_irqsave(&card->irq_lock, flags);
320 rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
321 spin_unlock_irqrestore(&card->irq_lock, flags);
322
323 dev_warn(CARD_TO_DEV(card),
324 "Resetting creg interface for recovery\n");
325
326 /* Cancel outstanding commands */
Philip J Kelleherc206c702013-02-18 21:35:59 +0100327 spin_lock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100328 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
329 list_del(&cmd->list);
330 card->creg_ctrl.q_depth--;
331 if (cmd->cb)
332 cmd->cb(card, cmd, -ECANCELED);
333 kmem_cache_free(creg_cmd_pool, cmd);
334 }
335
336 cmd = card->creg_ctrl.active_cmd;
337 card->creg_ctrl.active_cmd = NULL;
338 if (cmd) {
339 if (timer_pending(&card->creg_ctrl.cmd_timer))
340 del_timer_sync(&card->creg_ctrl.cmd_timer);
341
342 if (cmd->cb)
343 cmd->cb(card, cmd, -ECANCELED);
344 kmem_cache_free(creg_cmd_pool, cmd);
345
346 card->creg_ctrl.active = 0;
347 }
Philip J Kelleherc206c702013-02-18 21:35:59 +0100348 spin_unlock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100349
350 card->creg_ctrl.reset = 0;
351 spin_lock_irqsave(&card->irq_lock, flags);
352 rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
353 spin_unlock_irqrestore(&card->irq_lock, flags);
354
355 mutex_unlock(&card->creg_ctrl.reset_lock);
356}
357
/* Used for synchronous accesses */
struct creg_completion {
	struct completion *cmd_done;	/* signalled when the command finishes */
	int st;				/* completion status: 0 or -errno */
	u32 creg_status;		/* raw hardware status of the command */
};
364
365static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
366 struct creg_cmd *cmd,
367 int st)
368{
369 struct creg_completion *cmd_completion;
370
Philip J Kelleherc206c702013-02-18 21:35:59 +0100371 cmd_completion = cmd->cb_private;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100372 BUG_ON(!cmd_completion);
373
374 cmd_completion->st = st;
375 cmd_completion->creg_status = cmd->status;
376 complete(cmd_completion->cmd_done);
377}
378
379static int __issue_creg_rw(struct rsxx_cardinfo *card,
380 unsigned int op,
381 unsigned int addr,
382 unsigned int cnt8,
383 void *buf,
384 int stream,
385 unsigned int *hw_stat)
386{
387 DECLARE_COMPLETION_ONSTACK(cmd_done);
388 struct creg_completion completion;
389 unsigned long timeout;
390 int st;
391
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100392 completion.cmd_done = &cmd_done;
393 completion.st = 0;
394 completion.creg_status = 0;
395
396 st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
397 &completion);
398 if (st)
399 return st;
400
Philip J Kelleherc206c702013-02-18 21:35:59 +0100401 /*
402 * This timeout is neccessary for unresponsive hardware. The additional
403 * 20 seconds to used to guarantee that each cregs requests has time to
404 * complete.
405 */
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100406 timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
Philip J Kelleherc206c702013-02-18 21:35:59 +0100407 card->creg_ctrl.q_depth) + 20000);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100408
409 /*
410 * The creg interface is guaranteed to complete. It has a timeout
411 * mechanism that will kick in if hardware does not respond.
412 */
413 st = wait_for_completion_timeout(completion.cmd_done, timeout);
414 if (st == 0) {
415 /*
416 * This is really bad, because the kernel timer did not
417 * expire and notify us of a timeout!
418 */
419 dev_crit(CARD_TO_DEV(card),
420 "cregs timer failed\n");
421 creg_reset(card);
422 return -EIO;
423 }
424
425 *hw_stat = completion.creg_status;
426
427 if (completion.st) {
428 dev_warn(CARD_TO_DEV(card),
429 "creg command failed(%d x%08x)\n",
430 completion.st, addr);
431 return completion.st;
432 }
433
434 return 0;
435}
436
437static int issue_creg_rw(struct rsxx_cardinfo *card,
438 u32 addr,
439 unsigned int size8,
440 void *data,
441 int stream,
442 int read)
443{
444 unsigned int hw_stat;
445 unsigned int xfer;
446 unsigned int op;
447 int st;
448
449 op = read ? CREG_OP_READ : CREG_OP_WRITE;
450
451 do {
452 xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
453
454 st = __issue_creg_rw(card, op, addr, xfer,
455 data, stream, &hw_stat);
456 if (st)
457 return st;
458
Philip J Kelleherc206c702013-02-18 21:35:59 +0100459 data = (char *)data + xfer;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100460 addr += xfer;
461 size8 -= xfer;
462 } while (size8);
463
464 return 0;
465}
466
467/* ---------------------------- Public API ---------------------------------- */
468int rsxx_creg_write(struct rsxx_cardinfo *card,
469 u32 addr,
470 unsigned int size8,
471 void *data,
472 int byte_stream)
473{
474 return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
475}
476
477int rsxx_creg_read(struct rsxx_cardinfo *card,
478 u32 addr,
479 unsigned int size8,
480 void *data,
481 int byte_stream)
482{
483 return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
484}
485
486int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
487{
488 return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
489 sizeof(*state), state, 0);
490}
491
492int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
493{
494 unsigned int size;
495 int st;
496
497 st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
498 sizeof(size), &size, 0);
499 if (st)
500 return st;
501
502 *size8 = (u64)size * RSXX_HW_BLK_SIZE;
503 return 0;
504}
505
506int rsxx_get_num_targets(struct rsxx_cardinfo *card,
507 unsigned int *n_targets)
508{
509 return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
510 sizeof(*n_targets), n_targets, 0);
511}
512
513int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
514 u32 *capabilities)
515{
516 return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
517 sizeof(*capabilities), capabilities, 0);
518}
519
520int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
521{
522 return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
523 sizeof(cmd), &cmd, 0);
524}
525
526
527/*----------------- HW Log Functions -------------------*/
/*
 * Emit one hardware log message at the severity encoded in its "<#>"
 * prefix. The level is remembered (static) so continuation fragments
 * that lack a prefix reuse the previous message's severity.
 */
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if (len > 3 && str[0] == '<' && str[2] == '>') {
		level = str[1];
		str += 3;	/* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':	/* info is also the fallback for unknown levels */
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
572
/*
 * The substrncpy function copies the src string (including its
 * terminating '\0' character) into dest, stopping after at most count
 * bytes. If no '\0' is seen within count bytes, dest is left
 * unterminated. Returns the number of bytes copied to dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int copied = 0;

	while (copied < count) {
		dest[copied] = src[copied];
		copied++;
		if (src[copied - 1] == '\0')
			break;
	}
	return copied;
}
592
593
/*
 * Completion callback for a hardware-log read. Splits the returned
 * buffer (cmd->buf) into '\0'-separated message fragments, accumulates
 * them in card->log.buf, and flushes each complete message (or a full
 * buffer) to hw_log_msg(). Re-queues another log read if the hardware
 * reports more log data pending.
 */
static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		/* Append the next fragment to the accumulation buffer. */
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		/* len includes the '\0' when one was found within cnt. */
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}

	}

	/* More log text waiting in hardware — fetch the next chunk. */
	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
636
637int rsxx_read_hw_log(struct rsxx_cardinfo *card)
638{
639 int st;
640
641 st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
642 sizeof(card->log.tmp), card->log.tmp,
643 1, read_hw_log_done, NULL);
644 if (st)
645 dev_err(CARD_TO_DEV(card),
646 "Failed getting log text\n");
647
648 return st;
649}
650
651/*-------------- IOCTL REG Access ------------------*/
652static int issue_reg_cmd(struct rsxx_cardinfo *card,
653 struct rsxx_reg_access *cmd,
654 int read)
655{
656 unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
657
658 return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
659 cmd->stream, &cmd->stat);
660}
661
662int rsxx_reg_access(struct rsxx_cardinfo *card,
663 struct rsxx_reg_access __user *ucmd,
664 int read)
665{
666 struct rsxx_reg_access cmd;
667 int st;
668
669 st = copy_from_user(&cmd, ucmd, sizeof(cmd));
670 if (st)
671 return -EFAULT;
672
Philip J Kelleherc206c702013-02-18 21:35:59 +0100673 if (cmd.cnt > RSXX_MAX_REG_CNT)
674 return -EFAULT;
675
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100676 st = issue_reg_cmd(card, &cmd, read);
677 if (st)
678 return st;
679
680 st = put_user(cmd.stat, &ucmd->stat);
681 if (st)
682 return -EFAULT;
683
684 if (read) {
685 st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
686 if (st)
687 return -EFAULT;
688 }
689
690 return 0;
691}
692
693/*------------ Initialization & Setup --------------*/
694int rsxx_creg_setup(struct rsxx_cardinfo *card)
695{
696 card->creg_ctrl.active_cmd = NULL;
697
698 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
699 mutex_init(&card->creg_ctrl.reset_lock);
700 INIT_LIST_HEAD(&card->creg_ctrl.queue);
Philip J Kelleherc206c702013-02-18 21:35:59 +0100701 spin_lock_init(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100702 setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
703 (unsigned long) card);
704
705 return 0;
706}
707
708void rsxx_creg_destroy(struct rsxx_cardinfo *card)
709{
710 struct creg_cmd *cmd;
711 struct creg_cmd *tmp;
712 int cnt = 0;
713
714 /* Cancel outstanding commands */
Philip J Kelleherc206c702013-02-18 21:35:59 +0100715 spin_lock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100716 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
717 list_del(&cmd->list);
718 if (cmd->cb)
719 cmd->cb(card, cmd, -ECANCELED);
720 kmem_cache_free(creg_cmd_pool, cmd);
721 cnt++;
722 }
723
724 if (cnt)
725 dev_info(CARD_TO_DEV(card),
726 "Canceled %d queue creg commands\n", cnt);
727
728 cmd = card->creg_ctrl.active_cmd;
729 card->creg_ctrl.active_cmd = NULL;
730 if (cmd) {
731 if (timer_pending(&card->creg_ctrl.cmd_timer))
732 del_timer_sync(&card->creg_ctrl.cmd_timer);
733
734 if (cmd->cb)
735 cmd->cb(card, cmd, -ECANCELED);
736 dev_info(CARD_TO_DEV(card),
737 "Canceled active creg command\n");
738 kmem_cache_free(creg_cmd_pool, cmd);
739 }
Philip J Kelleherc206c702013-02-18 21:35:59 +0100740 spin_unlock(&card->creg_ctrl.lock);
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100741
742 cancel_work_sync(&card->creg_ctrl.done_work);
743}
744
745
746int rsxx_creg_init(void)
747{
748 creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
749 if (!creg_cmd_pool)
750 return -ENOMEM;
751
752 return 0;
753}
754
755void rsxx_creg_cleanup(void)
756{
757 kmem_cache_destroy(creg_cmd_pool);
758}