blob: b016eee186570318bd0337b5a466134af173eb22 [file] [log] [blame]
Richard Purdie4b23aff2007-05-29 13:31:42 +01001/*
2 * MTD Oops/Panic logger
3 *
4 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/console.h>
27#include <linux/vmalloc.h>
28#include <linux/workqueue.h>
29#include <linux/sched.h>
30#include <linux/wait.h>
Richard Purdie621e4f82008-02-06 10:17:50 +000031#include <linux/delay.h>
Richard Purdie47c152b2008-01-29 10:21:56 +000032#include <linux/spinlock.h>
David Woodhousef9f7dd22008-02-07 10:50:57 +000033#include <linux/interrupt.h>
Richard Purdie4b23aff2007-05-29 13:31:42 +010034#include <linux/mtd/mtd.h>
35
Simon Kagstrom1114e3d2009-11-03 08:08:41 +020036/* Maximum MTD partition size */
37#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
38
Richard Purdief0482ee2008-07-26 09:22:45 +010039#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
Richard Purdie4b23aff2007-05-29 13:31:42 +010040#define OOPS_PAGE_SIZE 4096
41
/*
 * Driver-wide state.  A single static instance (oops_cxt) is shared by
 * the console, the MTD notifier and the two work items.
 */
static struct mtdoops_context {
	int mtd_index;		/* index of the MTD device to log to; -1 when unset */
	struct work_struct work_erase;	/* erases the next block before it is reused */
	struct work_struct work_write;	/* flushes oops_buf to flash (non-panic path) */
	struct mtd_info *mtd;	/* attached MTD device, NULL when detached */
	int oops_pages;		/* number of OOPS_PAGE_SIZE records on the device */
	int nextpage;		/* page index the next oops will be written to */
	int nextcount;		/* record counter stamped into each page header */
	unsigned long *oops_page_used;	/* bitmap: one bit per page, set = holds data */
	char *name;		/* target MTD name from console options (kstrdup'd) */

	void *oops_buf;		/* one-page staging buffer for console output */

	/* writecount and disabling ready are spin lock protected */
	spinlock_t writecount_lock;
	int ready;		/* 1 when nextpage is erased and safe to write */
	int writecount;		/* bytes accumulated in oops_buf so far */
} oops_cxt;
60
/* Record in the bitmap that @page holds (or is about to hold) oops data */
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}
65
/* Record in the bitmap that @page is erased / free for reuse */
static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}
70
/* Return non-zero if @page is marked as containing oops data */
static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}
75
Richard Purdie4b23aff2007-05-29 13:31:42 +010076static void mtdoops_erase_callback(struct erase_info *done)
77{
78 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
79 wake_up(wait_q);
80}
81
/*
 * Synchronously erase the eraseblock containing @offset and clear the
 * used-page bits for every oops page inside it.  Returns 0 on success
 * or the negative error from mtd->erase().
 */
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / OOPS_PAGE_SIZE;
	u32 erase_pages = mtd->erasesize / OOPS_PAGE_SIZE;
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;
	int page;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;	/* wakes wait_q when done */
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	/*
	 * Queue ourselves *before* starting the erase so the callback's
	 * wake_up() cannot be missed, then sleep until it fires.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		/* Erase never started: undo the sleep preparation */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}
123
Richard Purdie6ce0a852008-01-29 11:27:11 +0000124static void mtdoops_inc_counter(struct mtdoops_context *cxt)
Richard Purdie4b23aff2007-05-29 13:31:42 +0100125{
Richard Purdie4b23aff2007-05-29 13:31:42 +0100126 cxt->nextpage++;
Richard Purdieecd5b312008-07-26 09:17:41 +0100127 if (cxt->nextpage >= cxt->oops_pages)
Richard Purdie4b23aff2007-05-29 13:31:42 +0100128 cxt->nextpage = 0;
129 cxt->nextcount++;
130 if (cxt->nextcount == 0xffffffff)
131 cxt->nextcount = 0;
132
Simon Kagstrombe957452009-10-29 13:41:11 +0100133 if (page_is_used(cxt, cxt->nextpage)) {
Richard Purdie6ce0a852008-01-29 11:27:11 +0000134 schedule_work(&cxt->work_erase);
135 return;
136 }
Richard Purdie4b23aff2007-05-29 13:31:42 +0100137
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300138 printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
139 cxt->nextpage, cxt->nextcount);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100140 cxt->ready = 1;
Richard Purdie4b23aff2007-05-29 13:31:42 +0100141}
142
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	/* Round nextpage up to the start of the next eraseblock */
	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	/* Skip bad blocks (loop is a no-op when the device can't report them) */
	while (mtd->block_isbad) {
		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
		if (!ret)
			break;
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
			return;
		}
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08x\n",
		       cxt->nextpage * OOPS_PAGE_SIZE);
		i++;
		/* Advance one whole eraseblock, wrapping at the partition end */
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		/* Give up once every eraseblock has been tried */
		if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	/* Up to three erase attempts on the chosen block */
	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * OOPS_PAGE_SIZE);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		cxt->ready = 1;
		return;
	}

	/* Erase kept failing: mark the block bad (if possible) and move on */
	if (mtd->block_markbad && ret == -EIO) {
		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	/* Re-enter the skip loop above at the advance-and-retry step */
	goto badblock;
}
202
Richard Purdie621e4f82008-02-06 10:17:50 +0000203static void mtdoops_write(struct mtdoops_context *cxt, int panic)
Richard Purdie4b23aff2007-05-29 13:31:42 +0100204{
Richard Purdie6ce0a852008-01-29 11:27:11 +0000205 struct mtd_info *mtd = cxt->mtd;
206 size_t retlen;
207 int ret;
Richard Purdie4b23aff2007-05-29 13:31:42 +0100208
Richard Purdie6ce0a852008-01-29 11:27:11 +0000209 if (cxt->writecount < OOPS_PAGE_SIZE)
210 memset(cxt->oops_buf + cxt->writecount, 0xff,
211 OOPS_PAGE_SIZE - cxt->writecount);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100212
Richard Purdie621e4f82008-02-06 10:17:50 +0000213 if (panic)
214 ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
215 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
216 else
217 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
Richard Purdie6ce0a852008-01-29 11:27:11 +0000218 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
219
220 cxt->writecount = 0;
221
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300222 if (retlen != OOPS_PAGE_SIZE || ret < 0)
223 printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n",
224 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
Simon Kagstrombe957452009-10-29 13:41:11 +0100225 mark_page_used(cxt, cxt->nextpage);
Richard Purdie6ce0a852008-01-29 11:27:11 +0000226
227 mtdoops_inc_counter(cxt);
Richard Purdie621e4f82008-02-06 10:17:50 +0000228}
229
230
/* Scheduled work - flushes the buffered record via the non-panic path */
static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}
Richard Purdie6ce0a852008-01-29 11:27:11 +0000238
/*
 * Attach-time scan of the whole partition: rebuild the used-page bitmap
 * and locate the newest record so logging resumes where it left off.
 * The record counter wraps (mtdoops_inc_counter resets it before
 * 0xffffffff), hence the windowed comparisons when picking the maximum.
 */
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		/* Assume the page is used */
		mark_page_used(cxt, page);
		/* Header layout: count[0] = record counter, count[1] = magic */
		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
		if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
			printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n",
			       page * OOPS_PAGE_SIZE, retlen, ret);
			continue;
		}

		/* All-ones header means the page is still erased */
		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
			mark_page_unused(cxt, page);
		if (count[1] != MTDOOPS_KERNMSG_MAGIC)
			continue;
		if (count[0] == 0xffffffff)
			continue;
		if (maxcount == 0xffffffff) {
			/* First valid record seen */
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			/* Counter wrapped: small count is newer than a huge one */
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		/* No records at all: start at page 0 after erasing its block */
		cxt->nextpage = 0;
		cxt->nextcount = 1;
		schedule_work(&cxt->work_erase);
		return;
	}

	cxt->nextpage = maxpos;
	cxt->nextcount = maxcount;

	mtdoops_inc_counter(cxt);
}
289
290
291static void mtdoops_notify_add(struct mtd_info *mtd)
292{
293 struct mtdoops_context *cxt = &oops_cxt;
Simon Kagstrombe957452009-10-29 13:41:11 +0100294 u64 mtdoops_pages = mtd->size;
295
296 do_div(mtdoops_pages, OOPS_PAGE_SIZE);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100297
Adrian Huntere2a0f252009-02-16 18:21:35 +0200298 if (cxt->name && !strcmp(mtd->name, cxt->name))
299 cxt->mtd_index = mtd->index;
300
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300301 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
Richard Purdie4b23aff2007-05-29 13:31:42 +0100302 return;
303
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300304 if (mtd->size < mtd->erasesize * 2) {
305 printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
306 mtd->index);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100307 return;
308 }
309
Richard Purdie79dcd8e2008-01-29 10:25:55 +0000310 if (mtd->erasesize < OOPS_PAGE_SIZE) {
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300311 printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
312 mtd->index);
Richard Purdie79dcd8e2008-01-29 10:25:55 +0000313 return;
314 }
315
Simon Kagstrom1114e3d2009-11-03 08:08:41 +0200316 if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
317 printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
318 mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
319 return;
320 }
321
Simon Kagstrombe957452009-10-29 13:41:11 +0100322 /* oops_page_used is a bit field */
323 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
324 BITS_PER_LONG));
325 if (!cxt->oops_page_used) {
326 printk(KERN_ERR "Could not allocate page array\n");
327 return;
328 }
Simon Kagstrom1114e3d2009-11-03 08:08:41 +0200329
Richard Purdie4b23aff2007-05-29 13:31:42 +0100330 cxt->mtd = mtd;
Simon Kagstrom1114e3d2009-11-03 08:08:41 +0200331 cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
Richard Purdie6ce0a852008-01-29 11:27:11 +0000332 find_next_position(cxt);
Richard Purdie79dcd8e2008-01-29 10:25:55 +0000333 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100334}
335
/*
 * MTD remove notifier: detach from the device we were logging to and
 * wait for any outstanding erase/write work items to finish.
 */
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	/* Work functions check cxt->mtd and bail out once this is NULL */
	cxt->mtd = NULL;
	flush_scheduled_work();
}
346
/*
 * Flush buffered oops data out to flash.  Called on console unblank and
 * from mtdoops_console_write() (when not oopsing, or when a page fills).
 * Clears ->ready under the lock so no further bytes land in the buffer
 * while the write is in flight.
 */
static void mtdoops_console_sync(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	unsigned long flags;

	if (!cxt->ready || !mtd || cxt->writecount == 0)
		return;

	/*
	 * Once ready is 0 and we've held the lock no further writes to the
	 * buffer will happen
	 */
	spin_lock_irqsave(&cxt->writecount_lock, flags);
	if (!cxt->ready) {
		/* Someone else claimed this buffer first */
		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
		return;
	}
	cxt->ready = 0;
	spin_unlock_irqrestore(&cxt->writecount_lock, flags);

	if (mtd->panic_write && in_interrupt())
		/* Interrupt context, we're going to panic so try and log */
		mtdoops_write(cxt, 1);
	else
		schedule_work(&cxt->work_write);
}
Richard Purdie4b23aff2007-05-29 13:31:42 +0100374
/*
 * Console ->write hook.  Outside an oops it only triggers a sync of any
 * previously buffered crash data; during an oops it appends @s to the
 * staging buffer, stamping the record header on the first chunk and
 * syncing once the page is full.
 */
static void
mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
	struct mtdoops_context *cxt = co->data;
	struct mtd_info *mtd = cxt->mtd;
	unsigned long flags;

	if (!oops_in_progress) {
		mtdoops_console_sync();
		return;
	}

	if (!cxt->ready || !mtd)
		return;

	/* Locking on writecount ensures sequential writes to the buffer */
	spin_lock_irqsave(&cxt->writecount_lock, flags);

	/* Check ready status didn't change whilst waiting for the lock */
	if (!cxt->ready) {
		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
		return;
	}

	if (cxt->writecount == 0) {
		/* First chunk of this oops: write the 8-byte record header */
		u32 *stamp = cxt->oops_buf;
		*stamp++ = cxt->nextcount;
		*stamp = MTDOOPS_KERNMSG_MAGIC;
		cxt->writecount = 8;
	}

	/* Clamp to the space left in the page; excess output is dropped */
	if (count + cxt->writecount > OOPS_PAGE_SIZE)
		count = OOPS_PAGE_SIZE - cxt->writecount;

	memcpy(cxt->oops_buf + cxt->writecount, s, count);
	cxt->writecount += count;

	spin_unlock_irqrestore(&cxt->writecount_lock, flags);

	if (cxt->writecount == OOPS_PAGE_SIZE)
		mtdoops_console_sync();
}
417
418static int __init mtdoops_console_setup(struct console *co, char *options)
419{
420 struct mtdoops_context *cxt = co->data;
421
Adrian Huntere2a0f252009-02-16 18:21:35 +0200422 if (cxt->mtd_index != -1 || cxt->name)
Richard Purdie4b23aff2007-05-29 13:31:42 +0100423 return -EBUSY;
Adrian Huntere2a0f252009-02-16 18:21:35 +0200424 if (options) {
425 cxt->name = kstrdup(options, GFP_KERNEL);
426 return 0;
427 }
Richard Purdie4b23aff2007-05-29 13:31:42 +0100428 if (co->index == -1)
429 return -EINVAL;
430
431 cxt->mtd_index = co->index;
432 return 0;
433}
434
/* Hooks invoked by the MTD core as devices are added and removed */
static struct mtd_notifier mtdoops_notifier = {
	.add = mtdoops_notify_add,
	.remove = mtdoops_notify_remove,
};
439
/* Console device; ->data points at the shared driver context */
static struct console mtdoops_console = {
	.name = "ttyMTD",
	.write = mtdoops_console_write,
	.setup = mtdoops_console_setup,
	.unblank = mtdoops_console_sync,
	.index = -1,
	.data = &oops_cxt,
};
448
449static int __init mtdoops_console_init(void)
450{
451 struct mtdoops_context *cxt = &oops_cxt;
452
453 cxt->mtd_index = -1;
454 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100455 if (!cxt->oops_buf) {
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300456 printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
Richard Purdie4b23aff2007-05-29 13:31:42 +0100457 return -ENOMEM;
458 }
459
Artem Bityutskiya15b1242009-10-11 13:40:40 +0300460 spin_lock_init(&cxt->writecount_lock);
Richard Purdie6ce0a852008-01-29 11:27:11 +0000461 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
462 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
Richard Purdie4b23aff2007-05-29 13:31:42 +0100463
464 register_console(&mtdoops_console);
465 register_mtd_user(&mtdoops_notifier);
466 return 0;
467}
468
/*
 * Module exit: unregister in reverse order of init, then release the
 * name string, the staging buffer and the used-page bitmap.
 */
static void __exit mtdoops_console_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	unregister_console(&mtdoops_console);
	kfree(cxt->name);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}
479
480
/* Registered via subsys_initcall (rather than module_init) when built in */
subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");