From a15b124fc4f15b2c4fc51669c936a30ce179d1f7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 11 Oct 2009 13:40:40 +0300 Subject: mtd: mtdoops: several minor cleanups While looking into the mtdoops module, I've spotted several minor imperfections. This patch addresses them. Namely: 1. Remove several trailing white-spaces and tabs 2. Check 'vmalloc()' return code straight away, not several lines below in the 'mtdoops_console_init()' function. 3. Clean up printks - make them more consistent and use the same code formatting style for them. 4. Remove silly style of putting brackets around everything in "if" operators. Signed-off-by: Artem Bityutskiy Cc: Simon Kagstrom Signed-off-by: David Woodhouse --- drivers/mtd/mtdoops.c | 81 +++++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 41 deletions(-) (limited to 'drivers/mtd/mtdoops.c') diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 1060337..c383add 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -81,9 +81,9 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset) if (ret) { set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); - printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " - "on \"%s\" failed\n", - (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); + printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", + (unsigned long long)erase.addr, + (unsigned long long)erase.len, mtd->name); return ret; } @@ -109,10 +109,9 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt) ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); - if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { - printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" - ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, - retlen, ret); + if (retlen != 4 || (ret < 0 && ret != -EUCLEAN)) { + printk(KERN_ERR "mtdoops: read failure at %d (%td of 4 read), err %d\n", + cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret); schedule_work(&cxt->work_erase); return; } @@ -123,8 +122,8 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt) return; } - printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", - cxt->nextpage, cxt->nextcount); + printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n", + cxt->nextpage, cxt->nextcount); cxt->ready = 1; } @@ -152,18 +151,18 @@ static void mtdoops_workfunc_erase(struct work_struct *work) if (!ret) break; if (ret < 0) { - printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); + printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n"); return; } badblock: - printk(KERN_WARNING "mtdoops: Bad block at %08x\n", - cxt->nextpage * OOPS_PAGE_SIZE); + printk(KERN_WARNING "mtdoops: bad block at %08x\n", + cxt->nextpage * OOPS_PAGE_SIZE); i++; cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; - if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) { - printk(KERN_ERR "mtdoops: All blocks bad!\n"); + if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) { + printk(KERN_ERR "mtdoops: all blocks bad!\n"); return; } } @@ -172,15 +171,16 @@ badblock: ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); if (ret >= 0) { - printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); + printk(KERN_DEBUG "mtdoops: ready %d, %d\n", + cxt->nextpage, cxt->nextcount); cxt->ready = 1; return; } - if (mtd->block_markbad && (ret == 
-EIO)) { + if (mtd->block_markbad && ret == -EIO) { ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); if (ret < 0) { - printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); + printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); return; } } @@ -206,9 +206,9 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) cxt->writecount = 0; - if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) - printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", - cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); + if (retlen != OOPS_PAGE_SIZE || ret < 0) + printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n", + cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); mtdoops_inc_counter(cxt); } @@ -220,7 +220,7 @@ static void mtdoops_workfunc_write(struct work_struct *work) container_of(work, struct mtdoops_context, work_write); mtdoops_write(cxt, 0); -} +} static void find_next_position(struct mtdoops_context *cxt) { @@ -231,9 +231,9 @@ static void find_next_position(struct mtdoops_context *cxt) for (page = 0; page < cxt->oops_pages; page++) { ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); - if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) { - printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)" - ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); + if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) { + printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n", + page * OOPS_PAGE_SIZE, retlen, ret); continue; } @@ -244,14 +244,14 @@ static void find_next_position(struct mtdoops_context *cxt) if (maxcount == 0xffffffff) { maxcount = count[0]; maxpos = page; - } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) { + } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) { maxcount = count[0]; maxpos = page; - } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) { + } else if (count[0] > maxcount && count[0] < 0xc0000000) { maxcount = count[0]; maxpos = page; - } else if ((count[0] > maxcount) && (count[0] > 0xc0000000) - && (maxcount > 0x80000000)) { + } else if (count[0] > maxcount && count[0] > 0xc0000000 + && maxcount > 0x80000000) { maxcount = count[0]; maxpos = page; } @@ -277,18 +277,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd) if (cxt->name && !strcmp(mtd->name, cxt->name)) cxt->mtd_index = mtd->index; - if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) + if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; - if (mtd->size < (mtd->erasesize * 2)) { - printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n", - mtd->index); + if (mtd->size < mtd->erasesize * 2) { + printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n", + mtd->index); return; } if (mtd->erasesize < OOPS_PAGE_SIZE) { - printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", - mtd->index); + printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n", + mtd->index); return; } @@ -307,7 +307,7 @@ static void mtdoops_notify_remove(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; - if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) + if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; cxt->mtd = NULL; @@ -323,8 +323,8 @@ static void mtdoops_console_sync(void) if (!cxt->ready || !mtd || cxt->writecount == 0) return; - /* - * Once ready is 0 and we've held the lock no further writes to the + /* + * Once ready is 0 and we've held the lock no 
further writes to the * buffer will happen */ spin_lock_irqsave(&cxt->writecount_lock, flags); @@ -373,7 +373,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count) cxt->writecount = 8; } - if ((count + cxt->writecount) > OOPS_PAGE_SIZE) + if (count + cxt->writecount > OOPS_PAGE_SIZE) count = OOPS_PAGE_SIZE - cxt->writecount; memcpy(cxt->oops_buf + cxt->writecount, s, count); @@ -422,13 +422,12 @@ static int __init mtdoops_console_init(void) cxt->mtd_index = -1; cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); - spin_lock_init(&cxt->writecount_lock); - if (!cxt->oops_buf) { - printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); + printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n"); return -ENOMEM; } + spin_lock_init(&cxt->writecount_lock); INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); -- cgit v1.1 From be95745f01677245a061a8f51473ef5ec8ad008e Mon Sep 17 00:00:00 2001 From: Simon Kagstrom Date: Thu, 29 Oct 2009 13:41:11 +0100 Subject: mtd: mtdoops: keep track of used/unused pages in an array This patch makes mtdoops keep track of used/unused pages in an array instead of scanning the flash after a write. The advantage with this approach is that it avoids calling mtd->read on a panic, which is not possible for all mtd drivers. Signed-off-by: Simon Kagstrom Reviewed-by: Anders Grafstrom Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/mtdoops.c | 62 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 18 deletions(-) (limited to 'drivers/mtd/mtdoops.c') diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index c383add..06c5382 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -44,6 +44,7 @@ static struct mtdoops_context { int oops_pages; int nextpage; int nextcount; + unsigned long *oops_page_used; char *name; void *oops_buf; @@ -54,18 +55,38 @@ static struct mtdoops_context { int writecount; } oops_cxt; +static void mark_page_used(struct mtdoops_context *cxt, int page) +{ + set_bit(page, cxt->oops_page_used); +} + +static void mark_page_unused(struct mtdoops_context *cxt, int page) +{ + clear_bit(page, cxt->oops_page_used); +} + +static int page_is_used(struct mtdoops_context *cxt, int page) +{ + return test_bit(page, cxt->oops_page_used); +} + static void mtdoops_erase_callback(struct erase_info *done) { wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; wake_up(wait_q); } -static int mtdoops_erase_block(struct mtd_info *mtd, int offset) +static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) { + struct mtd_info *mtd = cxt->mtd; + u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; + u32 start_page = start_page_offset / OOPS_PAGE_SIZE; + u32 erase_pages = mtd->erasesize / OOPS_PAGE_SIZE; struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; int ret; + int page; init_waitqueue_head(&wait_q); erase.mtd = mtd; @@ -90,16 +111,15 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset) schedule(); /* Wait for erase to finish. 
*/ remove_wait_queue(&wait_q, &wait); + /* Mark pages as unused */ + for (page = start_page; page < start_page + erase_pages; page++) + mark_page_unused(cxt, page); + return 0; } static void mtdoops_inc_counter(struct mtdoops_context *cxt) { - struct mtd_info *mtd = cxt->mtd; - size_t retlen; - u32 count; - int ret; - cxt->nextpage++; if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; @@ -107,17 +127,7 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt) if (cxt->nextcount == 0xffffffff) cxt->nextcount = 0; - ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, - &retlen, (u_char *) &count); - if (retlen != 4 || (ret < 0 && ret != -EUCLEAN)) { - printk(KERN_ERR "mtdoops: read failure at %d (%td of 4 read), err %d\n", - cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret); - schedule_work(&cxt->work_erase); - return; - } - - /* See if we need to erase the next block */ - if (count != 0xffffffff) { + if (page_is_used(cxt, cxt->nextpage)) { schedule_work(&cxt->work_erase); return; } @@ -168,7 +178,7 @@ badblock: } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) - ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); + ret = mtdoops_erase_block(cxt, cxt->nextpage * OOPS_PAGE_SIZE); if (ret >= 0) { printk(KERN_DEBUG "mtdoops: ready %d, %d\n", @@ -209,6 +219,7 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) if (retlen != OOPS_PAGE_SIZE || ret < 0) printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n", cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); + mark_page_used(cxt, cxt->nextpage); mtdoops_inc_counter(cxt); } @@ -230,6 +241,8 @@ static void find_next_position(struct mtdoops_context *cxt) size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { + /* Assume the page is used */ + mark_page_used(cxt, page); ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) { printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n", @@ -237,6 +250,8 @@ static void find_next_position(struct mtdoops_context *cxt) continue; } + if (count[0] == 0xffffffff && count[1] == 0xffffffff) + mark_page_unused(cxt, page); if (count[1] != MTDOOPS_KERNMSG_MAGIC) continue; if (count[0] == 0xffffffff) @@ -273,6 +288,9 @@ static void find_next_position(struct mtdoops_context *cxt) static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; + u64 mtdoops_pages = mtd->size; + + do_div(mtdoops_pages, OOPS_PAGE_SIZE); if (cxt->name && !strcmp(mtd->name, cxt->name)) cxt->mtd_index = mtd->index; @@ -292,6 +310,13 @@ static void mtdoops_notify_add(struct mtd_info *mtd) return; } + /* oops_page_used is a bit field */ + cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, + BITS_PER_LONG)); + if (!cxt->oops_page_used) { + printk(KERN_ERR "Could not allocate page array\n"); + return; + } cxt->mtd = mtd; if (mtd->size > INT_MAX) cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; @@ -444,6 +469,7 @@ static void __exit mtdoops_console_exit(void) unregister_console(&mtdoops_console); kfree(cxt->name); vfree(cxt->oops_buf); + vfree(cxt->oops_page_used); } -- cgit v1.1 From 1114e3d00f539ecb7a8415663f2a47a80e00a537 Mon Sep 17 00:00:00 2001 From: Simon Kagstrom Date: Tue, 3 Nov 2009 08:08:41 +0200 Subject: mtd: mtdoops: limit the maximum mtd partition size Make the maximum mtdoops partition size to be 8MiB. Indeed, it does not make sense to use anything larger than that anyway. 
This limit makes it possible to catch stupid mistakes where the user gives e.g., a rootfs partition to mtdoops (which will happily erase it). Signed-off-by: Simon Kagstrom Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/mtdoops.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/mtd/mtdoops.c') diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 06c5382..b016eee 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -33,6 +33,9 @@ #include #include +/* Maximum MTD partition size */ +#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024) + #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 #define OOPS_PAGE_SIZE 4096 @@ -310,6 +313,12 @@ static void mtdoops_notify_add(struct mtd_info *mtd) return; } + if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { + printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n", + mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); + return; + } + /* oops_page_used is a bit field */ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG)); @@ -317,14 +326,10 @@ static void mtdoops_notify_add(struct mtd_info *mtd) printk(KERN_ERR "Could not allocate page array\n"); return; } - cxt->mtd = mtd; - if (mtd->size > INT_MAX) - cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; - else - cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; + cxt->mtd = mtd; + cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; find_next_position(cxt); - printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); } -- cgit v1.1 From 9507b0c838e37651030d453b9cf3b136cfeefe89 Mon Sep 17 00:00:00 2001 From: Simon Kagstrom Date: Thu, 29 Oct 2009 13:41:19 +0100 Subject: mtd: mtdoops: make record size configurable The main justification for this is to allow catching long messages during a panic, where the top part might otherwise be lost since moving to the next block can require a flash erase. 
Signed-off-by: Simon Kagstrom Reviewed-by: Anders Grafstrom Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/mtdoops.c | 74 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 31 deletions(-) (limited to 'drivers/mtd/mtdoops.c') diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index b016eee..64772dc 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -37,7 +37,11 @@ #define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024) #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 -#define OOPS_PAGE_SIZE 4096 + +static unsigned long record_size = 4096; +module_param(record_size, ulong, 0400); +MODULE_PARM_DESC(record_size, + "record size for MTD OOPS pages in bytes (default 4096)"); static struct mtdoops_context { int mtd_index; @@ -83,8 +87,8 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) { struct mtd_info *mtd = cxt->mtd; u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; - u32 start_page = start_page_offset / OOPS_PAGE_SIZE; - u32 erase_pages = mtd->erasesize / OOPS_PAGE_SIZE; + u32 start_page = start_page_offset / record_size; + u32 erase_pages = mtd->erasesize / record_size; struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; @@ -152,15 +156,15 @@ static void mtdoops_workfunc_erase(struct work_struct *work) if (!mtd) return; - mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize; + mod = (cxt->nextpage * record_size) % mtd->erasesize; if (mod != 0) { - cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE); + cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; } while (mtd->block_isbad) { - ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); + ret = mtd->block_isbad(mtd, cxt->nextpage * record_size); if (!ret) break; if (ret < 0) { @@ -168,20 +172,20 @@ static void mtdoops_workfunc_erase(struct work_struct *work) return; } badblock: - printk(KERN_WARNING "mtdoops: bad block at %08x\n", - cxt->nextpage * OOPS_PAGE_SIZE); + printk(KERN_WARNING "mtdoops: bad block at %08lx\n", + cxt->nextpage * record_size); i++; - cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); + cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; - if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) { + if (i == cxt->oops_pages / (mtd->erasesize / record_size)) { printk(KERN_ERR "mtdoops: all blocks bad!\n"); return; } } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) - ret = mtdoops_erase_block(cxt, cxt->nextpage * OOPS_PAGE_SIZE); + ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); if (ret >= 0) { printk(KERN_DEBUG "mtdoops: ready %d, %d\n", @@ -191,7 +195,7 @@ badblock: } if (mtd->block_markbad && ret == -EIO) { - ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); + ret = mtd->block_markbad(mtd, cxt->nextpage * record_size); if (ret < 0) { printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); return; @@ -206,22 +210,22 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) size_t retlen; int ret; - if (cxt->writecount < OOPS_PAGE_SIZE) + if (cxt->writecount < record_size) memset(cxt->oops_buf + cxt->writecount, 0xff, - OOPS_PAGE_SIZE - cxt->writecount); + record_size - cxt->writecount); if (panic) - ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, - OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); + ret = mtd->panic_write(mtd, 
cxt->nextpage * record_size, + record_size, &retlen, cxt->oops_buf); else - ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, - OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); + ret = mtd->write(mtd, cxt->nextpage * record_size, + record_size, &retlen, cxt->oops_buf); cxt->writecount = 0; - if (retlen != OOPS_PAGE_SIZE || ret < 0) - printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n", - cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); + if (retlen != record_size || ret < 0) + printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n", + cxt->nextpage * record_size, retlen, record_size, ret); mark_page_used(cxt, cxt->nextpage); mtdoops_inc_counter(cxt); @@ -246,10 +250,10 @@ static void find_next_position(struct mtdoops_context *cxt) for (page = 0; page < cxt->oops_pages; page++) { /* Assume the page is used */ mark_page_used(cxt, page); - ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); + ret = mtd->read(mtd, page * record_size, 8, &retlen, (u_char *) &count[0]); if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) { - printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n", - page * OOPS_PAGE_SIZE, retlen, ret); + printk(KERN_ERR "mtdoops: read failure at %ld (%td of 8 read), err %d\n", + page * record_size, retlen, ret); continue; } @@ -293,7 +297,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd) struct mtdoops_context *cxt = &oops_cxt; u64 mtdoops_pages = mtd->size; - do_div(mtdoops_pages, OOPS_PAGE_SIZE); + do_div(mtdoops_pages, record_size); if (cxt->name && !strcmp(mtd->name, cxt->name)) cxt->mtd_index = mtd->index; @@ -307,7 +311,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd) return; } - if (mtd->erasesize < OOPS_PAGE_SIZE) { + if (mtd->erasesize < record_size) { printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n", mtd->index); return; @@ -328,7 +332,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd) } cxt->mtd = mtd; - cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; + cxt->oops_pages = (int)mtd->size / record_size; find_next_position(cxt); printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); } @@ -403,15 +407,15 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count) cxt->writecount = 8; } - if (count + cxt->writecount > OOPS_PAGE_SIZE) - count = OOPS_PAGE_SIZE - cxt->writecount; + if (count + cxt->writecount > record_size) + count = record_size - cxt->writecount; memcpy(cxt->oops_buf + cxt->writecount, s, count); cxt->writecount += count; spin_unlock_irqrestore(&cxt->writecount_lock, flags); - if (cxt->writecount == OOPS_PAGE_SIZE) + if (cxt->writecount == record_size) mtdoops_console_sync(); } @@ -450,8 +454,16 @@ static int __init mtdoops_console_init(void) { struct mtdoops_context *cxt = &oops_cxt; + if ((record_size & 4095) != 0) { + printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n"); + return -EINVAL; + } + if (record_size < 4096) { + printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n"); + return -EINVAL; + } cxt->mtd_index = -1; - cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); + cxt->oops_buf = vmalloc(record_size); if (!cxt->oops_buf) { printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n"); return -ENOMEM; -- cgit v1.1 From 2e386e4bac90554887e73d6f342e845185b33fc3 Mon Sep 17 00:00:00 2001 From: Simon Kagstrom Date: Tue, 3 Nov 2009 14:19:03 +0100 Subject: mtd: mtdoops: refactor as a kmsg_dumper The last messages which happen before a crash might 
contain interesting information about the crash. This patch reworks mtdoops using the kmsg_dumper support instead of a console, which simplifies the code and also includes the messages before the oops started. On oops callbacks, the MTD device write is scheduled in a work queue (to be able to use the regular mtd->write call), while panics call mtd->panic_write directly. Thus, if panic_on_oops is set, the oops will be written out during the panic. A parameter is added to specify which mtd device to use (number or name), as well as a flag, writable at runtime, to toggle whether to dump oopses or only panics (since oopses can often be handled by regular syslog). The patch was massaged and amended by Artem. Signed-off-by: Simon Kagstrom Reviewed-by: Anders Grafstrom Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/mtdoops.c | 235 ++++++++++++++++++++++---------------------- 1 file changed, 102 insertions(+), 133 deletions(-) (limited to 'drivers/mtd/mtdoops.c') diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 64772dc..a714ec4 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -29,21 +29,34 @@ #include #include #include -#include #include #include +#include /* Maximum MTD partition size */ #define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024) #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 +#define MTDOOPS_HEADER_SIZE 8 static unsigned long record_size = 4096; module_param(record_size, ulong, 0400); MODULE_PARM_DESC(record_size, "record size for MTD OOPS pages in bytes (default 4096)"); +static char mtddev[80]; +module_param_string(mtddev, mtddev, 80, 0400); +MODULE_PARM_DESC(mtddev, + "name or index number of the MTD device to use"); + +static int dump_oops = 1; +module_param(dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); + static struct mtdoops_context { + struct kmsg_dumper dump; + int mtd_index; struct work_struct work_erase; struct work_struct work_write; @@ -52,14 +65,8 @@ static struct mtdoops_context { int nextpage; int nextcount; unsigned long *oops_page_used; - char *name; void *oops_buf; - - /* writecount and disabling ready are spin lock protected */ - spinlock_t writecount_lock; - int ready; - int writecount; } oops_cxt; static void mark_page_used(struct mtdoops_context *cxt, int page) @@ -111,7 +118,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) remove_wait_queue(&wait_q, &wait); printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", (unsigned long long)erase.addr, - (unsigned long long)erase.len, mtd->name); + (unsigned long long)erase.len, mtddev); return ret; } @@ -141,7 +148,6 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt) printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); - cxt->ready = 1; } /* Scheduled work - when we can't proceed without erasing a block */ @@ -190,7 +196,6 @@ badblock: if (ret >= 0) { printk(KERN_DEBUG "mtdoops: ready %d, %d\n", cxt->nextpage, cxt->nextcount); - cxt->ready = 1; return; } @@ -208,11 +213,13 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; size_t retlen; + u32 *hdr; int ret; - if (cxt->writecount < record_size) - memset(cxt->oops_buf + cxt->writecount, 0xff, - record_size - cxt->writecount); + /* Add mtdoops header to the buffer */ + hdr = cxt->oops_buf; + hdr[0] = cxt->nextcount; + hdr[1] = MTDOOPS_KERNMSG_MAGIC; if (panic) ret = mtd->panic_write(mtd, cxt->nextpage 
* record_size, @@ -221,17 +228,15 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) ret = mtd->write(mtd, cxt->nextpage * record_size, record_size, &retlen, cxt->oops_buf); - cxt->writecount = 0; - if (retlen != record_size || ret < 0) printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n", cxt->nextpage * record_size, retlen, record_size, ret); mark_page_used(cxt, cxt->nextpage); + memset(cxt->oops_buf, 0xff, record_size); mtdoops_inc_counter(cxt); } - static void mtdoops_workfunc_write(struct work_struct *work) { struct mtdoops_context *cxt = @@ -250,17 +255,18 @@ static void find_next_position(struct mtdoops_context *cxt) for (page = 0; page < cxt->oops_pages; page++) { /* Assume the page is used */ mark_page_used(cxt, page); - ret = mtd->read(mtd, page * record_size, 8, &retlen, (u_char *) &count[0]); - if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) { - printk(KERN_ERR "mtdoops: read failure at %ld (%td of 8 read), err %d\n", - page * record_size, retlen, ret); + ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, + &retlen, (u_char *) &count[0]); + if (retlen != MTDOOPS_HEADER_SIZE || + (ret < 0 && ret != -EUCLEAN)) { + printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", + page * record_size, retlen, + MTDOOPS_HEADER_SIZE, ret); continue; } if (count[0] == 0xffffffff && count[1] == 0xffffffff) mark_page_unused(cxt, page); - if (count[1] != MTDOOPS_KERNMSG_MAGIC) - continue; if (count[0] == 0xffffffff) continue; if (maxcount == 0xffffffff) { @@ -291,15 +297,50 @@ static void find_next_position(struct mtdoops_context *cxt) mtdoops_inc_counter(cxt); } +static void mtdoops_do_dump(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, const char *s1, unsigned long l1, + const char *s2, unsigned long l2) +{ + struct mtdoops_context *cxt = container_of(dumper, + struct mtdoops_context, dump); + unsigned long s1_start, s2_start; + unsigned long l1_cpy, l2_cpy; + char *dst; + + /* Only dump oopses if dump_oops is set */ + if (reason == KMSG_DUMP_OOPS && !dump_oops) + return; + + dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ + l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); + l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy); + + s2_start = l2 - l2_cpy; + s1_start = l1 - l1_cpy; + + memcpy(dst, s1 + s1_start, l1_cpy); + memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); + + /* Panics must be written immediately */ + if (reason == KMSG_DUMP_PANIC) { + if (!cxt->mtd->panic_write) + printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); + else + mtdoops_write(cxt, 1); + return; + } + + /* For other cases, schedule work to write it "nicely" */ + schedule_work(&cxt->work_write); +} static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; - u64 mtdoops_pages = mtd->size; - - do_div(mtdoops_pages, record_size); + u64 mtdoops_pages = div_u64(mtd->size, record_size); + int err; - if (cxt->name && !strcmp(mtd->name, cxt->name)) + if (!strcmp(mtd->name, mtddev)) cxt->mtd_index = mtd->index; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) @@ -310,13 +351,11 @@ static void mtdoops_notify_add(struct mtd_info *mtd) mtd->index); return; } - if (mtd->erasesize < record_size) { printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n", mtd->index); return; } - if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n", mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); @@ 
-327,7 +366,16 @@ static void mtdoops_notify_add(struct mtd_info *mtd) cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG)); if (!cxt->oops_page_used) { - printk(KERN_ERR "Could not allocate page array\n"); + printk(KERN_ERR "mtdoops: could not allocate page array\n"); + return; + } + + cxt->dump.dump = mtdoops_do_dump; + err = kmsg_dump_register(&cxt->dump); + if (err) { + printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err); + vfree(cxt->oops_page_used); + cxt->oops_page_used = NULL; return; } @@ -344,116 +392,29 @@ static void mtdoops_notify_remove(struct mtd_info *mtd) if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; + if (kmsg_dump_unregister(&cxt->dump) < 0) + printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); + cxt->mtd = NULL; flush_scheduled_work(); } -static void mtdoops_console_sync(void) -{ - struct mtdoops_context *cxt = &oops_cxt; - struct mtd_info *mtd = cxt->mtd; - unsigned long flags; - - if (!cxt->ready || !mtd || cxt->writecount == 0) - return; - - /* - * Once ready is 0 and we've held the lock no further writes to the - * buffer will happen - */ - spin_lock_irqsave(&cxt->writecount_lock, flags); - if (!cxt->ready) { - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - return; - } - cxt->ready = 0; - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - - if (mtd->panic_write && in_interrupt()) - /* Interrupt context, we're going to panic so try and log */ - mtdoops_write(cxt, 1); - else - schedule_work(&cxt->work_write); -} - -static void -mtdoops_console_write(struct console *co, const char *s, unsigned int count) -{ - struct mtdoops_context *cxt = co->data; - struct mtd_info *mtd = cxt->mtd; - unsigned long flags; - - if (!oops_in_progress) { - mtdoops_console_sync(); - return; - } - - if (!cxt->ready || !mtd) - return; - - /* Locking on writecount ensures sequential writes to the buffer */ - spin_lock_irqsave(&cxt->writecount_lock, flags); - - /* Check ready status didn't change whilst waiting for the lock */ - if (!cxt->ready) { - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - return; - } - - if (cxt->writecount == 0) { - u32 *stamp = cxt->oops_buf; - *stamp++ = cxt->nextcount; - *stamp = MTDOOPS_KERNMSG_MAGIC; - cxt->writecount = 8; - } - - if (count + cxt->writecount > record_size) - count = record_size - cxt->writecount; - - memcpy(cxt->oops_buf + cxt->writecount, s, count); - cxt->writecount += count; - - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - - if (cxt->writecount == record_size) - mtdoops_console_sync(); -} - -static int __init mtdoops_console_setup(struct console *co, char *options) -{ - struct mtdoops_context *cxt = co->data; - - if (cxt->mtd_index != -1 || cxt->name) - return -EBUSY; - if (options) { - cxt->name = kstrdup(options, GFP_KERNEL); - return 0; - } - if (co->index == -1) - return -EINVAL; - - cxt->mtd_index = co->index; - return 0; -} static struct mtd_notifier mtdoops_notifier = { .add = mtdoops_notify_add, .remove = mtdoops_notify_remove, }; -static struct console mtdoops_console = { - .name = "ttyMTD", - .write = mtdoops_console_write, - .setup = mtdoops_console_setup, - .unblank = mtdoops_console_sync, - .index = -1, - .data = &oops_cxt, -}; - -static int __init mtdoops_console_init(void) +static int __init mtdoops_init(void) { struct mtdoops_context *cxt = &oops_cxt; + int mtd_index; + char *endp; + if (strlen(mtddev) == 0) { + printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n"); + return -EINVAL; + } if 
((record_size & 4095) != 0) { printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n"); return -EINVAL; @@ -462,36 +423,44 @@ static int __init mtdoops_console_init(void) printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n"); return -EINVAL; } + + /* Setup the MTD device to use */ cxt->mtd_index = -1; + mtd_index = simple_strtoul(mtddev, &endp, 0); + if (*endp == '\0') + cxt->mtd_index = mtd_index; + if (cxt->mtd_index > MAX_MTD_DEVICES) { + printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n", + mtd_index); + return -EINVAL; + } + cxt->oops_buf = vmalloc(record_size); if (!cxt->oops_buf) { printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n"); return -ENOMEM; } + memset(cxt->oops_buf, 0xff, record_size); - spin_lock_init(&cxt->writecount_lock); INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); - register_console(&mtdoops_console); register_mtd_user(&mtdoops_notifier); return 0; } -static void __exit mtdoops_console_exit(void) +static void __exit mtdoops_exit(void) { struct mtdoops_context *cxt = &oops_cxt; unregister_mtd_user(&mtdoops_notifier); - unregister_console(&mtdoops_console); - kfree(cxt->name); vfree(cxt->oops_buf); vfree(cxt->oops_page_used); } -subsys_initcall(mtdoops_console_init); -module_exit(mtdoops_console_exit); +module_init(mtdoops_init); +module_exit(mtdoops_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Richard Purdie "); -- cgit v1.1
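The on-flash record format that mtdoops_write() and find_next_position() agree on in the patches above is only implicit in the code. Below is a minimal sketch of the resulting layout; the struct name is hypothetical (the driver simply stores two u32 values at the start of each record and keeps the rest of the buffer pre-filled with 0xff).

/*
 * Illustrative sketch only, not part of the patches above: each mtdoops
 * record occupies record_size bytes on flash. mtdoops_write() fills the
 * first MTDOOPS_HEADER_SIZE bytes with the record counter and the magic
 * value, and the remainder holds the kmsg text copied by the dumper;
 * a header that reads back as all 0xff is what find_next_position()
 * treats as an unused page.
 */
#include <linux/types.h>

#define MTDOOPS_KERNMSG_MAGIC	0x5d005d00
#define MTDOOPS_HEADER_SIZE	8

struct mtdoops_record_hdr {	/* hypothetical name, for illustration */
	u32 count;		/* cxt->nextcount, the record sequence number */
	u32 magic;		/* MTDOOPS_KERNMSG_MAGIC marks a valid record */
};
/* followed by (record_size - MTDOOPS_HEADER_SIZE) bytes of console text */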