author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2011-03-25 18:41:20 +0300
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2011-03-25 18:41:20 +0300
commit     7bf7e370d5919112c223a269462cd0b546903829 (patch)
tree       03ccc715239df14ae168277dbccc9d9cf4d8a2c8 /drivers/mtd
parent     68b1a1e786f29c900fa1c516a402e24f0ece622a (diff)
parent     d39dd11c3e6a7af5c20bfac40594db36cf270f42 (diff)
download   linux-7bf7e370d5919112c223a269462cd0b546903829.tar.xz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus-1
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6: (9356 commits)
  [media] rc: update for bitop name changes
  fs: simplify iget & friends
  fs: pull inode->i_lock up out of writeback_single_inode
  fs: rename inode_lock to inode_hash_lock
  fs: move i_wb_list out from under inode_lock
  fs: move i_sb_list out from under inode_lock
  fs: remove inode_lock from iput_final and prune_icache
  fs: Lock the inode LRU list separately
  fs: factor inode disposal
  fs: protect inode->i_state with inode->i_lock
  lib, arch: add filter argument to show_mem and fix private implementations
  SLUB: Write to per cpu data when allocating it
  slub: Fix debugobjects with lockless fastpath
  autofs4: Do not potentially dereference NULL pointer returned by fget() in autofs_dev_ioctl_setpipefd()
  autofs4 - remove autofs4_lock
  autofs4 - fix d_manage() return on rcu-walk
  autofs4 - fix autofs4_expire_indirect() traversal
  autofs4 - fix dentry leak in autofs4_expire_direct()
  autofs4 - reinstate last used update on access
  vfs - check non-mountpoint dentry might block in __follow_mount_rcu()
  ...

NOTE! This merge commit was created to fix a compilation error. The block tree was merged upstream and removed the 'elv_queue_empty()' function which the new 'mtdswap' driver is using. So a simple merge of the mtd tree with upstream does not compile. And the mtd tree has already been published, so re-basing it is not an option.

To fix this unfortunate situation, I had to merge upstream into the mtd-2.6.git tree without committing, put the fixup patch on top of this, and then commit the result. That way we do not end up with commits which do not compile.

In other words, this merge commit "merges" 3 things: the MTD tree, the upstream tree, and the fixup patch.
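The fixup procedure described in the note maps onto a short sequence of ordinary git commands. The sketch below is illustrative only, assuming the published MTD tree is checked out; the fixup patch file name is a hypothetical placeholder, not something recorded in the commit:

  # fetch upstream and merge it into the published MTD tree without committing
  git fetch git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 master
  git merge --no-commit FETCH_HEAD
  # apply the compile fix on top of the uncommitted merge (hypothetical file name)
  git apply --index fixup-mtdswap-elv_queue_empty.patch
  # record merge + fixup as a single, always-compiling merge commit
  git commit
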
Diffstat (limited to 'drivers/mtd')
-rw-r--r--   drivers/mtd/maps/physmap_of.c      15
-rw-r--r--   drivers/mtd/maps/sun_uflash.c       8
-rw-r--r--   drivers/mtd/mtd_blkdevs.c           9
-rw-r--r--   drivers/mtd/nand/Kconfig           19
-rw-r--r--   drivers/mtd/nand/fsl_upm.c          9
-rw-r--r--   drivers/mtd/nand/mpc5121_nfc.c      9
-rw-r--r--   drivers/mtd/nand/mxc_nand.c         5
-rw-r--r--   drivers/mtd/nand/ndfc.c             9
-rw-r--r--   drivers/mtd/nand/omap2.c          367
-rw-r--r--   drivers/mtd/nand/pasemi_nand.c      9
-rw-r--r--   drivers/mtd/nand/r852.c             2
-rw-r--r--   drivers/mtd/nand/socrates_nand.c    9
-rw-r--r--   drivers/mtd/nand/tmio_nand.c       11
-rw-r--r--   drivers/mtd/onenand/Kconfig         2
-rw-r--r--   drivers/mtd/onenand/omap2.c        36
-rw-r--r--   drivers/mtd/sm_ftl.c                2
-rw-r--r--   drivers/mtd/ubi/Kconfig             8
-rw-r--r--   drivers/mtd/ubi/Kconfig.debug      73
-rw-r--r--   drivers/mtd/ubi/build.c            62
-rw-r--r--   drivers/mtd/ubi/debug.c            14
-rw-r--r--   drivers/mtd/ubi/debug.h           131
-rw-r--r--   drivers/mtd/ubi/io.c              145
-rw-r--r--   drivers/mtd/ubi/kapi.c              2
-rw-r--r--   drivers/mtd/ubi/scan.c             95
-rw-r--r--   drivers/mtd/ubi/scan.h              2
-rw-r--r--   drivers/mtd/ubi/ubi.h              10
-rw-r--r--   drivers/mtd/ubi/vmt.c               7
-rw-r--r--   drivers/mtd/ubi/vtbl.c              9
-rw-r--r--   drivers/mtd/ubi/wl.c               20
29 files changed, 654 insertions, 445 deletions
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c85be8312488..bd483f0c57e1 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -214,8 +214,7 @@ static void __devinit of_free_probes(const char **probes)
}
#endif
-static int __devinit of_flash_probe(struct platform_device *dev,
- const struct of_device_id *match)
+static int __devinit of_flash_probe(struct platform_device *dev)
{
#ifdef CONFIG_MTD_PARTITIONS
const char **part_probe_types;
@@ -223,7 +222,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
struct device_node *dp = dev->dev.of_node;
struct resource res;
struct of_flash *info;
- const char *probe_type = match->data;
+ const char *probe_type;
const __be32 *width;
int err;
int i;
@@ -233,6 +232,10 @@ static int __devinit of_flash_probe(struct platform_device *dev,
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;
+ if (!dev->dev.of_match)
+ return -EINVAL;
+ probe_type = dev->dev.of_match->data;
+
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
/*
@@ -410,7 +413,7 @@ static struct of_device_id of_flash_match[] = {
};
MODULE_DEVICE_TABLE(of, of_flash_match);
-static struct of_platform_driver of_flash_driver = {
+static struct platform_driver of_flash_driver = {
.driver = {
.name = "of-flash",
.owner = THIS_MODULE,
@@ -422,12 +425,12 @@ static struct of_platform_driver of_flash_driver = {
static int __init of_flash_init(void)
{
- return of_register_platform_driver(&of_flash_driver);
+ return platform_driver_register(&of_flash_driver);
}
static void __exit of_flash_exit(void)
{
- of_unregister_platform_driver(&of_flash_driver);
+ platform_driver_unregister(&of_flash_driver);
}
module_init(of_flash_init);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3582ba1f9b09..3f1cb328a574 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -148,7 +148,7 @@ static const struct of_device_id uflash_match[] = {
MODULE_DEVICE_TABLE(of, uflash_match);
-static struct of_platform_driver uflash_driver = {
+static struct platform_driver uflash_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -160,12 +160,12 @@ static struct of_platform_driver uflash_driver = {
static int __init uflash_init(void)
{
- return of_register_platform_driver(&uflash_driver);
+ return platform_driver_register(&uflash_driver);
}
static void __exit uflash_exit(void)
{
- of_unregister_platform_driver(&uflash_driver);
+ platform_driver_unregister(&uflash_driver);
}
module_init(uflash_init);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 650511304030..a534e1f0c348 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -124,7 +124,7 @@ int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
if (kthread_should_stop())
return 1;
- return !elv_queue_empty(dev->rq);
+ return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
@@ -141,6 +141,7 @@ static int mtd_blktrans_thread(void *arg)
while (!kthread_should_stop()) {
int res;
+ dev->bg_stop = false;
if (!req && !(req = blk_fetch_request(rq))) {
if (tr->background && !background_done) {
spin_unlock_irq(rq->queue_lock);
@@ -152,7 +153,7 @@ static int mtd_blktrans_thread(void *arg)
* Do background processing just once per idle
* period.
*/
- background_done = 1;
+ background_done = !dev->bg_stop;
continue;
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -198,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq)
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
- else
+ else {
+ dev->bg_stop = true;
wake_up_process(dev->thread);
+ }
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 78205ac2b10f..a92054e945e1 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -121,23 +121,6 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
-config MTD_NAND_OMAP_PREFETCH
- bool "GPMC prefetch support for NAND Flash device"
- depends on MTD_NAND_OMAP2
- default y
- help
- The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
- to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
- depends on MTD_NAND_OMAP_PREFETCH
- bool "DMA mode"
- default n
- help
- The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
- or in DMA interrupt mode.
- Say y for DMA mode or MPU mode will be used
-
config MTD_NAND_IDS
tristate
@@ -491,7 +474,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51
+ depends on IMX_HAVE_PLATFORM_MXC_NAND
help
This enables the driver for the NAND flash controller on the
MXC processors.
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index efdcca94ce55..073ee026a17c 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -217,8 +217,7 @@ err:
return ret;
}
-static int __devinit fun_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource io_res;
@@ -360,7 +359,7 @@ static const struct of_device_id of_fun_match[] = {
};
MODULE_DEVICE_TABLE(of, of_fun_match);
-static struct of_platform_driver of_fun_driver = {
+static struct platform_driver of_fun_driver = {
.driver = {
.name = "fsl,upm-nand",
.owner = THIS_MODULE,
@@ -372,13 +371,13 @@ static struct of_platform_driver of_fun_driver = {
static int __init fun_module_init(void)
{
- return of_register_platform_driver(&of_fun_driver);
+ return platform_driver_register(&of_fun_driver);
}
module_init(fun_module_init);
static void __exit fun_module_exit(void)
{
- of_unregister_platform_driver(&of_fun_driver);
+ platform_driver_unregister(&of_fun_driver);
}
module_exit(fun_module_exit);
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index ddaf0011aa88..0b81b5b499d1 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -651,8 +651,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *rootnode, *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -892,7 +891,7 @@ static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
{},
};
-static struct of_platform_driver mpc5121_nfc_driver = {
+static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
.remove = __devexit_p(mpc5121_nfc_remove),
.driver = {
@@ -904,14 +903,14 @@ static struct of_platform_driver mpc5121_nfc_driver = {
static int __init mpc5121_nfc_init(void)
{
- return of_register_platform_driver(&mpc5121_nfc_driver);
+ return platform_driver_register(&mpc5121_nfc_driver);
}
module_init(mpc5121_nfc_init);
static void __exit mpc5121_nfc_cleanup(void)
{
- of_unregister_platform_driver(&mpc5121_nfc_driver);
+ platform_driver_unregister(&mpc5121_nfc_driver);
}
module_exit(mpc5121_nfc_cleanup);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b7d5a5b9a543..42a95fb41504 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -747,9 +747,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
/*
* MXC NANDFC can only perform full page+spare or
* spare-only read/write. When the upper layers
- * layers perform a read/write buf operation,
- * we will used the saved column address to index into
- * the full page.
+ * perform a read/write buf operation, the saved column
+ * address is used to index into the full page.
*/
host->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index c9ae0a5023b6..bbe6d451290d 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -225,8 +225,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = &ndfc_ctrl;
const __be32 *reg;
@@ -292,7 +291,7 @@ static const struct of_device_id ndfc_match[] = {
};
MODULE_DEVICE_TABLE(of, ndfc_match);
-static struct of_platform_driver ndfc_driver = {
+static struct platform_driver ndfc_driver = {
.driver = {
.name = "ndfc",
.owner = THIS_MODULE,
@@ -304,12 +303,12 @@ static struct of_platform_driver ndfc_driver = {
static int __init ndfc_nand_init(void)
{
- return of_register_platform_driver(&ndfc_driver);
+ return platform_driver_register(&ndfc_driver);
}
static void __exit ndfc_nand_exit(void)
{
- of_unregister_platform_driver(&ndfc_driver);
+ platform_driver_unregister(&ndfc_driver);
}
module_init(ndfc_nand_init);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7a4e2550b13..da9a351c9d79 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
#include <plat/nand.h>
#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
@@ -96,26 +98,19 @@
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info generated runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
struct omap_nand_info {
struct nand_hw_control controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
unsigned long phys_base;
struct completion comp;
int dma_ch;
+ int gpmc_irq;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
};
/**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t pref_count = 0, w_count = 0;
+ uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p;
+ unsigned long tim, limit;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
iowrite16(*p++, info->nand.IO_ADDR_W);
}
/* wait for data to flushed-out before reset the prefetch */
- do {
- pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (pref_count);
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
}
}
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
* omap_nand_dma_cb: callback on the completion of dma transfer
* @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t prefetch_status = 0;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
dma_addr_t dma_addr;
int ret;
+ unsigned long tim, limit;
- /* The fifo depth is 64 bytes. We have a sync at each frame and frame
- * length is 64 bytes.
+ /* The fifo depth is 64 bytes max.
+ * But configure the FIFO-threshold to 32 to get a sync at each frame
+ * and frame length is 32 bytes.
*/
int buf_len = len >> 6;
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
- /* PFPW engine is busy, use cpu copy methode */
+ /* PFPW engine is busy, use cpu copy method */
goto out_copy;
init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
- do {
- prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (prefetch_status);
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
@@ -426,14 +436,6 @@ out_copy:
: omap_write_buf8(mtd, (u_char *) addr, len);
return 0;
}
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
-{
- return 0;
-}
-#endif
/**
* omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
+/*
+ * omap_nand_irq - GMPC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+ u32 irq_stat;
+
+ irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (irq_stat & 0x2)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.IO_ADDR_W,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->nand.IO_ADDR_R,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (irq_stat & 0x2)
+ goto done;
+ }
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+ /* disable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+ /* clear status */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer : size=24 */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+ /* wait for data to flushed-out before reset the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
/**
* omap_verify_buf - Verify chip data against buffer
* @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
return 0;
}
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
@@ -716,8 +867,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}
-#endif
-
/**
* omap_wait - wait until the command is done
* @mtd: MTD device structure
@@ -787,6 +936,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
int err;
+ int i, offset;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -812,7 +962,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->mtd.name = dev_name(&pdev->dev);
info->mtd.owner = THIS_MODULE;
- info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+ info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
/* NAND write protect off */
@@ -850,28 +1000,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.chip_delay = 50;
}
- if (use_prefetch) {
-
+ switch (pdata->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
info->nand.read_buf = omap_read_buf_pref;
info->nand.write_buf = omap_write_buf_pref;
- if (use_dma) {
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- printk(KERN_WARNING "DMA request failed."
- " Non-dma data transfer mode\n");
- } else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
- }
- }
- } else {
+ break;
+
+ case NAND_OMAP_POLLED:
if (info->nand.options & NAND_BUSWIDTH_16) {
info->nand.read_buf = omap_read_buf16;
info->nand.write_buf = omap_write_buf16;
@@ -879,20 +1014,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.read_buf = omap_read_buf8;
info->nand.write_buf = omap_write_buf8;
}
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+ omap_nand_dma_cb, &info->comp, &info->dma_ch);
+ if (err < 0) {
+ info->dma_ch = -1;
+ dev_err(&pdev->dev, "DMA request failed!\n");
+ goto out_release_mem_region;
+ } else {
+ omap_set_dma_dest_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+
+ info->nand.read_buf = omap_read_buf_dma_pref;
+ info->nand.write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ err = request_irq(pdata->gpmc_irq,
+ omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ pdata->gpmc_irq, err);
+ goto out_release_mem_region;
+ } else {
+ info->gpmc_irq = pdata->gpmc_irq;
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ err = -EINVAL;
+ goto out_release_mem_region;
}
- info->nand.verify_buf = omap_verify_buf;
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.verify_buf = omap_verify_buf;
-#else
- info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+ /* select the ecc type */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+ else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ }
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
@@ -905,6 +1081,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
}
}
+ /* rom code layout */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ offset = 2;
+ else {
+ offset = 1;
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ }
+ omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+ for (i = 0; i < omap_oobinfo.eccbytes; i++)
+ omap_oobinfo.eccpos[i] = i+offset;
+
+ omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+ omap_oobinfo.oobfree->length = info->mtd.oobsize -
+ (offset + omap_oobinfo.eccbytes);
+
+ info->nand.ecc.layout = &omap_oobinfo;
+ }
+
#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
@@ -934,9 +1130,12 @@ static int omap_nand_remove(struct platform_device *pdev)
mtd);
platform_set_drvdata(pdev, NULL);
- if (use_dma)
+ if (info->dma_ch != -1)
omap_free_dma(info->dma_ch);
+ if (info->gpmc_irq)
+ free_irq(info->gpmc_irq, info);
+
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
@@ -955,16 +1154,8 @@ static struct platform_driver omap_nand_driver = {
static int __init omap_nand_init(void)
{
- printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ pr_info("%s driver initializing\n", DRIVER_NAME);
- /* This check is required if driver is being
- * loaded run time as a module
- */
- if ((1 == use_dma) && (0 == use_prefetch)) {
- printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
- "without use_prefetch'. Prefetch will not be"
- " used in either mode (mpu or dma)\n");
- }
return platform_driver_register(&omap_nand_driver);
}
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index bb277a54986f..59efa829ef24 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,8 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->dev.of_node;
@@ -219,7 +218,7 @@ static const struct of_device_id pasemi_nand_match[] =
MODULE_DEVICE_TABLE(of, pasemi_nand_match);
-static struct of_platform_driver pasemi_nand_driver =
+static struct platform_driver pasemi_nand_driver =
{
.driver = {
.name = (char*)driver_name,
@@ -232,13 +231,13 @@ static struct of_platform_driver pasemi_nand_driver =
static int __init pasemi_nand_init(void)
{
- return of_register_platform_driver(&pasemi_nand_driver);
+ return platform_driver_register(&pasemi_nand_driver);
}
module_init(pasemi_nand_init);
static void __exit pasemi_nand_exit(void)
{
- of_unregister_platform_driver(&pasemi_nand_driver);
+ platform_driver_unregister(&pasemi_nand_driver);
}
module_exit(pasemi_nand_exit);
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
init_completion(&dev->dma_done);
- dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
if (!dev->card_workqueue)
goto error9;
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a8e403eebedb..a853548986f0 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -162,8 +162,7 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int __devinit socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
@@ -300,7 +299,7 @@ static const struct of_device_id socrates_nand_match[] =
MODULE_DEVICE_TABLE(of, socrates_nand_match);
-static struct of_platform_driver socrates_nand_driver = {
+static struct platform_driver socrates_nand_driver = {
.driver = {
.name = "socrates_nand",
.owner = THIS_MODULE,
@@ -312,12 +311,12 @@ static struct of_platform_driver socrates_nand_driver = {
static int __init socrates_nand_init(void)
{
- return of_register_platform_driver(&socrates_nand_driver);
+ return platform_driver_register(&socrates_nand_driver);
}
static void __exit socrates_nand_exit(void)
{
- of_unregister_platform_driver(&socrates_nand_driver);
+ platform_driver_unregister(&socrates_nand_driver);
}
module_init(socrates_nand_init);
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 3041d1f7ae3f..38fb16771f85 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,8 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
- struct tmio_nand_data *data = cell->driver_data;
+ struct tmio_nand_data *data = mfd_get_data(dev);
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
@@ -516,7 +515,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4dbd0f58eebf..4f426195f8db 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,7 +32,7 @@ config MTD_ONENAND_OMAP2
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
+ depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4
help
Support for a OneNAND flash device connected to an Samsung SOC.
S3C64XX/S5PC100 use command mapping method.
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ea32c2fc4622..f591f615d3f6 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
struct completion dma_done;
int dma_channel;
int freq;
- int (*setup)(void __iomem *base, int freq);
+ int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
};
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
- if ((intr & intr_flags) != intr_flags) {
- wait_err("timeout", state, ctrl, intr);
- return -EIO;
- }
- return 0;
+ if ((intr & intr_flags) == intr_flags)
+ return 0;
+ /* Continue in wait for interrupt branch */
}
if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
/* DMA is not in use so this is all that is needed */
/* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, c->freq);
+ ret = c->setup(c->onenand.base, &c->freq);
return ret;
}
@@ -674,7 +672,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, c->freq);
+ r = pdata->onenand_setup(c->onenand.base, &c->freq);
if (r < 0) {
dev_err(&pdev->dev, "Onenand platform setup failed: "
"%d\n", r);
@@ -719,8 +717,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p\n", c->gpmc_cs, c->phys_base,
- c->onenand.base);
+ "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+ c->onenand.base, c->freq);
c->pdev = pdev;
c->mtd.name = dev_name(&pdev->dev);
@@ -757,24 +755,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- switch ((c->onenand.version_id >> 4) & 0xf) {
- case 0:
- c->freq = 40;
- break;
- case 1:
- c->freq = 54;
- break;
- case 2:
- c->freq = 66;
- break;
- case 3:
- c->freq = 83;
- break;
- case 4:
- c->freq = 104;
- break;
- }
-
#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 6405a95dc5bd..2b0daae4018d 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1276,7 +1276,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
static __init int sm_module_init(void)
{
int error = 0;
- cache_flush_workqueue = create_freezeable_workqueue("smflush");
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
if (IS_ERR(cache_flush_workqueue))
return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3cf193fb5e00..6abeb4f13403 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -52,6 +52,12 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
-source "drivers/mtd/ubi/Kconfig.debug"
+config MTD_UBI_DEBUG
+ bool "UBI debugging"
+ depends on SYSFS
+ select DEBUG_FS
+ select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
+ help
+ This option enables UBI debugging.
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
deleted file mode 100644
index fad4adc0fe2c..000000000000
--- a/drivers/mtd/ubi/Kconfig.debug
+++ /dev/null
@@ -1,73 +0,0 @@
-comment "UBI debugging options"
-
-config MTD_UBI_DEBUG
- bool "UBI debugging"
- depends on SYSFS
- select DEBUG_FS
- select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
- help
- This option enables UBI debugging.
-
-if MTD_UBI_DEBUG
-
-config MTD_UBI_DEBUG_MSG
- bool "UBI debugging messages"
- help
- This option enables UBI debugging messages.
-
-config MTD_UBI_DEBUG_PARANOID
- bool "Extra self-checks"
- help
- This option enables extra checks in UBI code. Note this slows UBI down
- significantly.
-
-config MTD_UBI_DEBUG_DISABLE_BGT
- bool "Do not enable the UBI background thread"
- help
- This option switches the background thread off by default. The thread
- may be also be enabled/disabled via UBI sysfs.
-
-config MTD_UBI_DEBUG_EMULATE_BITFLIPS
- bool "Emulate flash bit-flips"
- help
- This option emulates bit-flips with probability 1/50, which in turn
- causes scrubbing. Useful for debugging and stressing UBI.
-
-config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
- bool "Emulate flash write failures"
- help
- This option emulates write failures with probability 1/100. Useful for
- debugging and testing how UBI handlines errors.
-
-config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
- bool "Emulate flash erase failures"
- help
- This option emulates erase failures with probability 1/100. Useful for
- debugging and testing how UBI handlines errors.
-
-comment "Additional UBI debugging messages"
-
-config MTD_UBI_DEBUG_MSG_BLD
- bool "Additional UBI initialization and build messages"
- help
- This option enables detailed UBI initialization and device build
- debugging messages.
-
-config MTD_UBI_DEBUG_MSG_EBA
- bool "Eraseblock association unit messages"
- help
- This option enables debugging messages from the UBI eraseblock
- association unit.
-
-config MTD_UBI_DEBUG_MSG_WL
- bool "Wear-leveling unit messages"
- help
- This option enables debugging messages from the UBI wear-leveling
- unit.
-
-config MTD_UBI_DEBUG_MSG_IO
- bool "Input/output unit messages"
- help
- This option enables debugging messages from the UBI input/output unit.
-
-endif # MTD_UBI_DEBUG
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f49e49dc5928..65626c1c446d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,33 +672,7 @@ static int io_init(struct ubi_device *ubi)
ubi->nor_flash = 1;
}
- /*
- * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
- * for these purposes, not @mtd->writesize. At the moment this does not
- * matter for NAND, because currently @mtd->writebufsize is equivalent to
- * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
- * have @mtd->writebufsize which is multiple of @mtd->writesize.
- *
- * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
- * UBI and UBIFS recovery algorithms rely on the fact that if there was
- * an unclean power cut, then we can find offset of the last corrupted
- * node, align the offset to @ubi->min_io_size, read the rest of the
- * eraseblock starting from this offset, and check whether there are
- * only 0xFF bytes. If yes, then we are probably dealing with a
- * corruption caused by a power cut, if not, then this is probably some
- * severe corruption.
- *
- * Thus, we have to use the maximum write unit size of the flash, which
- * is @mtd->writebufsize, because @mtd->writesize is the minimum write
- * size, not the maximum.
- */
- if (ubi->mtd->type == MTD_NANDFLASH)
- ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
- else if (ubi->mtd->type == MTD_NORFLASH)
- ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
-
- ubi->min_io_size = ubi->mtd->writebufsize;
-
+ ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
/*
@@ -716,11 +690,25 @@ static int io_init(struct ubi_device *ubi)
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+ ubi->max_write_size = ubi->mtd->writebufsize;
+ /*
+ * Maximum write size has to be greater or equivalent to min. I/O
+ * size, and be multiple of min. I/O size.
+ */
+ if (ubi->max_write_size < ubi->min_io_size ||
+ ubi->max_write_size % ubi->min_io_size ||
+ !is_power_of_2(ubi->max_write_size)) {
+ ubi_err("bad write buffer size %d for %d min. I/O unit",
+ ubi->max_write_size, ubi->min_io_size);
+ return -EINVAL;
+ }
+
/* Calculate default aligned sizes of EC and VID headers */
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
dbg_msg("min_io_size %d", ubi->min_io_size);
+ dbg_msg("max_write_size %d", ubi->max_write_size);
dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
@@ -737,7 +725,7 @@ static int io_init(struct ubi_device *ubi)
}
/* Similar for the data offset */
- ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
@@ -949,6 +937,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+ dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
+ dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
err = io_init(ubi);
if (err)
@@ -963,13 +953,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf2)
goto out_free;
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- mutex_init(&ubi->dbg_buf_mutex);
- ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
- if (!ubi->dbg_peb_buf)
- goto out_free;
-#endif
-
err = attach_by_scanning(ubi);
if (err) {
dbg_err("failed to attach by scanning, error %d", err);
@@ -1017,8 +1000,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
*/
spin_lock(&ubi->wl_lock);
- if (!DBG_DISABLE_BGT)
- ubi->thread_enabled = 1;
+ ubi->thread_enabled = 1;
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
@@ -1035,9 +1017,6 @@ out_detach:
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- vfree(ubi->dbg_peb_buf);
-#endif
if (ref)
put_device(&ubi->dev);
else
@@ -1108,9 +1087,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- vfree(ubi->dbg_peb_buf);
-#endif
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 4876977e52cb..d4d07e5f138f 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -27,6 +27,20 @@
#ifdef CONFIG_MTD_UBI_DEBUG
#include "ubi.h"
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+unsigned int ubi_msg_flags;
+unsigned int ubi_chk_flags;
+unsigned int ubi_tst_flags;
+
+module_param_named(debug_msgs, ubi_msg_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_tsts, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
+
+MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
+MODULE_PARM_DESC(debug_chks, "Debug check flags");
+MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
/**
* ubi_dbg_dump_ec_hdr - dump an erase counter header.
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 9eca95074bc2..0b0c2888c656 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -38,6 +38,11 @@
printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
+#define dbg_do_msg(typ, fmt, ...) do { \
+ if (ubi_msg_flags & typ) \
+ dbg_msg(fmt, ##__VA_ARGS__); \
+} while (0)
+
#define ubi_dbg_dump_stack() dump_stack()
struct ubi_ec_hdr;
@@ -57,62 +62,88 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+extern unsigned int ubi_msg_flags;
+
+/*
+ * Debugging message type flags (must match msg_type_names in debug.c).
+ *
+ * UBI_MSG_GEN: general messages
+ * UBI_MSG_EBA: journal messages
+ * UBI_MSG_WL: mount messages
+ * UBI_MSG_IO: commit messages
+ * UBI_MSG_BLD: LEB find messages
+ */
+enum {
+ UBI_MSG_GEN = 0x1,
+ UBI_MSG_EBA = 0x2,
+ UBI_MSG_WL = 0x4,
+ UBI_MSG_IO = 0x8,
+ UBI_MSG_BLD = 0x10,
+};
+
#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
/* General debugging messages */
-#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_gen(fmt, ...) ({})
-#endif
+#define dbg_gen(fmt, ...) dbg_do_msg(UBI_MSG_GEN, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
/* Messages from the eraseblock association sub-system */
-#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_eba(fmt, ...) ({})
-#endif
+#define dbg_eba(fmt, ...) dbg_do_msg(UBI_MSG_EBA, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
/* Messages from the wear-leveling sub-system */
-#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_wl(fmt, ...) ({})
-#endif
+#define dbg_wl(fmt, ...) dbg_do_msg(UBI_MSG_WL, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
/* Messages from the input/output sub-system */
-#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_io(fmt, ...) ({})
-#endif
+#define dbg_io(fmt, ...) dbg_do_msg(UBI_MSG_IO, fmt, ##__VA_ARGS__)
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
/* Initialization and build messages */
-#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#define UBI_IO_DEBUG 1
-#else
-#define dbg_bld(fmt, ...) ({})
-#define UBI_IO_DEBUG 0
-#endif
+#define dbg_bld(fmt, ...) dbg_do_msg(UBI_MSG_BLD, fmt, ##__VA_ARGS__)
+
+extern unsigned int ubi_chk_flags;
+
+/*
+ * Debugging check flags.
+ *
+ * UBI_CHK_GEN: general checks
+ * UBI_CHK_IO: check writes and erases
+ */
+enum {
+ UBI_CHK_GEN = 0x1,
+ UBI_CHK_IO = 0x2,
+};
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len);
-#else
-#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
-#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
-#define DBG_DISABLE_BGT 1
-#else
-#define DBG_DISABLE_BGT 0
-#endif
+extern unsigned int ubi_tst_flags;
+
+/*
+ * Special testing flags.
+ *
+ * UBIFS_TST_DISABLE_BGT: disable the background thread
+ * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips
+ * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures
+ * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures
+ */
+enum {
+ UBI_TST_DISABLE_BGT = 0x1,
+ UBI_TST_EMULATE_BITFLIPS = 0x2,
+ UBI_TST_EMULATE_WRITE_FAILURES = 0x4,
+ UBI_TST_EMULATE_ERASE_FAILURES = 0x8,
+};
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(void)
+{
+ return ubi_tst_flags & UBI_TST_DISABLE_BGT;
+}
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
*
@@ -120,13 +151,11 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
*/
static inline int ubi_dbg_is_bitflip(void)
{
- return !(random32() % 200);
+ if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS)
+ return !(random32() % 200);
+ return 0;
}
-#else
-#define ubi_dbg_is_bitflip() 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
*
@@ -135,13 +164,11 @@ static inline int ubi_dbg_is_bitflip(void)
*/
static inline int ubi_dbg_is_write_failure(void)
{
- return !(random32() % 500);
+ if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES)
+ return !(random32() % 500);
+ return 0;
}
-#else
-#define ubi_dbg_is_write_failure() 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
/**
* ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
*
@@ -150,11 +177,10 @@ static inline int ubi_dbg_is_write_failure(void)
*/
static inline int ubi_dbg_is_erase_failure(void)
{
+ if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES)
return !(random32() % 400);
+ return 0;
}
-#else
-#define ubi_dbg_is_erase_failure() 0
-#endif
#else
@@ -177,8 +203,7 @@ static inline int ubi_dbg_is_erase_failure(void)
#define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({})
#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({})
-#define UBI_IO_DEBUG 0
-#define DBG_DISABLE_BGT 0
+#define ubi_dbg_is_bgt_disabled() 0
#define ubi_dbg_is_bitflip() 0
#define ubi_dbg_is_write_failure() 0
#define ubi_dbg_is_erase_failure() 0
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 811775aa8ee8..eededf94f5a6 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,7 +91,7 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
@@ -146,6 +146,28 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
if (err)
return err;
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from previous operation, e.g., read from
+ * another PEB previously. The data looks like expected, e.g., if we
+ * just do not read anything and return - the caller would not
+ * notice this. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns us success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data to the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
+
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
@@ -166,7 +188,7 @@ retry:
return UBI_IO_BITFLIPS;
}
- if (read != len && retries++ < UBI_IO_RETRIES) {
+ if (retries++ < UBI_IO_RETRIES) {
dbg_io("error %d%s while reading %d bytes from PEB %d:%d,"
" read only %zd bytes, retry",
err, errstr, len, pnum, offset, read);
@@ -480,6 +502,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
size_t written;
loff_t addr;
uint32_t data = 0;
+ /*
+ * Note, we cannot generally define VID header buffers on stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate VID header buffer instead.
+ */
struct ubi_vid_hdr vid_hdr;
/*
@@ -507,11 +536,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* PEB.
*/
err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) {
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF) {
struct ubi_ec_hdr ec_hdr;
err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF)
/*
* Both VID and EC headers are corrupted, so we can
* safely erase this PEB and not afraid that it will be
@@ -752,9 +783,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (verbose)
ubi_warn("no EC header found at PEB %d, "
"only 0xFF bytes", pnum);
- else if (UBI_IO_DEBUG)
- dbg_msg("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ dbg_bld("no EC header found at PEB %d, "
+ "only 0xFF bytes", pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -769,9 +799,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
ubi_dbg_dump_ec_hdr(ec_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -783,9 +813,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad EC header CRC at PEB %d, calculated "
"%#08x, read %#08x", pnum, crc, hdr_crc);
ubi_dbg_dump_ec_hdr(ec_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ }
+ dbg_bld("bad EC header CRC at PEB %d, calculated "
+ "%#08x, read %#08x", pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
@@ -1008,9 +1038,8 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (verbose)
ubi_warn("no VID header found at PEB %d, "
"only 0xFF bytes", pnum);
- else if (UBI_IO_DEBUG)
- dbg_msg("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ dbg_bld("no VID header found at PEB %d, "
+ "only 0xFF bytes", pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -1021,9 +1050,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
ubi_dbg_dump_vid_hdr(vid_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -1035,9 +1064,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
ubi_dbg_dump_vid_hdr(vid_hdr);
- } else if (UBI_IO_DEBUG)
- dbg_msg("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ }
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, "
+ "read %#08x", pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
@@ -1097,7 +1126,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
@@ -1111,6 +1140,9 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
@@ -1135,6 +1167,9 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
ubi_err("bad magic %#08x, must be %#08x",
@@ -1170,6 +1205,9 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
@@ -1211,6 +1249,9 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
@@ -1249,6 +1290,9 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
@@ -1294,15 +1338,26 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len)
{
int err, i;
+ size_t read;
+ void *buf1;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- mutex_lock(&ubi->dbg_buf_mutex);
- err = ubi_io_read(ubi, ubi->dbg_peb_buf, pnum, offset, len);
- if (err)
- goto out_unlock;
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
+ buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ if (!buf1) {
+ ubi_err("cannot allocate memory to check writes");
+ return 0;
+ }
+
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+ if (err && err != -EUCLEAN)
+ goto out_free;
for (i = 0; i < len; i++) {
uint8_t c = ((uint8_t *)buf)[i];
- uint8_t c1 = ((uint8_t *)ubi->dbg_peb_buf)[i];
+ uint8_t c1 = ((uint8_t *)buf1)[i];
int dump_len;
if (c == c1)
@@ -1319,17 +1374,17 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
ubi_msg("hex dump of the read buffer from %d to %d",
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->dbg_peb_buf + i, dump_len, 1);
+ buf1 + i, dump_len, 1);
ubi_dbg_dump_stack();
err = -EINVAL;
- goto out_unlock;
+ goto out_free;
}
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf1);
return 0;
-out_unlock:
- mutex_unlock(&ubi->dbg_buf_mutex);
+out_free:
+ vfree(buf1);
return err;
}
@@ -1348,36 +1403,44 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
+ void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- mutex_lock(&ubi->dbg_buf_mutex);
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
+ if (!(ubi_chk_flags & UBI_CHK_IO))
+ return 0;
+
+ buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ if (!buf) {
+ ubi_err("cannot allocate memory to check for 0xFFs");
+ return 0;
+ }
+
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
goto error;
}
- err = ubi_check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+ err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
ubi_err("flash region at PEB %d:%d, length %d does not "
"contain all 0xFF bytes", pnum, offset, len);
goto fail;
}
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf);
return 0;
fail:
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->dbg_peb_buf, len, 1);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
ubi_dbg_dump_stack();
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf);
return err;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
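
The net effect of the io.c hunks above: the pre-allocated @dbg_peb_buf and its @dbg_buf_mutex are gone, and each self-check allocates a temporary buffer with __vmalloc() only when the UBI_CHK_IO bit is set in ubi_chk_flags. Reduced to its essentials, the pattern looks roughly like the sketch below; my_check_region() is a made-up name, not something this patch adds.

#include "ubi.h"

/* Illustrative sketch of the allocate-on-demand self-check pattern. */
static int my_check_region(struct ubi_device *ubi, int pnum, int offset,
			   int len, uint8_t patt)
{
	size_t read;
	void *buf;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
	int err;

	if (!(ubi_chk_flags & UBI_CHK_IO))
		return 0;		/* run-time switch: checks disabled */

	/* GFP_NOFS because this may run from write-back paths; no fixed
	 * per-device debug buffer is needed any more. */
	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubi_err("cannot allocate memory for the self-check");
		return 0;		/* skip the check, do not fail the caller */
	}

	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
	if (err && err != -EUCLEAN)
		goto out_free;

	err = ubi_check_pattern(buf, patt, len) ? 0 : -EINVAL;
out_free:
	vfree(buf);
	return err;
}

Dropping the shared buffer also removes the dbg_buf_mutex serialization point, so concurrent self-checks no longer contend with each other; the new <asm/pgtable.h> include added to ubi.h further down is what keeps PAGE_KERNEL visible to these callers.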
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 69fa4ef03c53..d39716e5b204 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -40,7 +40,9 @@ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
{
di->ubi_num = ubi->ubi_num;
di->leb_size = ubi->leb_size;
+ di->leb_start = ubi->leb_start;
di->min_io_size = ubi->min_io_size;
+ di->max_write_size = ubi->max_write_size;
di->ro_mode = ubi->ro_mode;
di->cdev = ubi->cdev.dev;
}
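
The two new fields filled in by ubi_do_get_device_info() reach in-kernel clients through ubi_get_device_info(). A rough usage sketch of a caller that wants to size its buffers to the flash write-buffer size; my_query_ubi() is hypothetical, only the fields and ubi_get_device_info() come from UBI:

#include <linux/kernel.h>
#include <linux/mtd/ubi.h>

/* Hypothetical consumer of the new leb_start/max_write_size fields. */
static int my_query_ubi(int ubi_num)
{
	struct ubi_device_info di;
	int err;

	err = ubi_get_device_info(ubi_num, &di);
	if (err)
		return err;

	/* Writes aligned to max_write_size let the flash program whole
	 * write-buffers at a time, which is the kind of consumer this
	 * field was exported for. */
	pr_info("UBI%d: LEBs start at %d, max write size %d\n",
		di.ubi_num, di.leb_start, di.max_write_size);
	return 0;
}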
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 79ca304fc4db..11eb8ef12485 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -39,32 +39,46 @@
* eraseblocks are put to the @free list and the physical eraseblock to be
* erased are put to the @erase list.
*
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
* UBI tries to distinguish between 2 types of corruptions.
- * 1. Corruptions caused by power cuts. These are harmless and expected
- * corruptions and UBI tries to handle them gracefully, without printing too
- * many warnings and error messages. The idea is that we do not lose
- * important data in these case - we may lose only the data which was being
- * written to the media just before the power cut happened, and the upper
- * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts
- * these PEBs to the head of the @erase list and they are scheduled for
- * erasure.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in this case
+ * - we may lose only the data which was being written to the media just before
+ * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
+ * handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
- * Obviously, this lessens the amount of available PEBs, and if at some
- * point UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly
- * informs about such PEBs every time the MTD device is attached.
+ * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID
- * header is corrupted and the data area does not contain all 0xFFs, and there
- * were not bit-flips or integrity errors while reading the data area. Otherwise
- * UBI assumes (1.). The assumptions are:
- * o if the data area contains only 0xFFs, there is no data, and it is safe
- * to just erase this PEB.
- * o if the data area has bit-flips and data integrity errors (ECC errors on
+ * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
+ * if the VID header is corrupted and the data area does not contain all 0xFFs,
+ * and there were no bit-flips or integrity errors while reading the data area.
+ * Otherwise UBI assumes corruption type 1. So the decision criteria are as
+ * follows.
+ * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
- * happened.
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
*/
#include <linux/err.h>
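
The decision criteria spelled out in the comment above boil down to a small classification step. The helper below is purely illustrative (classify_corruption() is not a function from this patch); it just restates the policy in code form.

/* Illustrative restatement of the corruption policy described above. */
static int classify_corruption(int data_read_err, const void *data, int len)
{
	/* Only 0xFF bytes: nothing was written yet, safe to erase (type 1). */
	if (ubi_check_pattern(data, 0xFF, len))
		return 1;

	/* Bit-flips or integrity errors while reading the data area: most
	 * likely an interrupted erase, treated as power-cut damage (type 1),
	 * although this is only a guess. */
	if (data_read_err == UBI_IO_BITFLIPS || data_read_err == -EBADMSG)
		return 1;

	/* Otherwise: unexpected corruption, preserve the PEB on @corr (type 2). */
	return 2;
}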
@@ -74,7 +88,7 @@
#include <linux/random.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
#else
#define paranoid_check_si(ubi, si) 0
@@ -115,7 +129,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
} else
BUG();
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -144,7 +158,7 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -553,7 +567,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (err)
return err;
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
if (!seb)
return -ENOMEM;
@@ -1152,9 +1166,15 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
si->volumes = RB_ROOT;
err = -ENOMEM;
+ si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
+ sizeof(struct ubi_scan_leb),
+ 0, 0, NULL);
+ if (!si->scan_leb_slab)
+ goto out_si;
+
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_si;
+ goto out_slab;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
@@ -1215,6 +1235,8 @@ out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
+out_slab:
+ kmem_cache_destroy(si->scan_leb_slab);
out_si:
ubi_scan_destroy_si(si);
return ERR_PTR(err);
@@ -1223,11 +1245,12 @@ out_si:
/**
* destroy_sv - free the scanning volume information
* @sv: scanning volume information
+ * @si: scanning information
*
* This function destroys the volume RB-tree (@sv->root) and the scanning
* volume information.
*/
-static void destroy_sv(struct ubi_scan_volume *sv)
+static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
{
struct ubi_scan_leb *seb;
struct rb_node *this = sv->root.rb_node;
@@ -1247,7 +1270,7 @@ static void destroy_sv(struct ubi_scan_volume *sv)
this->rb_right = NULL;
}
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
}
kfree(sv);
@@ -1265,19 +1288,19 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
list_del(&seb->u.list);
- kfree(seb);
+ kmem_cache_free(si->scan_leb_slab, seb);
}
/* Destroy the volume RB-tree */
@@ -1298,14 +1321,15 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
rb->rb_right = NULL;
}
- destroy_sv(sv);
+ destroy_sv(si, sv);
}
}
+ kmem_cache_destroy(si->scan_leb_slab);
kfree(si);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_si - check the scanning information.
@@ -1323,6 +1347,9 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
struct ubi_scan_leb *seb, *last_seb;
uint8_t *buf;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
/*
* At first, check that scanning information is OK.
*/
@@ -1575,4 +1602,4 @@ out:
return -EINVAL;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
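
Besides the debug rework, the scan.c hunks replace the kmalloc()/kfree() pairs for struct ubi_scan_leb with a dedicated slab cache that lives for the duration of one scan. Stripped of error handling, the lifecycle amounts to the sketch below; scan_leb_slab_demo() is a made-up wrapper, not the actual attach path.

/* Sketch of the scan_leb_slab lifecycle introduced above. */
static int scan_leb_slab_demo(void)
{
	struct kmem_cache *slab;
	struct ubi_scan_leb *seb;

	slab = kmem_cache_create("ubi_scan_leb_slab",
				 sizeof(struct ubi_scan_leb), 0, 0, NULL);
	if (!slab)
		return -ENOMEM;

	seb = kmem_cache_alloc(slab, GFP_KERNEL);   /* one object per scanned PEB */
	if (seb)
		kmem_cache_free(slab, seb);         /* freed via the cache, not kfree() */

	kmem_cache_destroy(slab);                   /* done in ubi_scan_destroy_si() */
	return 0;
}

A dedicated cache avoids thousands of individual kmalloc() calls during attach on large devices and packs the fixed-size objects more densely.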
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index a3264f0bef2b..d48aef15ab5d 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -109,6 +109,7 @@ struct ubi_scan_volume {
* @mean_ec: mean erase counter value
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
+ * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
*
* This data structure contains the result of scanning and may be used by other
* UBI sub-systems to build final UBI data structures, further error-recovery
@@ -134,6 +135,7 @@ struct ubi_scan_info {
int mean_ec;
uint64_t ec_sum;
int ec_count;
+ struct kmem_cache *scan_leb_slab;
};
struct ubi_device;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b0149c41fe3..f1be8b79663c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -40,6 +40,7 @@
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
+#include <asm/pgtable.h>
#include "ubi-media.h"
#include "scan.h"
@@ -381,14 +382,14 @@ struct ubi_wl_entry;
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
* @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum number of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
- * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: protects @dbg_peb_buf
*/
struct ubi_device {
struct cdev cdev;
@@ -464,16 +465,13 @@ struct ubi_device {
int vid_hdr_shift;
unsigned int bad_allowed:1;
unsigned int nor_flash:1;
+ int max_write_size;
struct mtd_info *mtd;
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
- void *dbg_peb_buf;
- struct mutex dbg_buf_mutex;
-#endif
};
extern struct kmem_cache *ubi_wl_entry_slab;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index c47620dfc722..b79e0dea3632 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_volumes(struct ubi_device *ubi);
#else
#define paranoid_check_volumes(ubi) 0
@@ -711,7 +711,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
volume_sysfs_close(vol);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_volume - check volume information.
@@ -876,6 +876,9 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
for (i = 0; i < ubi->vtbl_slots; i++) {
err = paranoid_check_volume(ubi, i);
if (err)
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 0b8141fc5c26..fd3bf770f518 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -62,7 +62,7 @@
#include <asm/div64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static void paranoid_vtbl_check(const struct ubi_device *ubi);
#else
#define paranoid_vtbl_check(ubi)
@@ -868,7 +868,7 @@ out_free:
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_vtbl_check - check volume table.
@@ -876,10 +876,13 @@ out_free:
*/
static void paranoid_vtbl_check(const struct ubi_device *ubi)
{
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return;
+
if (vtbl_check(ubi, ubi->vtbl)) {
ubi_err("paranoid check failed");
BUG();
}
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
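
All of the paranoid_* helpers now start with the same ubi_chk_flags test, turning the old compile-time CONFIG_MTD_UBI_DEBUG_PARANOID checks into run-time switchable ones. The bitmask itself is defined in debug.h/debug.c, which these hunks do not show; a plausible shape for such a knob, purely as a sketch with made-up names, is:

#include <linux/module.h>
#include <linux/stat.h>

/* Sketch only: a run-time bitmask gating the expensive self-checks.
 * MY_CHK_* and my_chk_flags are illustrative names, not UBI's. */
#define MY_CHK_GEN	0x1	/* general extra consistency checks */
#define MY_CHK_IO	0x2	/* read back and verify every write  */

static unsigned int my_chk_flags;
module_param_named(debug_checks, my_chk_flags, uint, S_IRUGO | S_IWUSR);

static int my_paranoid_check(void)
{
	if (!(my_chk_flags & MY_CHK_GEN))
		return 0;	/* disabled: succeed without doing any work */
	/* expensive consistency checking would go here */
	return 0;
}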
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 655bbbe415d9..b4cf57db2556 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -161,7 +161,7 @@ struct ubi_work {
int torture;
};
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root);
@@ -613,7 +613,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
- if (ubi->thread_enabled)
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
}
@@ -1364,7 +1364,7 @@ int ubi_thread(void *u)
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works) || ubi->ro_mode ||
- !ubi->thread_enabled) {
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
schedule();
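
The ubi_dbg_is_bgt_disabled() guards added to schedule_ubi_work() and ubi_thread() above let test setups park the background thread without detaching the device. The helper itself lives in debug.h; roughly, it is a test-flags query along these lines (a sketch, the exact flag and variable names may differ):

/* Approximate shape of the debug.h helper used above. */
extern unsigned int ubi_tst_flags;
#define UBI_TST_DISABLE_BGT	0x1

static inline int ubi_dbg_is_bgt_disabled(void)
{
	/* Non-zero means: do not wake or run the background thread. */
	return ubi_tst_flags & UBI_TST_DISABLE_BGT;
}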
@@ -1561,7 +1561,7 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
/**
* paranoid_check_ec - make sure that the erase counter of a PEB is correct.
@@ -1578,6 +1578,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
@@ -1614,6 +1617,9 @@ out_free:
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root)
{
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
if (in_wl_tree(e, root))
return 0;
@@ -1636,6 +1642,9 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
struct ubi_wl_entry *p;
int i;
+ if (!(ubi_chk_flags & UBI_CHK_GEN))
+ return 0;
+
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
list_for_each_entry(p, &ubi->pq[i], u.list)
if (p == e)
@@ -1646,4 +1655,5 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi_dbg_dump_stack();
return -EINVAL;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+
+#endif /* CONFIG_MTD_UBI_DEBUG */