Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Makefile                    4
-rw-r--r--  drivers/mtd/cfi_mtd.c                  83
-rw-r--r--  drivers/mtd/mtdconcat.c               807
-rw-r--r--  drivers/mtd/mtdpart.c                 488
-rw-r--r--  drivers/mtd/nand/Makefile               1
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c        692
-rw-r--r--  drivers/mtd/nand/nand.c                 2
-rw-r--r--  drivers/mtd/nand/nand_util.c           11
-rw-r--r--  drivers/mtd/onenand/onenand_uboot.c     2
-rw-r--r--  drivers/mtd/spi/Makefile                1
-rw-r--r--  drivers/mtd/spi/atmel.c               139
-rw-r--r--  drivers/mtd/spi/macronix.c            312
-rw-r--r--  drivers/mtd/spi/spi_flash.c             5
-rw-r--r--  drivers/mtd/spi/spi_flash_internal.h    1
-rw-r--r--  drivers/mtd/ubi/build.c                 4
15 files changed, 2274 insertions, 278 deletions
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index ed3f91e..754d648 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -25,7 +25,9 @@ include $(TOPDIR)/config.mk
LIB := $(obj)libmtd.a
-COBJS-$(CONFIG_MTD_PARTITIONS) += mtdcore.o mtdpart.o
+COBJS-$(CONFIG_MTD_DEVICE) += mtdcore.o
+COBJS-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+COBJS-$(CONFIG_MTD_CONCAT) += mtdconcat.o
COBJS-$(CONFIG_HAS_DATAFLASH) += at45.o
COBJS-$(CONFIG_FLASH_CFI_DRIVER) += cfi_flash.o
COBJS-$(CONFIG_FLASH_CFI_MTD) += cfi_mtd.o
diff --git a/drivers/mtd/cfi_mtd.c b/drivers/mtd/cfi_mtd.c
index 4a76917..c7e357b 100644
--- a/drivers/mtd/cfi_mtd.c
+++ b/drivers/mtd/cfi_mtd.c
@@ -25,14 +25,19 @@
#include <common.h>
#include <flash.h>
+#include <malloc.h>
#include <asm/errno.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
extern flash_info_t flash_info[];
static struct mtd_info cfi_mtd_info[CONFIG_SYS_MAX_FLASH_BANKS];
static char cfi_mtd_names[CONFIG_SYS_MAX_FLASH_BANKS][16];
+#ifdef CONFIG_MTD_CONCAT
+static char c_mtd_name[16];
+#endif
static int cfi_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
@@ -118,7 +123,7 @@ static void cfi_mtd_sync(struct mtd_info *mtd)
*/
}
-static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
flash_info_t *fi = mtd->priv;
@@ -130,7 +135,7 @@ static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
return 0;
}
-static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
flash_info_t *fi = mtd->priv;
@@ -145,16 +150,68 @@ static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
static int cfi_mtd_set_erasesize(struct mtd_info *mtd, flash_info_t *fi)
{
int sect_size = 0;
+ int sect_size_old = 0;
int sect;
+ int regions = 0;
+ int numblocks = 0;
+ ulong offset = 0;
+ ulong base_addr = fi->start[0];
/*
- * Select the largest sector size as erasesize (e.g. for UBI)
+ * First detect the number of eraseregions so that we can allocate
+ * the array of eraseregions correctly
*/
for (sect = 0; sect < fi->sector_count; sect++) {
+ if (sect_size_old != flash_sector_size(fi, sect))
+ regions++;
+ sect_size_old = flash_sector_size(fi, sect);
+ }
+
+ mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info) * regions);
+
+ /*
+ * Now detect the largest sector and fill the eraseregions
+ */
+ sect_size_old = 0;
+ regions = 0;
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if ((sect_size_old != flash_sector_size(fi, sect)) &&
+ (sect_size_old != 0)) {
+ mtd->eraseregions[regions].offset = offset - base_addr;
+ mtd->eraseregions[regions].erasesize = sect_size_old;
+ mtd->eraseregions[regions].numblocks = numblocks;
+
+ /* Now start counting the next eraseregions */
+ numblocks = 0;
+ regions++;
+ } else {
+ numblocks++;
+ }
+
+ if (sect_size_old != flash_sector_size(fi, sect))
+ offset = fi->start[sect];
+
+ /*
+ * Select the largest sector size as erasesize (e.g. for UBI)
+ */
if (flash_sector_size(fi, sect) > sect_size)
sect_size = flash_sector_size(fi, sect);
+
+ sect_size_old = flash_sector_size(fi, sect);
}
+ /*
+ * Set the last region
+ */
+ mtd->eraseregions[regions].offset = offset - base_addr;
+ mtd->eraseregions[regions].erasesize = sect_size_old;
+ mtd->eraseregions[regions].numblocks = numblocks + 1;
+
+ if (regions)
+ mtd->numeraseregions = regions + 1;
+ else
+ mtd->numeraseregions = 0;
+
mtd->erasesize = sect_size;
return 0;
@@ -165,6 +222,8 @@ int cfi_mtd_init(void)
struct mtd_info *mtd;
flash_info_t *fi;
int error, i;
+ int devices_found = 0;
+ struct mtd_info *mtd_list[CONFIG_SYS_MAX_FLASH_BANKS];
for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
fi = &flash_info[i];
@@ -193,7 +252,25 @@ int cfi_mtd_init(void)
if (add_mtd_device(mtd))
return -ENOMEM;
+
+ mtd_list[devices_found++] = mtd;
+ }
+
+#ifdef CONFIG_MTD_CONCAT
+ if (devices_found > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ sprintf(c_mtd_name, "nor%d", devices_found);
+ mtd = mtd_concat_create(mtd_list, devices_found, c_mtd_name);
+
+ if (mtd == NULL)
+ return -ENXIO;
+
+ if (add_mtd_device(mtd))
+ return -ENOMEM;
}
+#endif /* CONFIG_MTD_CONCAT */
return 0;
}
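
The erase-region code added to cfi_mtd_set_erasesize() above groups runs of equally sized sectors into mtd_erase_region_info entries. Below is a minimal host-side sketch of that grouping, using an invented sector layout (8 x 8 KiB boot sectors followed by 3 x 64 KiB sectors); struct region is a stand-in for struct mtd_erase_region_info, and none of this is U-Boot code.

#include <stdio.h>

struct region {
	unsigned long offset;		/* relative to the start of the bank */
	unsigned long erasesize;
	int numblocks;
};

int main(void)
{
	const unsigned long sect_size[] = {
		8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
		65536, 65536, 65536,
	};
	const int nsect = sizeof(sect_size) / sizeof(sect_size[0]);
	struct region regions[8];
	unsigned long offset = 0, addr = 0, old = 0;
	int i, r = 0, blocks = 0;

	for (i = 0; i < nsect; i++) {
		if (old && sect_size[i] != old) {
			/* sector size changed: close the region counted so far */
			regions[r].offset = offset;
			regions[r].erasesize = old;
			regions[r].numblocks = blocks;
			r++;
			offset = addr;
			blocks = 0;
		}
		blocks++;
		old = sect_size[i];
		addr += sect_size[i];
	}
	/* the last run of sectors forms the final region */
	regions[r].offset = offset;
	regions[r].erasesize = old;
	regions[r].numblocks = blocks;
	r++;

	for (i = 0; i < r; i++)
		printf("region %d: offset 0x%06lx, erasesize 0x%05lx, %d blocks\n",
		       i, regions[i].offset, regions[i].erasesize,
		       regions[i].numblocks);
	return 0;
}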
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
new file mode 100644
index 0000000..fc22701
--- /dev/null
+++ b/drivers/mtd/mtdconcat.c
@@ -0,0 +1,807 @@
+/*
+ * MTD device concatenation layer
+ *
+ * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
+ *
+ * NAND support by Christian Gan <cgan@iders.ca>
+ *
+ * This code is GPL
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/concat.h>
+#include <ubi_uboot.h>
+
+/*
+ * Our storage structure:
+ * Subdev points to an array of pointers to struct mtd_info objects
+ * which is allocated along with this structure
+ *
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info **subdev;
+};
+
+/*
+ * how to calculate the size required for the above structure,
+ * including the pointer array subdev points to:
+ */
+#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
+ ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
+
+/*
+ * Given a pointer to the MTD object in the mtd_concat structure,
+ * we can retrieve the pointer to that structure with this macro.
+ */
+#define CONCAT(x) ((struct mtd_concat *)(x))
+
+/*
+ * MTD methods which look up the relevant subdevice, translate the
+ * effective address and pass through to the subdevice.
+ */
+
+static int
+concat_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int ret = 0, err;
+ int i;
+
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ err = subdev->read(subdev, from, size, &retsize, buf);
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (err == -EBADMSG) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (err == -EUCLEAN) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ return ret;
+
+ buf += size;
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ err = -EROFS;
+ else
+ err = subdev->write(subdev, to, size, &retsize, buf);
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+static int
+concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err, ret = 0;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (from >= subdev->size) {
+ from -= subdev->size;
+ continue;
+ }
+
+ /* partial read ? */
+ if (from + devops.len > subdev->size)
+ devops.len = subdev->size - from;
+
+ err = subdev->read_oob(subdev, from, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (err == -EBADMSG) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (err == -EUCLEAN) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return ret;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return ret;
+ devops.oobbuf += ops->oobretlen;
+ }
+
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ops->retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ /* partial write ? */
+ if (to + devops.len > subdev->size)
+ devops.len = subdev->size - to;
+
+ err = subdev->write_oob(subdev, to, &devops);
+ ops->retlen += devops.retlen;
+ if (err)
+ return err;
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return 0;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return 0;
+ devops.oobbuf += devops.oobretlen;
+ }
+ to = 0;
+ }
+ return -EINVAL;
+}
+
+static void concat_erase_callback(struct erase_info *instr)
+{
+ /* Nothing to do here in U-Boot */
+}
+
+static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int err;
+ wait_queue_head_t waitq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * This code was stol^H^H^H^Hinspired by mtdchar.c
+ */
+ init_waitqueue_head(&waitq);
+
+ erase->mtd = mtd;
+ erase->callback = concat_erase_callback;
+ erase->priv = (unsigned long) &waitq;
+
+ /*
+ * FIXME: Allow INTERRUPTIBLE. Which means
+ * not having the wait_queue head on the stack.
+ */
+ err = mtd->erase(mtd, erase);
+ if (!err) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE
+ && erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
+ }
+ return err;
+}
+
+static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_info *subdev;
+ int i, err;
+ uint64_t length, offset = 0;
+ struct erase_info *erase;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if (instr->addr > concat->mtd.size)
+ return -EINVAL;
+
+ if (instr->len + instr->addr > concat->mtd.size)
+ return -EINVAL;
+
+ /*
+ * Check for proper erase block alignment of the to-be-erased area.
+ * It is easier to do this based on the super device's erase
+ * region info rather than looking at each particular sub-device
+ * in turn.
+ */
+ if (!concat->mtd.numeraseregions) {
+ /* the easy case: device has uniform erase block size */
+ if (instr->addr & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ if (instr->len & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ } else {
+ /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions =
+ concat->mtd.eraseregions;
+
+ /*
+ * Find the erase region where the to-be-erased area begins:
+ */
+ for (i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++) ;
+ --i;
+
+ /*
+ * Now erase_regions[i] is the region in which the
+ * to-be-erased area begins. Verify that the starting
+ * offset is aligned to this region's erase size:
+ */
+ if (instr->addr & (erase_regions[i].erasesize - 1))
+ return -EINVAL;
+
+ /*
+ * now find the erase region where the to-be-erased area ends:
+ */
+ for (; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset;
+ ++i) ;
+ --i;
+ /*
+ * check if the ending offset is aligned to this region's erase size
+ */
+ if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
+ 1))
+ return -EINVAL;
+ }
+
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+
+ /* make a local copy of instr to avoid modifying the caller's struct */
+ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
+
+ if (!erase)
+ return -ENOMEM;
+
+ *erase = *instr;
+ length = instr->len;
+
+ /*
+ * find the subdevice where the to-be-erased area begins, adjust
+ * starting offset to be relative to the subdevice start
+ */
+ for (i = 0; i < concat->num_subdev; i++) {
+ subdev = concat->subdev[i];
+ if (subdev->size <= erase->addr) {
+ erase->addr -= subdev->size;
+ offset += subdev->size;
+ } else {
+ break;
+ }
+ }
+
+ /* must never happen since size limit has been verified above */
+ BUG_ON(i >= concat->num_subdev);
+
+ /* now do the erase: */
+ err = 0;
+ for (; length > 0; i++) {
+ /* loop for all subdevices affected by this request */
+ subdev = concat->subdev[i]; /* get current subdevice */
+
+ /* limit length to subdevice's size: */
+ if (erase->addr + length > subdev->size)
+ erase->len = subdev->size - erase->addr;
+ else
+ erase->len = length;
+
+ if (!(subdev->flags & MTD_WRITEABLE)) {
+ err = -EROFS;
+ break;
+ }
+ length -= erase->len;
+ if ((err = concat_dev_erase(subdev, erase))) {
+ /* sanity check: should never happen since
+ * block alignment has been checked above */
+ BUG_ON(err == -EINVAL);
+ if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr = erase->fail_addr + offset;
+ break;
+ }
+ /*
+ * erase->addr specifies the offset of the area to be
+ * erased *within the current subdevice*. It can be
+ * non-zero only the first time through this loop, i.e.
+ * for the first subdevice where blocks need to be erased.
+ * All the following erases must begin at the start of the
+ * current subdevice, i.e. at offset zero.
+ */
+ erase->addr = 0;
+ offset += subdev->size;
+ }
+ instr->state = erase->state;
+ kfree(erase);
+ if (err)
+ return err;
+
+ if (instr->callback)
+ instr->callback(instr);
+ return 0;
+}
+
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = subdev->lock(subdev, ofs, size);
+
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = 0;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = subdev->unlock(subdev, ofs, size);
+
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static void concat_sync(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ subdev->sync(subdev);
+ }
+}
+
+static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, res = 0;
+
+ if (!concat->subdev[0]->block_isbad)
+ return res;
+
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ res = subdev->block_isbad(subdev, ofs);
+ break;
+ }
+
+ return res;
+}
+
+static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ if (!concat->subdev[0]->block_markbad)
+ return 0;
+
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ err = subdev->block_markbad(subdev, ofs);
+ if (!err)
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * This function constructs a virtual MTD device by concatenating
+ * num_devs MTD devices. A pointer to the new device object is
+ * stored to *new_dev upon success. This function does _not_
+ * register any devices: this is the caller's responsibility.
+ */
+struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+ const char *name)
+{ /* name for the new device */
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
+ uint32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+
+ debug("Concatenating MTD devices:\n");
+ for (i = 0; i < num_devs; i++)
+ debug("(%d): \"%s\"\n", i, subdev[i]->name);
+ debug("into device \"%s\"\n", name);
+
+ /* allocate the device structure */
+ size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
+ concat = kzalloc(size, GFP_KERNEL);
+ if (!concat) {
+ printk
+ ("memory allocation error while creating concatenated device \"%s\"\n",
+ name);
+ return NULL;
+ }
+ concat->subdev = (struct mtd_info **) (concat + 1);
+
+ /*
+ * Set up the new "super" device's MTD object structure, check for
+ * incompatibilities between the subdevices.
+ */
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
+ concat->mtd.erasesize = subdev[0]->erasesize;
+ concat->mtd.writesize = subdev[0]->writesize;
+ concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.oobavail = subdev[0]->oobavail;
+ if (subdev[0]->read_oob)
+ concat->mtd.read_oob = concat_read_oob;
+ if (subdev[0]->write_oob)
+ concat->mtd.write_oob = concat_write_oob;
+ if (subdev[0]->block_isbad)
+ concat->mtd.block_isbad = concat_block_isbad;
+ if (subdev[0]->block_markbad)
+ concat->mtd.block_markbad = concat_block_markbad;
+
+ concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+
+ concat->subdev[0] = subdev[0];
+
+ for (i = 1; i < num_devs; i++) {
+ if (concat->mtd.type != subdev[i]->type) {
+ kfree(concat);
+ printk("Incompatible device type on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ if (concat->mtd.flags != subdev[i]->flags) {
+ /*
+ * Expect all flags except MTD_WRITEABLE to be
+ * equal on all subdevices.
+ */
+ if ((concat->mtd.flags ^ subdev[i]->
+ flags) & ~MTD_WRITEABLE) {
+ kfree(concat);
+ printk("Incompatible device flags on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ } else
+ /* if writeable attribute differs,
+ make super device writeable */
+ concat->mtd.flags |=
+ subdev[i]->flags & MTD_WRITEABLE;
+ }
+
+ concat->mtd.size += subdev[i]->size;
+ concat->mtd.ecc_stats.badblocks +=
+ subdev[i]->ecc_stats.badblocks;
+ if (concat->mtd.writesize != subdev[i]->writesize ||
+ concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ !concat->mtd.read_oob != !subdev[i]->read_oob ||
+ !concat->mtd.write_oob != !subdev[i]->write_oob) {
+ kfree(concat);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ concat->subdev[i] = subdev[i];
+
+ }
+
+ concat->mtd.ecclayout = subdev[0]->ecclayout;
+
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
+
+ concat->mtd.erase = concat_erase;
+ concat->mtd.read = concat_read;
+ concat->mtd.write = concat_write;
+ concat->mtd.sync = concat_sync;
+ concat->mtd.lock = concat_lock;
+ concat->mtd.unlock = concat_unlock;
+
+ /*
+ * Combine the erase block size info of the subdevices:
+ *
+ * first, walk the map of the new device and see how
+ * many changes in erase size we have
+ */
+ max_erasesize = curr_erasesize = subdev[0]->erasesize;
+ num_erase_region = 1;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /* if it differs from the last subdevice's erase size, count it */
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].erasesize !=
+ curr_erasesize) {
+ ++num_erase_region;
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ }
+ }
+
+ if (num_erase_region == 1) {
+ /*
+ * All subdevices have the same uniform erase size.
+ * This is easy:
+ */
+ concat->mtd.erasesize = curr_erasesize;
+ concat->mtd.numeraseregions = 0;
+ } else {
+ uint64_t tmp64;
+
+ /*
+ * erase block size varies across the subdevices: allocate
+ * space to store the data describing the variable erase regions
+ */
+ struct mtd_erase_region_info *erase_region_p;
+ uint64_t begin, position;
+
+ concat->mtd.erasesize = max_erasesize;
+ concat->mtd.numeraseregions = num_erase_region;
+ concat->mtd.eraseregions = erase_region_p =
+ kmalloc(num_erase_region *
+ sizeof (struct mtd_erase_region_info), GFP_KERNEL);
+ if (!erase_region_p) {
+ kfree(concat);
+ printk
+ ("memory allocation error while creating erase region list"
+ " for device \"%s\"\n", name);
+ return NULL;
+ }
+
+ /*
+ * walk the map of the new device once more and fill
+ * in erase region info:
+ */
+ curr_erasesize = subdev[0]->erasesize;
+ begin = position = 0;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /*
+ * fill in an mtd_erase_region_info structure for the area
+ * we have walked so far:
+ */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize = subdev[i]->erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->size;
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].
+ erasesize != curr_erasesize) {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ ++erase_region_p;
+ }
+ position +=
+ subdev[i]->eraseregions[j].
+ numblocks * (uint64_t)curr_erasesize;
+ }
+ }
+ }
+ /* Now write the final entry */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ }
+
+ return &concat->mtd;
+}
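
mtd_concat_create() above leaves address translation to the per-operation handlers: each walks the subdevice list and subtracts bank sizes until the request falls inside a bank. A minimal host-side sketch of that walk follows; the two bank sizes and the concat_map() helper are invented for illustration and do not exist in the driver.

#include <stdint.h>
#include <stdio.h>

/* Two invented NOR banks, 32 MiB and 64 MiB. */
static const uint64_t subdev_size[] = { 32 << 20, 64 << 20 };
#define NUM_SUBDEV (sizeof(subdev_size) / sizeof(subdev_size[0]))

/*
 * Map an offset in the concatenated device to (subdevice index, offset
 * within that subdevice) -- the same walk concat_read() performs.
 * Returns -1 if the offset lies beyond the last subdevice.
 */
static int concat_map(uint64_t from, unsigned int *idx, uint64_t *sub_ofs)
{
	unsigned int i;

	for (i = 0; i < NUM_SUBDEV; i++) {
		if (from < subdev_size[i]) {
			*idx = i;
			*sub_ofs = from;
			return 0;
		}
		from -= subdev_size[i];
	}
	return -1;
}

int main(void)
{
	uint64_t ofs = 40 << 20;	/* 40 MiB into the 96 MiB concat device */
	uint64_t sub_ofs;
	unsigned int idx;

	if (!concat_map(ofs, &idx, &sub_ofs))
		printf("offset 0x%llx -> subdev %u, offset 0x%llx\n",
		       (unsigned long long)ofs, idx,
		       (unsigned long long)sub_ofs);
	return 0;
}

A request that straddles a bank boundary is additionally split across banks, which is what the read/write loops above do with their len -= size bookkeeping.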
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index f010f5e..e2e43ea 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -26,7 +26,7 @@ struct list_head mtd_partitions;
struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
- u_int32_t offset;
+ uint64_t offset;
int index;
struct list_head list;
int registered;
@@ -44,50 +44,32 @@ struct mtd_part {
* to the _real_ device.
*/
-static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
+ struct mtd_ecc_stats stats;
int res;
+ stats = part->master->ecc_stats;
+
if (from >= mtd->size)
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- res = part->master->read (part->master, from + part->offset,
+ res = part->master->read(part->master, from + part->offset,
len, retlen, buf);
if (unlikely(res)) {
if (res == -EUCLEAN)
- mtd->ecc_stats.corrected++;
+ mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
if (res == -EBADMSG)
- mtd->ecc_stats.failed++;
+ mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
}
return res;
}
-#ifdef MTD_LINUX
-static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys)
-{
- struct mtd_part *part = PART(mtd);
- if (from >= mtd->size)
- len = 0;
- else if (from + len > mtd->size)
- len = mtd->size - from;
- return part->master->point (part->master, from + part->offset,
- len, retlen, virt, phys);
-}
-
-static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
- struct mtd_part *part = PART(mtd);
-
- part->master->unpoint(part->master, from + part->offset, len);
-}
-#endif
-
static int part_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+ struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
int res;
@@ -107,38 +89,38 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return res;
}
-static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg (part->master, from,
+ return part->master->read_user_prot_reg(part->master, from,
len, retlen, buf);
}
-static int part_get_user_prot_info (struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int part_get_user_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info (part->master, buf, len);
+ return part->master->get_user_prot_info(part->master, buf, len);
}
-static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg (part->master, from,
+ return part->master->read_fact_prot_reg(part->master, from,
len, retlen, buf);
}
-static int part_get_fact_prot_info (struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info (part->master, buf, len);
+ return part->master->get_fact_prot_info(part->master, buf, len);
}
-static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
@@ -147,13 +129,12 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write (part->master, to + part->offset,
+ return part->master->write(part->master, to + part->offset,
len, retlen, buf);
}
-#ifdef MTD_LINUX
-static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
@@ -162,13 +143,12 @@ static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->panic_write (part->master, to + part->offset,
+ return part->master->panic_write(part->master, to + part->offset,
len, retlen, buf);
}
-#endif
static int part_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
+ struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
@@ -182,33 +162,22 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
return part->master->write_oob(part->master, to + part->offset, ops);
}
-static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg (part->master, from,
+ return part->master->write_user_prot_reg(part->master, from,
len, retlen, buf);
}
-static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
-{
- struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg (part->master, from, len);
-}
-
-#ifdef MTD_LINUX
-static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
{
struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return part->master->writev (part->master, vecs, count,
- to + part->offset, retlen);
+ return part->master->lock_user_prot_reg(part->master, from, len);
}
-#endif
-static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
+static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
int ret;
@@ -219,7 +188,7 @@ static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
instr->addr += part->offset;
ret = part->master->erase(part->master, instr);
if (ret) {
- if (instr->fail_addr != 0xffffffff)
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
}
@@ -231,19 +200,15 @@ void mtd_erase_callback(struct erase_info *instr)
if (instr->mtd->erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
- if (instr->fail_addr != 0xffffffff)
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
}
if (instr->callback)
instr->callback(instr);
}
-#ifdef MTD_LINUX
-EXPORT_SYMBOL_GPL(mtd_erase_callback);
-#endif
-#ifdef MTD_LINUX
-static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
@@ -251,14 +216,13 @@ static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
return part->master->lock(part->master, ofs + part->offset, len);
}
-static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
return part->master->unlock(part->master, ofs + part->offset, len);
}
-#endif
static void part_sync(struct mtd_info *mtd)
{
@@ -266,7 +230,6 @@ static void part_sync(struct mtd_info *mtd)
part->master->sync(part->master);
}
-#ifdef MTD_LINUX
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
@@ -278,9 +241,8 @@ static void part_resume(struct mtd_info *mtd)
struct mtd_part *part = PART(mtd);
part->master->resume(part->master);
}
-#endif
-static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
+static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
if (ofs >= mtd->size)
@@ -289,7 +251,7 @@ static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
return part->master->block_isbad(part->master, ofs);
}
-static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
+static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
int res;
@@ -300,10 +262,8 @@ static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
return -EINVAL;
ofs += part->offset;
res = part->master->block_markbad(part->master, ofs);
-#ifdef MTD_LINUX
if (!res)
mtd->ecc_stats.badblocks++;
-#endif
return res;
}
@@ -314,31 +274,193 @@ static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
int del_mtd_partitions(struct mtd_info *master)
{
- struct list_head *node;
- struct mtd_part *slave;
+ struct mtd_part *slave, *next;
- for (node = mtd_partitions.next;
- node != &mtd_partitions;
- node = node->next) {
- slave = list_entry(node, struct mtd_part, list);
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
- struct list_head *prev = node->prev;
- __list_del(prev, node->next);
- if(slave->registered)
+ list_del(&slave->list);
+ if (slave->registered)
del_mtd_device(&slave->mtd);
kfree(slave);
- node = prev;
}
- }
return 0;
}
+static struct mtd_part *add_one_partition(struct mtd_info *master,
+ const struct mtd_partition *part, int partno,
+ uint64_t cur_offset)
+{
+ struct mtd_part *slave;
+
+ /* allocate the partition structure */
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ del_mtd_partitions(master);
+ return NULL;
+ }
+ list_add(&slave->list, &mtd_partitions);
+
+ /* set up the MTD object for this partition */
+ slave->mtd.type = master->type;
+ slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.size = part->size;
+ slave->mtd.writesize = master->writesize;
+ slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
+ slave->mtd.subpage_sft = master->subpage_sft;
+
+ slave->mtd.name = part->name;
+ slave->mtd.owner = master->owner;
+
+ slave->mtd.read = part_read;
+ slave->mtd.write = part_write;
+
+ if (master->panic_write)
+ slave->mtd.panic_write = part_panic_write;
+
+ if (master->read_oob)
+ slave->mtd.read_oob = part_read_oob;
+ if (master->write_oob)
+ slave->mtd.write_oob = part_write_oob;
+ if (master->read_user_prot_reg)
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+ if (master->read_fact_prot_reg)
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->write_user_prot_reg)
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+ if (master->lock_user_prot_reg)
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->get_user_prot_info)
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
+ if (master->get_fact_prot_info)
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+ if (master->sync)
+ slave->mtd.sync = part_sync;
+ if (!partno && master->suspend && master->resume) {
+ slave->mtd.suspend = part_suspend;
+ slave->mtd.resume = part_resume;
+ }
+ if (master->lock)
+ slave->mtd.lock = part_lock;
+ if (master->unlock)
+ slave->mtd.unlock = part_unlock;
+ if (master->block_isbad)
+ slave->mtd.block_isbad = part_block_isbad;
+ if (master->block_markbad)
+ slave->mtd.block_markbad = part_block_markbad;
+ slave->mtd.erase = part_erase;
+ slave->master = master;
+ slave->offset = part->offset;
+ slave->index = partno;
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if (mtd_mod_by_eb(cur_offset, master) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%012llx -> 0x%012llx\n", partno,
+ (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ }
+ }
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
+ slave->mtd.size = master->size - slave->offset;
+
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+ (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->mtd.size = 0;
+ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ goto out_register;
+ }
+ if (slave->offset + slave->mtd.size > master->size) {
+ slave->mtd.size = master->size - slave->offset;
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+ part->name, master->name, (unsigned long long)slave->mtd.size);
+ }
+ if (master->numeraseregions > 1) {
+ /* Deal with variable erase size stuff */
+ int i, max = master->numeraseregions;
+ u64 end = slave->offset + slave->mtd.size;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+ /* Find the first erase regions which is part of this
+ * partition. */
+ for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ ;
+ /* The loop searched for the region _behind_ the first one */
+ i--;
+
+ /* Pick biggest erasesize */
+ for (; i < max && regions[i].offset < end; i++) {
+ if (slave->mtd.erasesize < regions[i].erasesize) {
+ slave->mtd.erasesize = regions[i].erasesize;
+ }
+ }
+ BUG_ON(slave->mtd.erasesize == 0);
+ } else {
+ /* Single erase size */
+ slave->mtd.erasesize = master->erasesize;
+ }
+
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of
+ * _minor_ erase size though */
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
+
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+ uint64_t offs = 0;
+
+ while (offs < slave->mtd.size) {
+ if (master->block_isbad(master,
+ offs + slave->offset))
+ slave->mtd.ecc_stats.badblocks++;
+ offs += slave->mtd.erasesize;
+ }
+ }
+
+out_register:
+ if (part->mtdp) {
+ /* store the object pointer (caller may or may not register it) */
+ *part->mtdp = &slave->mtd;
+ slave->registered = 0;
+ } else {
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+ slave->registered = 1;
+ }
+ return slave;
+}
+
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
* the partition definitions.
- * (Q: should we register the master MTD object as well?)
+ *
+ * We don't register the master, or expect the caller to have done so,
+ * for reasons of data integrity.
*/
int add_mtd_partitions(struct mtd_info *master,
@@ -346,7 +468,7 @@ int add_mtd_partitions(struct mtd_info *master,
int nbparts)
{
struct mtd_part *slave;
- u_int32_t cur_offset = 0;
+ uint64_t cur_offset = 0;
int i;
/*
@@ -357,184 +479,14 @@ int add_mtd_partitions(struct mtd_info *master,
if (mtd_partitions.next == NULL)
INIT_LIST_HEAD(&mtd_partitions);
- printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
-
- /* allocate the partition structure */
- slave = kzalloc (sizeof(*slave), GFP_KERNEL);
- if (!slave) {
- printk ("memory allocation error while creating partitions for \"%s\"\n",
- master->name);
- del_mtd_partitions(master);
+ slave = add_one_partition(master, parts + i, i, cur_offset);
+ if (!slave)
return -ENOMEM;
- }
- list_add(&slave->list, &mtd_partitions);
-
- /* set up the MTD object for this partition */
- slave->mtd.type = master->type;
- slave->mtd.flags = master->flags & ~parts[i].mask_flags;
- slave->mtd.size = parts[i].size;
- slave->mtd.writesize = master->writesize;
- slave->mtd.oobsize = master->oobsize;
- slave->mtd.oobavail = master->oobavail;
- slave->mtd.subpage_sft = master->subpage_sft;
-
- slave->mtd.name = parts[i].name;
- slave->mtd.owner = master->owner;
-
- slave->mtd.read = part_read;
- slave->mtd.write = part_write;
-
-#ifdef MTD_LINUX
- if (master->panic_write)
- slave->mtd.panic_write = part_panic_write;
-
- if(master->point && master->unpoint){
- slave->mtd.point = part_point;
- slave->mtd.unpoint = part_unpoint;
- }
-#endif
-
- if (master->read_oob)
- slave->mtd.read_oob = part_read_oob;
- if (master->write_oob)
- slave->mtd.write_oob = part_write_oob;
- if(master->read_user_prot_reg)
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
- if(master->read_fact_prot_reg)
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
- if(master->write_user_prot_reg)
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
- if(master->lock_user_prot_reg)
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
- if(master->get_user_prot_info)
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
- if(master->get_fact_prot_info)
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
- if (master->sync)
- slave->mtd.sync = part_sync;
-#ifdef MTD_LINUX
- if (!i && master->suspend && master->resume) {
- slave->mtd.suspend = part_suspend;
- slave->mtd.resume = part_resume;
- }
- if (master->writev)
- slave->mtd.writev = part_writev;
- if (master->lock)
- slave->mtd.lock = part_lock;
- if (master->unlock)
- slave->mtd.unlock = part_unlock;
-#endif
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
- slave->mtd.erase = part_erase;
- slave->master = master;
- slave->offset = parts[i].offset;
- slave->index = i;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if ((cur_offset % master->erasesize) != 0) {
- /* Round up to next erasesize */
- slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
- printk(KERN_NOTICE "Moving partition %d: "
- "0x%08x -> 0x%08x\n", i,
- cur_offset, slave->offset);
- }
- }
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = master->size - slave->offset;
cur_offset = slave->offset + slave->mtd.size;
-
- printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
- slave->offset + slave->mtd.size, slave->mtd.name);
-
- /* let's do some sanity checks */
- if (slave->offset >= master->size) {
- /* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
- printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
- parts[i].name);
- }
- if (slave->offset + slave->mtd.size > master->size) {
- slave->mtd.size = master->size - slave->offset;
- printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
- parts[i].name, master->name, slave->mtd.size);
- }
- if (master->numeraseregions>1) {
- /* Deal with variable erase size stuff */
- int i;
- struct mtd_erase_region_info *regions = master->eraseregions;
-
- /* Find the first erase regions which is part of this partition. */
- for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
- ;
-
- for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
- }
- } else {
- /* Single erase size */
- slave->mtd.erasesize = master->erasesize;
- }
-
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->offset % slave->mtd.erasesize)) {
- /* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- parts[i].name);
- }
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->mtd.size % slave->mtd.erasesize)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- parts[i].name);
- }
-
- slave->mtd.ecclayout = master->ecclayout;
- if (master->block_isbad) {
- uint32_t offs = 0;
-
- while(offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
- }
- }
-
-#ifdef MTD_LINUX
- if (parts[i].mtdp) {
- /* store the object pointer
- * (caller may or may not register it */
- *parts[i].mtdp = &slave->mtd;
- slave->registered = 0;
- } else {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
-#else
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
-#endif
}
return 0;
}
-
-#ifdef MTD_LINUX
-EXPORT_SYMBOL(add_mtd_partitions);
-EXPORT_SYMBOL(del_mtd_partitions);
-#endif
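
The partition setup moved into add_one_partition() keeps the MTDPART_OFS_NXTBLK behaviour: a partition placed at the "next block" is rounded up to the next erase-block boundary, now via mtd_mod_by_eb()/mtd_div_by_eb() so 64-bit offsets work. A small sketch of that rounding with an assumed 128 KiB erase size; round_up_to_eb() is a local helper for illustration, not a U-Boot API.

#include <stdint.h>
#include <stdio.h>

/* Round an offset up to the next erase-block boundary if needed. */
static uint64_t round_up_to_eb(uint64_t offset, uint32_t erasesize)
{
	if (offset % erasesize)
		offset = (offset / erasesize + 1) * (uint64_t)erasesize;
	return offset;
}

int main(void)
{
	uint32_t erasesize = 128 * 1024;	/* assumed erase block size */
	uint64_t cur_offset = 0x61800;		/* previous partition ends mid-block */
	uint64_t aligned = round_up_to_eb(cur_offset, erasesize);

	/* Mirrors the "Moving partition" notice printed by add_one_partition() */
	printf("0x%012llx -> 0x%012llx\n",
	       (unsigned long long)cur_offset, (unsigned long long)aligned);
	return 0;
}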
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 471cd6b..71dd5b9 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -40,6 +40,7 @@ COBJS-$(CONFIG_DRIVER_NAND_BFIN) += bfin_nand.o
COBJS-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
COBJS-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
COBJS-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
+COBJS-$(CONFIG_NAND_MPC5121_NFC) += mpc5121_nfc.o
COBJS-$(CONFIG_NAND_NOMADIK) += nomadik.o
COBJS-$(CONFIG_NAND_S3C2410) += s3c2410_nand.o
COBJS-$(CONFIG_NAND_S3C64XX) += s3c64xx.o
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 0000000..856cb36
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ * (C) Copyright 2009 Stefan Roese <sr@denx.de>
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on the basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended by
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <common.h>
+#include <malloc.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/compat.h>
+
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <nand.h>
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT 2000 /* 2000 us */
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ uint column;
+ int spareonly;
+ int chipsel;
+};
+
+int mpc5121_nfc_chip = 0;
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ int max_retries = NFC_TIMEOUT;
+
+ while (1) {
+ max_retries--;
+ if (nfc_read(mtd, NFC_CONFIG2) & NFC_INT)
+ break;
+ udelay(1);
+ }
+
+ if (max_retries <= 0)
+ printk(KERN_WARNING DRV_NAME
+ ": Timeout while waiting for completion.\n");
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
+
+/* Control chip select signals */
+
+/*
+ * Selecting the active device:
+ *
+ * This is different than the linux version. Switching between chips
+ * is done via board_nand_select_device(). The Linux select_chip
+ * function used here in U-Boot has only 2 valid chip numbers:
+ * 0 select
+ * -1 deselect
+ */
+
+/*
+ * Implement it as a weak default, so that boards with a specific
+ * chip-select routine can use their own function.
+ */
+void __mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+ __attribute__((weak, alias("__mpc5121_nfc_select_chip")));
+
+void board_nand_select_device(struct nand_chip *nand, int chip)
+{
+ /*
+ * Only save this chip number in global variable here. This
+ * will be used later in mpc5121_nfc_select_chip().
+ */
+ mpc5121_nfc_chip = chip;
+}
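
Because mpc5121_nfc_select_chip() above is only a weak alias, a board that puts several NAND chips behind an external chip-select mux could override it. A hypothetical sketch of such an override follows; board_cs_mux_set() is an invented helper, not part of this driver or of U-Boot.

/* Hypothetical board-file fragment -- not taken from any real board. */
#include <common.h>
#include <linux/mtd/mtd.h>

/* Default implementation provided by the driver above. */
extern void __mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip);

/* Invented helper that programs an external chip-select mux. */
extern void board_cs_mux_set(int chip);

/*
 * A strong definition here replaces the weak alias from the driver
 * at link time.
 */
void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	if (chip < 0) {
		__mpc5121_nfc_select_chip(mtd, -1);
		return;
	}

	board_cs_mux_set(chip);			/* route CS to the requested device */
	__mpc5121_nfc_select_chip(mtd, 0);	/* enable the (muxed) NFC chip select */
}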
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 * buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND devices in which the spare area is not divided evenly
+ * by the number of chunks, the number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o,
+ blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ }
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint) len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of the information
+ * stored in the Reset Config Word. There is no other way to set the
+ * NAND page size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ immap_t *im = (immap_t *)CONFIG_SYS_IMMR;
+ struct nand_chip *chip = mtd->priv;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+
+ rcwh = in_be32(&(im->reset.rcwh));
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ debug(KERN_NOTICE DRV_NAME ": Configured for "
+ "%u-bit NAND, page size %u with %u spare.\n",
+ rcw_width * 8, rcw_pagesize, rcw_sparesize);
+ return 0;
+}
+
+int board_nand_init(struct nand_chip *chip)
+{
+ struct mpc5121_nfc_prv *prv;
+ struct mtd_info *mtd;
+ int resettime = 0;
+ int retval = 0;
+ int rev;
+ static int chip_nr = 0;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if (rev != 2) {
+ printk(KERN_ERR DRV_NAME
+ ": SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = malloc(sizeof(*prv));
+ if (!prv) {
+ printk(KERN_ERR DRV_NAME ": Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &nand_info[chip_nr++];
+ mtd->priv = chip;
+ chip->priv = prv;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ printk(KERN_ERR DRV_NAME ": Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->regs = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ printk(KERN_ERR DRV_NAME
+ ": Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, 1)) {
+ printk(KERN_ERR DRV_NAME ": NAND Flash not found !\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ printk(KERN_ERR DRV_NAME ": Unsupported NAND flash!\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ return 0;
+error:
+ return retval;
+}
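
The spare-area handling in mpc5121_nfc_copy_spare() splits an OOB access across the NFC spare buffers. A standalone walk-through of that arithmetic (host-side sketch, not part of this patch; the 2048+64 byte page geometry and the NFC_SPARE_BUFFERS value are assumptions for illustration):

#include <stdio.h>

#define NFC_SPARE_BUFFERS	8	/* assumed value, for illustration only */

int main(void)
{
	unsigned writesize = 2048, oobsize = 64;
	unsigned sbsize = (oobsize / (writesize / 512)) & ~1u;	/* 16 bytes */
	unsigned offset = 20, size = 30;	/* example OOB access */

	while (size) {
		unsigned s = offset / sbsize;	/* spare buffer number */
		unsigned o, blksize;

		if (s > NFC_SPARE_BUFFERS - 1)
			s = NFC_SPARE_BUFFERS - 1;
		o = offset - s * sbsize;	/* offset inside that buffer */
		blksize = (size < sbsize - o) ? size : sbsize - o;
		printf("spare buffer %u, offset %u, %u bytes\n", s, o, blksize);
		offset += blksize;
		size -= blksize;
	}
	return 0;
}

With these example values the access is split into 12 bytes from buffer 1, 16 bytes from buffer 2 and 2 bytes from buffer 3.
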
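mpc5121_nfc_read_hw_config() derives the page and spare geometry from the PS and ROMLOC fields of RCWH. A compact restatement of that decode as a host-side sketch (the sample rcwh value is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned rcwh = 0x00200080;	/* example value, not from real hardware */
	unsigned width = ((rcwh >> 6) & 0x1) ? 16 : 8;	/* NFC bus width */
	unsigned ps = (rcwh >> 7) & 0x1;	/* page/spare size select */
	unsigned romloc = (rcwh >> 21) & 0x3;	/* ROM location */
	/* Indexed by (ps << 2) | romloc, same table as the switch above */
	static const struct { unsigned page, spare; } cfg[8] = {
		{ 512, 16 }, { 512, 16 }, { 4096, 128 }, { 4096, 128 },
		{ 2048, 64 }, { 2048, 64 }, { 4096, 218 }, { 4096, 218 },
	};

	printf("%u-bit NAND, page %u, spare %u\n",
	       width, cfg[(ps << 2) | romloc].page,
	       cfg[(ps << 2) | romloc].spare);
	return 0;
}
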
diff --git a/drivers/mtd/nand/nand.c b/drivers/mtd/nand/nand.c
index d369115..9065fa9 100644
--- a/drivers/mtd/nand/nand.c
+++ b/drivers/mtd/nand/nand.c
@@ -57,7 +57,7 @@ static void nand_init_chip(struct mtd_info *mtd, struct nand_chip *nand,
else
mtd->name += gd->reloc_off;
-#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_DEVICE
/*
* Add MTD device so that we can reference it later
* via the mtdcore infrastructure (e.g. ubi).
diff --git a/drivers/mtd/nand/nand_util.c b/drivers/mtd/nand/nand_util.c
index 6ba52b3..88206d0 100644
--- a/drivers/mtd/nand/nand_util.c
+++ b/drivers/mtd/nand/nand_util.c
@@ -36,12 +36,15 @@
#include <malloc.h>
#include <div64.h>
-
#include <asm/errno.h>
#include <linux/mtd/mtd.h>
#include <nand.h>
#include <jffs2/jffs2.h>
+#if !defined(CONFIG_SYS_64BIT_VSPRINTF)
+#warning Please define CONFIG_SYS_64BIT_VSPRINTF for correct output!
+#endif
+
typedef struct erase_info erase_info_t;
typedef struct mtd_info mtd_info_t;
@@ -127,7 +130,7 @@ int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts)
if (ret > 0) {
if (!opts->quiet)
printf("\rSkipping bad block at "
- "0x%08x "
+ "0x%08llx "
" \n",
erase.addr);
continue;
@@ -181,11 +184,11 @@ int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts)
if (percent != percent_complete) {
percent_complete = percent;
- printf("\rErasing at 0x%x -- %3d%% complete.",
+ printf("\rErasing at 0x%llx -- %3d%% complete.",
erase.addr, percent);
if (opts->jffs2 && result == 0)
- printf(" Cleanmarker written at 0x%x.",
+ printf(" Cleanmarker written at 0x%llx.",
erase.addr);
}
}
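
The erase progress messages above now print erase.addr with %llx, which only produces correct output when 64-bit vsprintf support is enabled; hence the new #warning. A board would typically enable it in its configuration header (file name illustrative, not from this patch):

/* include/configs/<board>.h -- illustrative */
#define CONFIG_SYS_64BIT_VSPRINTF	/* enable %ll... support in vsprintf() */
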
diff --git a/drivers/mtd/onenand/onenand_uboot.c b/drivers/mtd/onenand/onenand_uboot.c
index a95b922..9823b5b 100644
--- a/drivers/mtd/onenand/onenand_uboot.c
+++ b/drivers/mtd/onenand/onenand_uboot.c
@@ -43,7 +43,7 @@ void onenand_init(void)
puts("OneNAND: ");
print_size(onenand_mtd.size, "\n");
-#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_DEVICE
/*
* Add MTD device so that we can reference it later
* via the mtdcore infrastructure (e.g. ubi).
diff --git a/drivers/mtd/spi/Makefile b/drivers/mtd/spi/Makefile
index a71b16e..27dcbff 100644
--- a/drivers/mtd/spi/Makefile
+++ b/drivers/mtd/spi/Makefile
@@ -27,6 +27,7 @@ LIB := $(obj)libspi_flash.a
COBJS-$(CONFIG_SPI_FLASH) += spi_flash.o
COBJS-$(CONFIG_SPI_FLASH_ATMEL) += atmel.o
+COBJS-$(CONFIG_SPI_FLASH_MACRONIX) += macronix.o
COBJS-$(CONFIG_SPI_FLASH_SPANSION) += spansion.o
COBJS-$(CONFIG_SPI_FLASH_SST) += sst.o
COBJS-$(CONFIG_SPI_FLASH_STMICRO) += stmicro.o
diff --git a/drivers/mtd/spi/atmel.c b/drivers/mtd/spi/atmel.c
index c3b936f..3bc2dff 100644
--- a/drivers/mtd/spi/atmel.c
+++ b/drivers/mtd/spi/atmel.c
@@ -196,6 +196,75 @@ static int dataflash_read_fast_at45(struct spi_flash *flash,
return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
}
+/*
+ * TODO: the two write funcs (_p2/_at45) should get unified ...
+ */
+static int dataflash_write_p2(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ unsigned long page_size;
+ u32 addr = offset;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * TODO: This function currently uses only page buffer #1. We can
+ * speed this up by using both buffers and loading one buffer while
+ * the other is being programmed into main memory.
+ */
+
+ page_size = (1 << asf->params->l2_page_size);
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - (addr % page_size));
+
+ /* Use the same address bits for both commands */
+ cmd[0] = CMD_AT45_LOAD_BUF1;
+ cmd[1] = addr >> 16;
+ cmd[2] = addr >> 8;
+ cmd[3] = addr;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: Loading AT45 buffer failed\n");
+ goto out;
+ }
+
+ cmd[0] = CMD_AT45_PROG_BUF1;
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: AT45 page programming failed\n");
+ goto out;
+ }
+
+ ret = at45_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: AT45 page programming timed out\n");
+ goto out;
+ }
+
+ addr += chunk_len;
+ }
+
+ debug("SF: AT45: Successfully programmed %zu bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+

static int dataflash_write_at45(struct spi_flash *flash,
u32 offset, size_t len, const void *buf)
{
@@ -209,6 +278,12 @@ static int dataflash_write_at45(struct spi_flash *flash,
int ret;
u8 cmd[4];
+ /*
+ * TODO: This function currently uses only page buffer #1. We can
+ * speed this up by using both buffers and loading one buffer while
+ * the other is being programmed into main memory.
+ */
+
page_shift = asf->params->l2_page_size;
page_size = (1 << page_shift) + (1 << (page_shift - 5));
page_shift++;
@@ -263,6 +338,68 @@ out:
return ret;
}
+/*
+ * TODO: the two erase funcs (_p2/_at45) should get unified ...
+ */
+int dataflash_erase_p2(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ unsigned long page_size;
+
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * TODO: This function currently uses page erase only. We can
+ * probably speed things up by using block and/or sector erase
+ * when possible.
+ */
+
+ page_size = (1 << asf->params->l2_page_size);
+
+ if (offset % page_size || len % page_size) {
+ debug("SF: Erase offset/length not multiple of page size\n");
+ return -1;
+ }
+
+ cmd[0] = CMD_AT45_ERASE_PAGE;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += page_size) {
+ cmd[1] = offset >> 16;
+ cmd[2] = offset >> 8;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: AT45 page erase failed\n");
+ goto out;
+ }
+
+ ret = at45_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: AT45 page erase timed out\n");
+ goto out;
+ }
+
+ offset += page_size;
+ }
+
+ debug("SF: AT45: Successfully erased %zu bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
int dataflash_erase_at45(struct spi_flash *flash, u32 offset, size_t len)
{
struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
@@ -382,6 +519,8 @@ struct spi_flash *spi_flash_probe_atmel(struct spi_slave *spi, u8 *idcode)
page_size += 1 << (params->l2_page_size - 5);
} else {
asf->flash.read = dataflash_read_fast_p2;
+ asf->flash.write = dataflash_write_p2;
+ asf->flash.erase = dataflash_erase_p2;
}
break;
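
For power-of-2 page DataFlash, dataflash_write_p2() sends the linear byte offset directly in the three address bytes and clips each chunk at a page boundary before the buffer-load/program pair. A host-side walk-through of that split (the 512-byte page size and offsets are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 512;	/* 1 << l2_page_size, assumed */
	unsigned addr = 1000, remaining = 800;	/* example write */

	while (remaining) {
		unsigned long chunk = page_size - (addr % page_size);

		if (chunk > remaining)
			chunk = remaining;
		printf("LOAD_BUF1/PROG_BUF1 @ %02x %02x %02x, %lu bytes\n",
		       (addr >> 16) & 0xff, (addr >> 8) & 0xff, addr & 0xff,
		       chunk);
		addr += chunk;
		remaining -= chunk;
	}
	return 0;
}
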
diff --git a/drivers/mtd/spi/macronix.c b/drivers/mtd/spi/macronix.c
new file mode 100644
index 0000000..9464c84
--- /dev/null
+++ b/drivers/mtd/spi/macronix.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2009(C) Marvell International Ltd. and its affiliates
+ * Prafulla Wadaskar <prafulla@marvell.com>
+ *
+ * Based on drivers/mtd/spi/stmicro.c
+ *
+ * Copyright 2008, Network Appliance Inc.
+ * Jason McMullan <mcmullan@netapp.com>
+ *
+ * Copyright (C) 2004-2007 Freescale Semiconductor, Inc.
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* MX25xx-specific commands */
+#define CMD_MX25XX_WREN 0x06 /* Write Enable */
+#define CMD_MX25XX_WRDI 0x04 /* Write Disable */
+#define CMD_MX25XX_RDSR 0x05 /* Read Status Register */
+#define CMD_MX25XX_WRSR 0x01 /* Write Status Register */
+#define CMD_MX25XX_READ 0x03 /* Read Data Bytes */
+#define CMD_MX25XX_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_MX25XX_PP 0x02 /* Page Program */
+#define CMD_MX25XX_SE 0x20 /* Sector Erase */
+#define CMD_MX25XX_BE 0xD8 /* Block Erase */
+#define CMD_MX25XX_CE 0xc7 /* Chip Erase */
+#define CMD_MX25XX_DP 0xb9 /* Deep Power-down */
+#define CMD_MX25XX_RES 0xab /* Release from DP, and Read Signature */
+
+#define MXIC_ID_MX2516 0x15
+#define MXIC_ID_MX2520 0x12
+#define MXIC_ID_MX2532 0x16
+#define MXIC_ID_MX2540 0x13
+#define MXIC_ID_MX2564 0x17
+#define MXIC_ID_MX2580 0x14
+#define MXIC_ID_MX25128 0x18
+
+#define MACRONIX_SR_WIP (1 << 0) /* Write-in-Progress */
+
+struct macronix_spi_flash_params {
+ u8 idcode1;
+ u16 page_size;
+ u16 pages_per_sector;
+ u16 sectors_per_block;
+ u16 nr_blocks;
+ const char *name;
+};
+
+struct macronix_spi_flash {
+ struct spi_flash flash;
+ const struct macronix_spi_flash_params *params;
+};
+
+static inline struct macronix_spi_flash *
+to_macronix_spi_flash(struct spi_flash *flash)
+{
+ return container_of(flash, struct macronix_spi_flash, flash);
+}
+
+static const struct macronix_spi_flash_params macronix_spi_flash_table[] = {
+ {
+ .idcode1 = MXIC_ID_MX25128,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 256,
+ .name = "MX25L12805D",
+ },
+};
+
+static int macronix_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 status;
+ u8 cmd = CMD_MX25XX_RDSR;
+
+ ret = spi_xfer(spi, 8, &cmd, NULL, SPI_XFER_BEGIN);
+ if (ret) {
+ debug("SF: Failed to send command %02x: %d\n", cmd, ret);
+ return ret;
+ }
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_xfer(spi, 8, NULL, &status, 0);
+ if (ret)
+ return -1;
+
+ if ((status & MACRONIX_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if ((status & MACRONIX_SR_WIP) == 0)
+ return 0;
+
+ /* Timed out */
+ return -1;
+}
+
+static int macronix_read_fast(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct macronix_spi_flash *mcx = to_macronix_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long page_size;
+ u8 cmd[5];
+
+ page_size = mcx->params->page_size;
+ page_addr = offset / page_size;
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = offset % page_size;
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int macronix_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct macronix_spi_flash *mcx = to_macronix_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ page_size = mcx->params->page_size;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ cmd[0] = CMD_MX25XX_PP;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = byte_addr;
+
+ debug("PP: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = %zu\n",
+ buf + actual, cmd[0], cmd[1], cmd[2], cmd[3], chunk_len);
+
+ ret = spi_flash_cmd(flash->spi, CMD_MX25XX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: Macronix Page Program failed\n");
+ break;
+ }
+
+ ret = macronix_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: Macronix page programming timed out\n");
+ break;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+ debug("SF: Macronix: Successfully programmed %u bytes @ 0x%x\n",
+ len, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int macronix_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct macronix_spi_flash *mcx = to_macronix_spi_flash(flash);
+ unsigned long sector_size;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * This function currently uses sector erase only. We could
+ * probably speed things up by using bulk (chip) erase
+ * when possible.
+ */
+
+ sector_size = mcx->params->page_size * mcx->params->pages_per_sector
+ * mcx->params->sectors_per_block;
+
+ if (offset % sector_size || len % sector_size) {
+ debug("SF: Erase offset/length not multiple of sector size\n");
+ return -1;
+ }
+
+ len /= sector_size;
+ cmd[0] = CMD_MX25XX_BE;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual++) {
+ cmd[1] = (offset / sector_size) + actual;
+
+ ret = spi_flash_cmd(flash->spi, CMD_MX25XX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Macronix page erase failed\n");
+ break;
+ }
+
+ ret = macronix_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: Macronix page erase timed out\n");
+ break;
+ }
+ }
+
+ debug("SF: Macronix: Successfully erased %u bytes @ 0x%x\n",
+ len * sector_size, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_macronix(struct spi_slave *spi, u8 *idcode)
+{
+ const struct macronix_spi_flash_params *params;
+ struct macronix_spi_flash *mcx;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(macronix_spi_flash_table); i++) {
+ params = &macronix_spi_flash_table[i];
+ if (params->idcode1 == idcode[2])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(macronix_spi_flash_table)) {
+ debug("SF: Unsupported Macronix ID %02x\n", idcode[1]);
+ return NULL;
+ }
+
+ mcx = malloc(sizeof(*mcx));
+ if (!mcx) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ mcx->params = params;
+ mcx->flash.spi = spi;
+ mcx->flash.name = params->name;
+
+ mcx->flash.write = macronix_write;
+ mcx->flash.erase = macronix_erase;
+ mcx->flash.read = macronix_read_fast;
+ mcx->flash.size = params->page_size * params->pages_per_sector
+ * params->sectors_per_block * params->nr_blocks;
+
+ printf("SF: Detected %s with page size %u, total %u bytes\n",
+ params->name, params->page_size, mcx->flash.size);
+
+ return &mcx->flash;
+}
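
In macronix_erase() the erase unit works out to page_size * pages_per_sector * sectors_per_block = 64 KiB for the MX25L12805D entry, so cmd[1] carries exactly address bits 23:16 while cmd[2] and cmd[3] stay zero. A small sketch of the command bytes that loop emits (the offset and length are made-up examples; 0xd8 is CMD_MX25XX_BE):

#include <stdio.h>

int main(void)
{
	unsigned long sector_size = 256UL * 16 * 16;	/* 64 KiB */
	unsigned long offset = 0x120000, len = 0x30000;	/* example range */
	unsigned long i;

	for (i = 0; i < len / sector_size; i++)
		printf("cmd = { 0xd8 0x%02lx 0x00 0x00 }\n",
		       offset / sector_size + i);
	return 0;
}
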
diff --git a/drivers/mtd/spi/spi_flash.c b/drivers/mtd/spi/spi_flash.c
index 274895a..0c83231 100644
--- a/drivers/mtd/spi/spi_flash.c
+++ b/drivers/mtd/spi/spi_flash.c
@@ -134,6 +134,11 @@ struct spi_flash *spi_flash_probe(unsigned int bus, unsigned int cs,
flash = spi_flash_probe_atmel(spi, idcode);
break;
#endif
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+ case 0xc2:
+ flash = spi_flash_probe_macronix(spi, idcode);
+ break;
+#endif
#ifdef CONFIG_SPI_FLASH_STMICRO
case 0x20:
flash = spi_flash_probe_stmicro(spi, idcode);
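
With the new 0xc2 branch, a Macronix part is picked up through the generic probe like any other supported flash. A typical use from board or command code might look as follows (bus, chip select, speed and mode values are illustrative assumptions):

#include <common.h>
#include <spi_flash.h>

int example_sf_read(void *buf, size_t len)
{
	struct spi_flash *flash;

	/* Bus 0, CS 0, 1 MHz, mode 3 -- example parameters only */
	flash = spi_flash_probe(0, 0, 1000000, SPI_MODE_3);
	if (!flash)
		return -1;

	/* Read 'len' bytes starting at offset 0 */
	return spi_flash_read(flash, 0, len, buf);
}
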
diff --git a/drivers/mtd/spi/spi_flash_internal.h b/drivers/mtd/spi/spi_flash_internal.h
index 5d1e395..0612383 100644
--- a/drivers/mtd/spi/spi_flash_internal.h
+++ b/drivers/mtd/spi/spi_flash_internal.h
@@ -46,5 +46,6 @@ int spi_flash_read_common(struct spi_flash *flash, const u8 *cmd,
/* Manufacturer-specific probe functions */
struct spi_flash *spi_flash_probe_spansion(struct spi_slave *spi, u8 *idcode);
struct spi_flash *spi_flash_probe_atmel(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_macronix(struct spi_slave *spi, u8 *idcode);
struct spi_flash *spi_flash_probe_sst(struct spi_slave *spi, u8 *idcode);
struct spi_flash *spi_flash_probe_stmicro(struct spi_slave *spi, u8 *idcode);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f4b01a9..4f50b2d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -46,6 +46,10 @@
#include <ubi_uboot.h>
#include "ubi.h"
+#if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
+#error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
+#endif
+
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
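
The new build-time check refuses to build UBI with a heap smaller than 512 KiB. The corresponding board configuration could look like this (file name illustrative, not from this patch):

/* include/configs/<board>.h -- illustrative */
#define CONFIG_SYS_MALLOC_LEN	(512 << 10)	/* >= 512 KiB required by UBI */
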