| author    | Kim Phillips <kim.phillips@freescale.com> | 2009-01-21 18:38:51 -0600 |
|---|---|---|
| committer | Kim Phillips <kim.phillips@freescale.com> | 2009-01-21 18:38:51 -0600 |
| commit    | be4880ebe4355e8782be4af4b337a1b98dffcbe3 (patch) | |
| tree      | 8b699181073305221b95f338a2d9107ab1bbfa3f /drivers/mtd/mtdpart.c | |
| parent    | 633639587e3596f0dbf5e6247dd3faf80b1d9063 (diff) | |
| parent    | 72d15e705bc3983884105cb7755c7ba80e74a0a5 (diff) | |
Merge branch 'master' into next
Diffstat (limited to 'drivers/mtd/mtdpart.c')
-rw-r--r-- | drivers/mtd/mtdpart.c | 540 |
1 file changed, 540 insertions(+), 0 deletions(-)
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
new file mode 100644
index 0000000..f010f5e
--- /dev/null
+++ b/drivers/mtd/mtdpart.c
@@ -0,0 +1,540 @@
+/*
+ * Simple MTD partitioning layer
+ *
+ * (C) 2000 Nicolas Pitre <nico@cam.org>
+ *
+ * This code is GPL
+ *
+ * 02-21-2002   Thomas Gleixner <gleixner@autronix.de>
+ *              added support for read_oob, write_oob
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/errno.h>
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/compat.h>
+
+/* Our partition linked list */
+struct list_head mtd_partitions;
+
+/* Our partition node structure */
+struct mtd_part {
+        struct mtd_info mtd;
+        struct mtd_info *master;
+        u_int32_t offset;
+        int index;
+        struct list_head list;
+        int registered;
+};
+
+/*
+ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
+ * the pointer to that structure with this macro.
+ */
+#define PART(x) ((struct mtd_part *)(x))
+
+
+/*
+ * MTD methods which simply translate the effective address and pass through
+ * to the _real_ device.
+ */
+
+static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, u_char *buf)
+{
+        struct mtd_part *part = PART(mtd);
+        int res;
+
+        if (from >= mtd->size)
+                len = 0;
+        else if (from + len > mtd->size)
+                len = mtd->size - from;
+        res = part->master->read (part->master, from + part->offset,
+                                len, retlen, buf);
+        if (unlikely(res)) {
+                if (res == -EUCLEAN)
+                        mtd->ecc_stats.corrected++;
+                if (res == -EBADMSG)
+                        mtd->ecc_stats.failed++;
+        }
+        return res;
+}
+
+#ifdef MTD_LINUX
+static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, void **virt, resource_size_t *phys)
+{
+        struct mtd_part *part = PART(mtd);
+        if (from >= mtd->size)
+                len = 0;
+        else if (from + len > mtd->size)
+                len = mtd->size - from;
+        return part->master->point (part->master, from + part->offset,
+                                    len, retlen, virt, phys);
+}
+
+static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+        struct mtd_part *part = PART(mtd);
+
+        part->master->unpoint(part->master, from + part->offset, len);
+}
+#endif
+
+static int part_read_oob(struct mtd_info *mtd, loff_t from,
+                        struct mtd_oob_ops *ops)
+{
+        struct mtd_part *part = PART(mtd);
+        int res;
+
+        if (from >= mtd->size)
+                return -EINVAL;
+        if (ops->datbuf && from + ops->len > mtd->size)
+                return -EINVAL;
+        res = part->master->read_oob(part->master, from + part->offset, ops);
+
+        if (unlikely(res)) {
+                if (res == -EUCLEAN)
+                        mtd->ecc_stats.corrected++;
+                if (res == -EBADMSG)
+                        mtd->ecc_stats.failed++;
+        }
+        return res;
+}
+
+static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, u_char *buf)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->read_user_prot_reg (part->master, from,
+                                        len, retlen, buf);
+}
+
+static int part_get_user_prot_info (struct mtd_info *mtd,
+                        struct otp_info *buf, size_t len)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->get_user_prot_info (part->master, buf, len);
+}
+
+static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, u_char *buf)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->read_fact_prot_reg (part->master, from,
+                                        len, retlen, buf);
+}
+
+static int part_get_fact_prot_info (struct mtd_info *mtd,
+                        struct otp_info *buf, size_t len)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->get_fact_prot_info (part->master, buf, len);
+}
+
+static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
+                        size_t *retlen, const u_char *buf)
+{
+        struct mtd_part *part = PART(mtd);
+        if (!(mtd->flags & MTD_WRITEABLE))
+                return -EROFS;
+        if (to >= mtd->size)
+                len = 0;
+        else if (to + len > mtd->size)
+                len = mtd->size - to;
+        return part->master->write (part->master, to + part->offset,
+                                        len, retlen, buf);
+}
+
+#ifdef MTD_LINUX
+static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
+                        size_t *retlen, const u_char *buf)
+{
+        struct mtd_part *part = PART(mtd);
+        if (!(mtd->flags & MTD_WRITEABLE))
+                return -EROFS;
+        if (to >= mtd->size)
+                len = 0;
+        else if (to + len > mtd->size)
+                len = mtd->size - to;
+        return part->master->panic_write (part->master, to + part->offset,
+                                        len, retlen, buf);
+}
+#endif
+
+static int part_write_oob(struct mtd_info *mtd, loff_t to,
+                        struct mtd_oob_ops *ops)
+{
+        struct mtd_part *part = PART(mtd);
+
+        if (!(mtd->flags & MTD_WRITEABLE))
+                return -EROFS;
+
+        if (to >= mtd->size)
+                return -EINVAL;
+        if (ops->datbuf && to + ops->len > mtd->size)
+                return -EINVAL;
+        return part->master->write_oob(part->master, to + part->offset, ops);
+}
+
+static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, u_char *buf)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->write_user_prot_reg (part->master, from,
+                                        len, retlen, buf);
+}
+
+static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->lock_user_prot_reg (part->master, from, len);
+}
+
+#ifdef MTD_LINUX
+static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
+                         unsigned long count, loff_t to, size_t *retlen)
+{
+        struct mtd_part *part = PART(mtd);
+        if (!(mtd->flags & MTD_WRITEABLE))
+                return -EROFS;
+        return part->master->writev (part->master, vecs, count,
+                                        to + part->offset, retlen);
+}
+#endif
+
+static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
+{
+        struct mtd_part *part = PART(mtd);
+        int ret;
+        if (!(mtd->flags & MTD_WRITEABLE))
+                return -EROFS;
+        if (instr->addr >= mtd->size)
+                return -EINVAL;
+        instr->addr += part->offset;
+        ret = part->master->erase(part->master, instr);
+        if (ret) {
+                if (instr->fail_addr != 0xffffffff)
+                        instr->fail_addr -= part->offset;
+                instr->addr -= part->offset;
+        }
+        return ret;
+}
+
+void mtd_erase_callback(struct erase_info *instr)
+{
+        if (instr->mtd->erase == part_erase) {
+                struct mtd_part *part = PART(instr->mtd);
+
+                if (instr->fail_addr != 0xffffffff)
+                        instr->fail_addr -= part->offset;
+                instr->addr -= part->offset;
+        }
+        if (instr->callback)
+                instr->callback(instr);
+}
+#ifdef MTD_LINUX
+EXPORT_SYMBOL_GPL(mtd_erase_callback);
+#endif
+
+#ifdef MTD_LINUX
+static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+        struct mtd_part *part = PART(mtd);
+        if ((len + ofs) > mtd->size)
+                return -EINVAL;
+        return part->master->lock(part->master, ofs + part->offset, len);
+}
+
+static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+        struct mtd_part *part = PART(mtd);
+        if ((len + ofs) > mtd->size)
+                return -EINVAL;
+        return part->master->unlock(part->master, ofs + part->offset, len);
+}
+#endif
+
+static void part_sync(struct mtd_info *mtd)
+{
+        struct mtd_part *part = PART(mtd);
+        part->master->sync(part->master);
+}
+
+#ifdef MTD_LINUX
+static int part_suspend(struct mtd_info *mtd)
+{
+        struct mtd_part *part = PART(mtd);
+        return part->master->suspend(part->master);
+}
+
+static void part_resume(struct mtd_info *mtd)
+{
+        struct mtd_part *part = PART(mtd);
+        part->master->resume(part->master);
+}
+#endif
+
+static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
+{
+        struct mtd_part *part = PART(mtd);
+        if (ofs >= mtd->size)
+                return -EINVAL;
+        ofs += part->offset;
+        return part->master->block_isbad(part->master, ofs);
+}
+
+static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
+{
+        struct mtd_part *part = PART(mtd);
+        int res;
+
+        if (!(mtd->flags & MTD_WRITEABLE))
+                return -EROFS;
+        if (ofs >= mtd->size)
+                return -EINVAL;
+        ofs += part->offset;
+        res = part->master->block_markbad(part->master, ofs);
+#ifdef MTD_LINUX
+        if (!res)
+                mtd->ecc_stats.badblocks++;
+#endif
+        return res;
+}
+
+/*
+ * This function unregisters and destroy all slave MTD objects which are
+ * attached to the given master MTD object.
+ */
+
+int del_mtd_partitions(struct mtd_info *master)
+{
+        struct list_head *node;
+        struct mtd_part *slave;
+
+        for (node = mtd_partitions.next;
+             node != &mtd_partitions;
+             node = node->next) {
+                slave = list_entry(node, struct mtd_part, list);
+                if (slave->master == master) {
+                        struct list_head *prev = node->prev;
+                        __list_del(prev, node->next);
+                        if(slave->registered)
+                                del_mtd_device(&slave->mtd);
+                        kfree(slave);
+                        node = prev;
+                }
+        }
+
+        return 0;
+}
+
+/*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+ * the partition definitions.
+ * (Q: should we register the master MTD object as well?)
+ */
+
+int add_mtd_partitions(struct mtd_info *master,
+                       const struct mtd_partition *parts,
+                       int nbparts)
+{
+        struct mtd_part *slave;
+        u_int32_t cur_offset = 0;
+        int i;
+
+        /*
+         * Need to init the list here, since LIST_INIT() does not
+         * work on platforms where relocation has problems (like MIPS
+         * & PPC).
+         */
+        if (mtd_partitions.next == NULL)
+                INIT_LIST_HEAD(&mtd_partitions);
+
+        printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+        for (i = 0; i < nbparts; i++) {
+
+                /* allocate the partition structure */
+                slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+                if (!slave) {
+                        printk ("memory allocation error while creating partitions for \"%s\"\n",
+                                master->name);
+                        del_mtd_partitions(master);
+                        return -ENOMEM;
+                }
+                list_add(&slave->list, &mtd_partitions);
+
+                /* set up the MTD object for this partition */
+                slave->mtd.type = master->type;
+                slave->mtd.flags = master->flags & ~parts[i].mask_flags;
+                slave->mtd.size = parts[i].size;
+                slave->mtd.writesize = master->writesize;
+                slave->mtd.oobsize = master->oobsize;
+                slave->mtd.oobavail = master->oobavail;
+                slave->mtd.subpage_sft = master->subpage_sft;
+
+                slave->mtd.name = parts[i].name;
+                slave->mtd.owner = master->owner;
+
+                slave->mtd.read = part_read;
+                slave->mtd.write = part_write;
+
+#ifdef MTD_LINUX
+                if (master->panic_write)
+                        slave->mtd.panic_write = part_panic_write;
+
+                if(master->point && master->unpoint){
+                        slave->mtd.point = part_point;
+                        slave->mtd.unpoint = part_unpoint;
+                }
+#endif
+
+                if (master->read_oob)
+                        slave->mtd.read_oob = part_read_oob;
+                if (master->write_oob)
+                        slave->mtd.write_oob = part_write_oob;
+                if(master->read_user_prot_reg)
+                        slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+                if(master->read_fact_prot_reg)
+                        slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+                if(master->write_user_prot_reg)
+                        slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+                if(master->lock_user_prot_reg)
+                        slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+                if(master->get_user_prot_info)
+                        slave->mtd.get_user_prot_info = part_get_user_prot_info;
+                if(master->get_fact_prot_info)
+                        slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+                if (master->sync)
+                        slave->mtd.sync = part_sync;
+#ifdef MTD_LINUX
+                if (!i && master->suspend && master->resume) {
+                        slave->mtd.suspend = part_suspend;
+                        slave->mtd.resume = part_resume;
+                }
+                if (master->writev)
+                        slave->mtd.writev = part_writev;
+                if (master->lock)
+                        slave->mtd.lock = part_lock;
+                if (master->unlock)
+                        slave->mtd.unlock = part_unlock;
+#endif
+                if (master->block_isbad)
+                        slave->mtd.block_isbad = part_block_isbad;
+                if (master->block_markbad)
+                        slave->mtd.block_markbad = part_block_markbad;
+                slave->mtd.erase = part_erase;
+                slave->master = master;
+                slave->offset = parts[i].offset;
+                slave->index = i;
+
+                if (slave->offset == MTDPART_OFS_APPEND)
+                        slave->offset = cur_offset;
+                if (slave->offset == MTDPART_OFS_NXTBLK) {
+                        slave->offset = cur_offset;
+                        if ((cur_offset % master->erasesize) != 0) {
+                                /* Round up to next erasesize */
+                                slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+                                printk(KERN_NOTICE "Moving partition %d: "
+                                       "0x%08x -> 0x%08x\n", i,
+                                       cur_offset, slave->offset);
+                        }
+                }
+                if (slave->mtd.size == MTDPART_SIZ_FULL)
+                        slave->mtd.size = master->size - slave->offset;
+                cur_offset = slave->offset + slave->mtd.size;
+
+                printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+                        slave->offset + slave->mtd.size, slave->mtd.name);
+
+                /* let's do some sanity checks */
+                if (slave->offset >= master->size) {
+                        /* let's register it anyway to preserve ordering */
+                        slave->offset = 0;
+                        slave->mtd.size = 0;
+                        printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+                                parts[i].name);
+                }
+                if (slave->offset + slave->mtd.size > master->size) {
+                        slave->mtd.size = master->size - slave->offset;
+                        printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+                                parts[i].name, master->name, slave->mtd.size);
+                }
+                if (master->numeraseregions>1) {
+                        /* Deal with variable erase size stuff */
+                        int i;
+                        struct mtd_erase_region_info *regions = master->eraseregions;
+
+                        /* Find the first erase regions which is part of this partition. */
+                        for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+                                ;
+
+                        for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+                                if (slave->mtd.erasesize < regions[i].erasesize) {
+                                        slave->mtd.erasesize = regions[i].erasesize;
+                                }
+                        }
+                } else {
+                        /* Single erase size */
+                        slave->mtd.erasesize = master->erasesize;
+                }
+
+                if ((slave->mtd.flags & MTD_WRITEABLE) &&
+                    (slave->offset % slave->mtd.erasesize)) {
+                        /* Doesn't start on a boundary of major erase size */
+                        /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+                        slave->mtd.flags &= ~MTD_WRITEABLE;
+                        printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+                                parts[i].name);
+                }
+                if ((slave->mtd.flags & MTD_WRITEABLE) &&
+                    (slave->mtd.size % slave->mtd.erasesize)) {
+                        slave->mtd.flags &= ~MTD_WRITEABLE;
+                        printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+                                parts[i].name);
+                }
+
+                slave->mtd.ecclayout = master->ecclayout;
+                if (master->block_isbad) {
+                        uint32_t offs = 0;
+
+                        while(offs < slave->mtd.size) {
+                                if (master->block_isbad(master,
+                                                        offs + slave->offset))
+                                        slave->mtd.ecc_stats.badblocks++;
+                                offs += slave->mtd.erasesize;
+                        }
+                }
+
+#ifdef MTD_LINUX
+                if (parts[i].mtdp) {
+                        /* store the object pointer
+                         * (caller may or may not register it */
+                        *parts[i].mtdp = &slave->mtd;
+                        slave->registered = 0;
+                } else {
+                        /* register our partition */
+                        add_mtd_device(&slave->mtd);
+                        slave->registered = 1;
+                }
+#else
+                /* register our partition */
+                add_mtd_device(&slave->mtd);
+                slave->registered = 1;
+#endif
+        }
+
+        return 0;
+}
+
+#ifdef MTD_LINUX
+EXPORT_SYMBOL(add_mtd_partitions);
+EXPORT_SYMBOL(del_mtd_partitions);
+#endif
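For orientation, here is a minimal usage sketch of the API this file introduces. It is not part of the commit: the master `mtd_info` pointer, the partition names, offsets and sizes, and the helper `board_register_mtd_parts()` are hypothetical, chosen only to show how `struct mtd_partition`, `MTDPART_OFS_NXTBLK`/`MTDPART_OFS_APPEND`, `MTDPART_SIZ_FULL` and `add_mtd_partitions()` fit together.

```c
/*
 * Hypothetical usage sketch -- not part of this commit.
 * Describes three partitions on an assumed NAND master device and
 * hands them to the new partitioning layer.
 */
#include <common.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition board_parts[] = {
	{
		.name       = "u-boot",
		.offset     = 0,
		.size       = 512 * 1024,         /* first 512 KiB */
		.mask_flags = MTD_WRITEABLE,      /* masked from flags -> read-only */
	}, {
		.name       = "kernel",
		.offset     = MTDPART_OFS_NXTBLK, /* round up to next erase block */
		.size       = 4 * 1024 * 1024,
	}, {
		.name       = "rootfs",
		.offset     = MTDPART_OFS_APPEND, /* directly after "kernel" */
		.size       = MTDPART_SIZ_FULL,   /* remainder of the device */
	},
};

/* Create one slave mtd_info per entry, bound to the given master. */
int board_register_mtd_parts(struct mtd_info *master)
{
	return add_mtd_partitions(master, board_parts,
				  ARRAY_SIZE(board_parts));
}
```

Because `add_mtd_partitions()` computes `slave->mtd.flags = master->flags & ~parts[i].mask_flags`, setting `.mask_flags = MTD_WRITEABLE` clears the writeable bit and the first partition comes up read-only; `del_mtd_partitions(master)` unregisters and frees all slaves again.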