Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (154 commits)
  mtd: cfi_cmdset_0002: use AMD standard command-set with Winbond flash chips
  mtd: cfi_cmdset_0002: Fix MODULE_ALIAS and linkage for new 0701 commandset ID
  mtd: mxc_nand: Remove duplicate NAND_CMD_RESET case value
  mtd: update gfp/slab.h includes
  jffs2: Stop triggering block erases from jffs2_write_super()
  jffs2: Rename jffs2_erase_pending_trigger() to jffs2_dirty_trigger()
  jffs2: Use jffs2_garbage_collect_trigger() to trigger pending erases
  jffs2: Require jffs2_garbage_collect_trigger() to be called with lock held
  jffs2: Wake GC thread when there are blocks to be erased
  jffs2: Erase pending blocks in GC pass, avoid invalid -EIO return
  jffs2: Add 'work_done' return value from jffs2_erase_pending_blocks()
  mtd: mtdchar: Do not corrupt backing device of device node inode
  mtd/maps/pcmciamtd: Fix printk format for ssize_t in debug messages
  drivers/mtd: Use kmemdup
  mtd: cfi_cmdset_0002: Fix argument order in bootloc warning
  mtd: nand: add Toshiba TC58NVG0 device ID
  pcmciamtd: add another ID
  pcmciamtd: coding style cleanups
  pcmciamtd: fixing obvious errors
  mtd: chips: add SST39WF160x NOR-flashes
  ...

Trivial conflicts due to dev_node removal in drivers/mtd/maps/pcmciamtd.c
Linus Torvalds 2010-05-21 07:25:43 -07:00
commit 05ec7dd8dd
117 changed files with 10153 additions and 1644 deletions
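The bulk of the chip-driver churn in the diffs below converts the per-chip lock from the old spinlock-backed chip->mutex pointer to a real struct mutex (mutex_init/mutex_lock/mutex_unlock), which lets the CFI code sleep while it waits on the flash. A minimal out-of-tree sketch of that pattern follows; "example_chip" is a stand-in name, not the kernel's struct flchip.

/*
 * Illustrative sketch only -- it mirrors the spin_lock(chip->mutex) ->
 * mutex_lock(&chip->mutex) substitutions visible throughout this merge.
 */
#include <linux/mutex.h>
#include <linux/wait.h>

struct example_chip {
	struct mutex lock;		/* was: spinlock_t *mutex pointing at a private spinlock */
	wait_queue_head_t wq;
};

static void example_chip_init(struct example_chip *chip)
{
	mutex_init(&chip->lock);	/* was: spin_lock_init() plus a pointer assignment */
	init_waitqueue_head(&chip->wq);
}

static void example_chip_access(struct example_chip *chip)
{
	mutex_lock(&chip->lock);	/* was: spin_lock(chip->mutex) */
	/* issue a command; the caller may now sleep while the chip works */
	mutex_unlock(&chip->lock);	/* was: spin_unlock(chip->mutex) */
}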

View File

@ -4762,6 +4762,12 @@ S: Maintained
F: Documentation/rfkill.txt
F: net/rfkill/
RICOH SMARTMEDIA/XD DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
F: drivers/mtd/nand/r852.c
F: drivers/mtd/nand/r852.h
RISCOM8 DRIVER
S: Orphan
F: Documentation/serial/riscom8.txt

View File

@ -9,9 +9,6 @@
* febff000 22000000 4K model number register
* febfe000 22400000 4K options register
* febfd000 22800000 4K options register #2
* febfc000 [67]0000000 4K NAND data register
* febfb000 [67]0400000 4K NAND control register
* febfa000 [67]0800000 4K NAND busy register
* febf9000 10800000 4K TS-5620 RTC index register
* febf8000 11700000 4K TS-5620 RTC data register
*/
@ -41,22 +38,6 @@
#define TS72XX_OPTIONS2_TS9420_BOOT 0x02
#define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000
#define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000
#define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000
#define TS72XX_NAND_DATA_SIZE 0x00001000
#define TS72XX_NAND1_CONTROL_PHYS_BASE 0x60400000
#define TS72XX_NAND2_CONTROL_PHYS_BASE 0x70400000
#define TS72XX_NAND_CONTROL_VIRT_BASE 0xfebfb000
#define TS72XX_NAND_CONTROL_SIZE 0x00001000
#define TS72XX_NAND1_BUSY_PHYS_BASE 0x60800000
#define TS72XX_NAND2_BUSY_PHYS_BASE 0x70800000
#define TS72XX_NAND_BUSY_VIRT_BASE 0xfebfa000
#define TS72XX_NAND_BUSY_SIZE 0x00001000
#define TS72XX_RTC_INDEX_VIRT_BASE 0xfebf9000
#define TS72XX_RTC_INDEX_PHYS_BASE 0x10800000
#define TS72XX_RTC_INDEX_SIZE 0x00001000

View File

@ -10,12 +10,16 @@
* your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/m48t86.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
#include <mach/ts72xx.h>
@ -54,92 +58,162 @@ static struct map_desc ts72xx_io_desc[] __initdata = {
}
};
static struct map_desc ts72xx_nand_io_desc[] __initdata = {
{
.virtual = TS72XX_NAND_DATA_VIRT_BASE,
.pfn = __phys_to_pfn(TS72XX_NAND1_DATA_PHYS_BASE),
.length = TS72XX_NAND_DATA_SIZE,
.type = MT_DEVICE,
}, {
.virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
.pfn = __phys_to_pfn(TS72XX_NAND1_CONTROL_PHYS_BASE),
.length = TS72XX_NAND_CONTROL_SIZE,
.type = MT_DEVICE,
}, {
.virtual = TS72XX_NAND_BUSY_VIRT_BASE,
.pfn = __phys_to_pfn(TS72XX_NAND1_BUSY_PHYS_BASE),
.length = TS72XX_NAND_BUSY_SIZE,
.type = MT_DEVICE,
}
};
static struct map_desc ts72xx_alternate_nand_io_desc[] __initdata = {
{
.virtual = TS72XX_NAND_DATA_VIRT_BASE,
.pfn = __phys_to_pfn(TS72XX_NAND2_DATA_PHYS_BASE),
.length = TS72XX_NAND_DATA_SIZE,
.type = MT_DEVICE,
}, {
.virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
.pfn = __phys_to_pfn(TS72XX_NAND2_CONTROL_PHYS_BASE),
.length = TS72XX_NAND_CONTROL_SIZE,
.type = MT_DEVICE,
}, {
.virtual = TS72XX_NAND_BUSY_VIRT_BASE,
.pfn = __phys_to_pfn(TS72XX_NAND2_BUSY_PHYS_BASE),
.length = TS72XX_NAND_BUSY_SIZE,
.type = MT_DEVICE,
}
};
static void __init ts72xx_map_io(void)
{
ep93xx_map_io();
iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
}
/*
* The TS-7200 has NOR flash, the other models have NAND flash.
*/
if (!board_is_ts7200()) {
if (is_ts9420_installed()) {
iotable_init(ts72xx_alternate_nand_io_desc,
ARRAY_SIZE(ts72xx_alternate_nand_io_desc));
} else {
iotable_init(ts72xx_nand_io_desc,
ARRAY_SIZE(ts72xx_nand_io_desc));
}
/*************************************************************************
* NAND flash
*************************************************************************/
#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
static void ts72xx_nand_hwcontrol(struct mtd_info *mtd,
int cmd, unsigned int ctrl)
{
struct nand_chip *chip = mtd->priv;
if (ctrl & NAND_CTRL_CHANGE) {
void __iomem *addr = chip->IO_ADDR_R;
unsigned char bits;
addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
bits = __raw_readb(addr) & ~0x07;
bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */
bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */
bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */
__raw_writeb(bits, addr);
}
if (cmd != NAND_CMD_NONE)
__raw_writeb(cmd, chip->IO_ADDR_W);
}
static int ts72xx_nand_device_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
void __iomem *addr = chip->IO_ADDR_R;
addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
return !!(__raw_readb(addr) & 0x20);
}
static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
static struct mtd_partition ts72xx_nand_parts[] = {
{
.name = "TS-BOOTROM",
.offset = 0,
.size = TS72XX_BOOTROM_PART_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
.name = "Linux",
.offset = MTDPART_OFS_APPEND,
.size = 0, /* filled in later */
}, {
.name = "RedBoot",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
};
static void ts72xx_nand_set_parts(uint64_t size,
struct platform_nand_chip *chip)
{
/* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */
if (size == SZ_32M || size == SZ_128M) {
/* Set the "Linux" partition size */
ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE;
chip->partitions = ts72xx_nand_parts;
chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts);
} else {
pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20);
}
}
static struct platform_nand_data ts72xx_nand_data = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
.chip_delay = 15,
.part_probe_types = ts72xx_nand_part_probes,
.set_parts = ts72xx_nand_set_parts,
},
.ctrl = {
.cmd_ctrl = ts72xx_nand_hwcontrol,
.dev_ready = ts72xx_nand_device_ready,
},
};
static struct resource ts72xx_nand_resource[] = {
{
.start = 0, /* filled in later */
.end = 0, /* filled in later */
.flags = IORESOURCE_MEM,
},
};
static struct platform_device ts72xx_nand_flash = {
.name = "gen_nand",
.id = -1,
.dev.platform_data = &ts72xx_nand_data,
.resource = ts72xx_nand_resource,
.num_resources = ARRAY_SIZE(ts72xx_nand_resource),
};
/*************************************************************************
* NOR flash (TS-7200 only)
*************************************************************************/
static struct physmap_flash_data ts72xx_flash_data = {
static struct physmap_flash_data ts72xx_nor_data = {
.width = 2,
};
static struct resource ts72xx_flash_resource = {
static struct resource ts72xx_nor_resource = {
.start = EP93XX_CS6_PHYS_BASE,
.end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device ts72xx_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &ts72xx_flash_data,
},
.num_resources = 1,
.resource = &ts72xx_flash_resource,
static struct platform_device ts72xx_nor_flash = {
.name = "physmap-flash",
.id = 0,
.dev.platform_data = &ts72xx_nor_data,
.resource = &ts72xx_nor_resource,
.num_resources = 1,
};
static void __init ts72xx_register_flash(void)
{
if (board_is_ts7200())
platform_device_register(&ts72xx_flash);
if (board_is_ts7200()) {
platform_device_register(&ts72xx_nor_flash);
} else {
resource_size_t start;
if (is_ts9420_installed())
start = EP93XX_CS7_PHYS_BASE;
else
start = EP93XX_CS6_PHYS_BASE;
ts72xx_nand_resource[0].start = start;
ts72xx_nand_resource[0].end = start + SZ_16M - 1;
platform_device_register(&ts72xx_nand_flash);
}
}
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);

View File

@ -305,6 +305,15 @@ void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
platform_device_register(&kirkwood_nand_flash);
}
void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
int (*dev_ready)(struct mtd_info *))
{
kirkwood_clk_ctrl |= CGC_RUNIT;
kirkwood_nand_data.parts = parts;
kirkwood_nand_data.nr_parts = nr_parts;
kirkwood_nand_data.dev_ready = dev_ready;
platform_device_register(&kirkwood_nand_flash);
}
/*****************************************************************************
* SoC RTC

View File

@ -16,6 +16,7 @@ struct mv643xx_eth_platform_data;
struct mv_sata_platform_data;
struct mvsdio_platform_data;
struct mtd_partition;
struct mtd_info;
/*
* Basic Kirkwood init functions used early by machine-setup.
@ -41,6 +42,7 @@ void kirkwood_i2c_init(void);
void kirkwood_uart0_init(void);
void kirkwood_uart1_init(void);
void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
extern int kirkwood_tclk;
extern struct sys_timer kirkwood_timer;

View File

@ -14,6 +14,7 @@
*/
struct orion_nand_data {
struct mtd_partition *parts;
int (*dev_ready)(struct mtd_info *mtd);
u32 nr_parts;
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
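A hypothetical board-support snippet (not part of this commit) showing how the dev_ready hook and kirkwood_nand_init_rnb() declared above might be wired up; the partition layout, header name and ready test are invented for illustration.

/*
 * Assumed board file for a Kirkwood machine; "common.h" stands in for the
 * mach-kirkwood header that declares kirkwood_nand_init_rnb().
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include "common.h"

static struct mtd_partition example_nand_parts[] = {
	{
		.name	= "u-boot",
		.offset	= 0,
		.size	= 0x00100000,		/* 1 MiB, made-up layout */
	}, {
		.name	= "rootfs",
		.offset	= MTDPART_OFS_NXTBLK,
		.size	= MTDPART_SIZ_FULL,
	},
};

static int example_nand_dev_ready(struct mtd_info *mtd)
{
	/* Board specific: return non-zero once the chip's R/B# line reports ready. */
	return 1;
}

static void __init example_board_init(void)
{
	kirkwood_nand_init_rnb(example_nand_parts,
			       ARRAY_SIZE(example_nand_parts),
			       example_nand_dev_ready);
}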

View File

@ -304,6 +304,19 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
config SM_FTL
tristate "SmartMedia/xD new translation layer"
depends on EXPERIMENTAL && BLOCK
select MTD_BLKDEVS
select MTD_NAND_ECC
help
This enables new and very EXPERIMENTAL support for SmartMedia/xD
FTL (Flash translation layer).
Write support isn't yet well tested, therefore this code IS likely to
eat your card, so please don't use it together with valuable data.
Use the read-only driver (CONFIG_SSFDC) instead.
config MTD_OOPS
tristate "Log panic/oops to an MTD buffer"
depends on MTD

View File

@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o
obj-$(CONFIG_INFTL) += inftl.o
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
nftl-objs := nftlcore.o nftlmount.o

View File

@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
return mtd;
setup_err:
if(mtd) {
kfree(mtd->eraseregions);
kfree(mtd);
}
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
spin_lock_init(&chip->_spinlock);
chip->mutex = &chip->_spinlock;
mutex_init(&chip->mutex);
chip++;
}
}
@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
return -EIO;
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
ret = spin_trylock(contender->mutex);
ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
goto retry;
}
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
goto retry;
}
@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
spin_lock(loaner->mutex);
mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
spin_lock(chip->mutex);
spin_unlock(loaner->mutex);
mutex_lock(&chip->mutex);
mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
* a suspended erase state. If so let's wait
* until it's done.
*/
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
cond_resched();
timeo--;
}
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
}
if (chip->erase_suspended && chip_state == FL_ERASING) {
/* Erase suspend occurred while sleeping: reset timeout */
@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
chip->state = FL_POINT;
chip->ref_point_counter++;
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
else
thislen = len;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return -EINVAL;
}
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
retry:
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
adr += chip->start;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
case FL_PM_SUSPENDED:
break;
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
return 0;

View File

@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
@ -43,10 +44,6 @@
#define MAX_WORD_RETRIES 3
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_ATMEL 0x001F
#define MANUFACTURER_MACRONIX 0x00C2
#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
* This reduces the risk of false detection due to
* the 8-bit device ID.
*/
(cfi->mfr == MANUFACTURER_MACRONIX)) {
(cfi->mfr == CFI_MFR_MACRONIX)) {
DEBUG(MTD_DEBUG_LEVEL1,
"%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
@ -260,6 +258,42 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/*
* These flashes report two separate eraseblock regions based on the
* sector_erase-size and block_erase-size, although they both operate on the
* same memory. This is not allowed according to CFI, so we just pick the
* sector_erase-size.
*/
cfi->cfiq->NumEraseRegions = 1;
}
static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
fixup_old_sst_eraseregion(mtd);
cfi->addr_unlock1 = 0x5555;
cfi->addr_unlock2 = 0x2AAA;
}
static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
fixup_old_sst_eraseregion(mtd);
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2AA;
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
@ -282,11 +316,24 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
}
}
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@ -304,9 +351,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@ -355,67 +402,72 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->name = map->name;
mtd->writesize = 1;
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
if (cfi->cfi_mode==CFI_MODE_CFI){
unsigned char bootloc;
/*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure. So we read the feature
* table from it.
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_amdstd *extp;
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
if (!extp) {
kfree(mtd);
return NULL;
}
if (extp) {
/*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure.
*/
cfi_fixup_major_minor(cfi, extp);
cfi_fixup_major_minor(cfi, extp);
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
kfree(extp);
kfree(mtd);
return NULL;
}
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
kfree(extp);
kfree(mtd);
return NULL;
}
/* Install our own private info structure */
cfi->cmdset_priv = extp;
/* Install our own private info structure */
cfi->cmdset_priv = extp;
/* Apply cfi device specific fixups */
cfi_fixup(mtd, cfi_fixup_table);
/* Apply cfi device specific fixups */
cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
#endif
bootloc = extp->TopBottom;
if ((bootloc != 2) && (bootloc != 3)) {
printk(KERN_WARNING "%s: CFI does not contain boot "
"bank location. Assuming top.\n", map->name);
bootloc = 2;
}
if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
int j = (cfi->cfiq->NumEraseRegions-1)-i;
__u32 swap;
swap = cfi->cfiq->EraseRegionInfo[i];
cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
cfi->cfiq->EraseRegionInfo[j] = swap;
bootloc = extp->TopBottom;
if ((bootloc < 2) || (bootloc > 5)) {
printk(KERN_WARNING "%s: CFI contains unrecognised boot "
"bank location (%d). Assuming bottom.\n",
map->name, bootloc);
bootloc = 2;
}
if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
int j = (cfi->cfiq->NumEraseRegions-1)-i;
__u32 swap;
swap = cfi->cfiq->EraseRegionInfo[i];
cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
cfi->cfiq->EraseRegionInfo[j] = swap;
}
}
/* Set the default CFI lock/unlock addresses */
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2aa;
}
cfi_fixup(mtd, cfi_nopri_fixup_table);
if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
kfree(mtd);
return NULL;
}
/* Set the default CFI lock/unlock addresses */
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2aa;
} /* CFI mode */
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
@ -437,7 +489,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
@ -491,13 +547,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
#endif
__module_get(THIS_MODULE);
register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
if(mtd) {
kfree(mtd->eraseregions);
kfree(mtd);
}
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
return NULL;
@ -571,9 +626,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
return -EIO;
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
goto retry;
}
@ -617,9 +672,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return -EIO;
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@ -634,6 +689,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
chip->state = FL_READY;
return 0;
case FL_SHUTDOWN:
/* The machine is rebooting */
return -EIO;
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
@ -643,10 +702,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
goto resettime;
}
}
@ -778,7 +837,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@ -788,15 +847,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
* a suspended erase state. If so let's wait
* until it's done.
*/
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@ -858,17 +917,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define UDELAY(map, chip, adr, usec) \
do { \
spin_unlock(chip->mutex); \
mutex_unlock(&chip->mutex); \
cfi_udelay(usec); \
spin_lock(chip->mutex); \
mutex_lock(&chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
spin_unlock(chip->mutex); \
mutex_unlock(&chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
cfi_udelay(usec); \
spin_lock(chip->mutex); \
mutex_lock(&chip->mutex); \
} while (0)
#endif
@ -884,10 +943,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -900,7 +959,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
@ -954,7 +1013,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
struct cfi_private *cfi = map->fldrv_priv;
retry:
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
#if 0
@ -963,7 +1022,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@ -992,7 +1051,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
wake_up(&chip->wq);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
@ -1061,10 +1120,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1107,11 +1166,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
continue;
}
@ -1143,7 +1202,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1175,7 +1234,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry:
spin_lock(cfi->chips[chipnum].mutex);
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@ -1184,7 +1243,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
spin_unlock(cfi->chips[chipnum].mutex);
mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@ -1198,7 +1257,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
spin_unlock(cfi->chips[chipnum].mutex);
mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
@ -1253,7 +1312,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry1:
spin_lock(cfi->chips[chipnum].mutex);
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@ -1262,7 +1321,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
spin_unlock(cfi->chips[chipnum].mutex);
mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@ -1275,7 +1334,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_read(map, ofs + chipstart);
spin_unlock(cfi->chips[chipnum].mutex);
mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
@ -1310,10 +1369,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
adr += chip->start;
cmd_adr = adr;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1368,11 +1427,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
continue;
}
@ -1400,7 +1459,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1500,10 +1559,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
adr = cfi->addr_unlock1;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1536,10 +1595,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@ -1573,7 +1632,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1588,10 +1647,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1624,10 +1683,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@ -1663,7 +1722,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_READY;
put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1715,7 +1774,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
@ -1741,7 +1800,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1751,7 +1810,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
@ -1769,7 +1828,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -1797,7 +1856,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@ -1811,7 +1870,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
break;
default:
@ -1819,7 +1878,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
@ -1834,13 +1893,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
@ -1856,7 +1915,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@ -1876,7 +1935,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@ -1885,13 +1944,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
@ -1910,7 +1969,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
@ -1920,15 +1979,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
/*
* Ensure that the flash device is put back into read array mode before
* unloading the driver or rebooting. On some systems, rebooting while
* the flash is in query/program/erase mode will prevent the CPU from
* fetching the bootloader code, requiring a hard reset or power cycle.
*/
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i, ret;
struct flchip *chip;
for (i = 0; i < cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xF0), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
mutex_unlock(&chip->mutex);
}
return 0;
}
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
void *v)
{
struct mtd_info *mtd;
mtd = container_of(nb, struct mtd_info, reboot_notifier);
cfi_amdstd_reset(mtd);
return NOTIFY_DONE;
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
cfi_amdstd_reset(mtd);
unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
@ -1938,3 +2044,5 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");

View File

@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
timeo = jiffies + HZ;
retry:
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* If it's in FL_ERASING state, suspend it and make it talk now.
@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
return -EIO;
}
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
}
suspended = 1;
@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
}
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* Later, we can actually think about interrupting it
@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
status.x[0], map_read(map, cmd_adr).x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
if (map_word_andequal(map, status, status_OK, status_OK))
break;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
return -EIO;
}
@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(chip->buffer_write_time);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
timeo = jiffies + (HZ/2);
z = 0;
@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* Someone's suspended the write. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
continue;
}
@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
z++;
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
}
if (!z) {
chip->buffer_write_time--;
@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
}
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
timeo = jiffies + HZ;
retry:
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@ -766,13 +766,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@ -781,7 +781,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@ -797,9 +797,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
msleep(1000);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@ -810,11 +810,11 @@ retry:
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ*20); /* FIXME */
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
continue;
}
@ -828,14 +828,14 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
}
DISABLE_VPP(map);
@ -878,7 +878,7 @@ retry:
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
goto retry;
}
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@ -887,7 +887,7 @@ retry:
}
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
break;
default:
@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
timeo = jiffies + HZ;
retry:
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@ -1071,13 +1071,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@ -1086,7 +1086,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@ -1098,9 +1098,9 @@ retry:
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
msleep(1000);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@ -1118,21 +1118,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
timeo = jiffies + HZ;
retry:
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@ -1220,13 +1220,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@ -1235,7 +1235,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@ -1247,9 +1247,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
msleep(1000);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@ -1267,21 +1267,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the unlock, wait a while and retry */
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
cfi_udelay(1);
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
}
}
@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
spin_lock_bh(chip->mutex);
mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
spin_unlock_bh(chip->mutex);
mutex_unlock(&chip->mutex);
}
}

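The hunks above convert the per-chip lock in the 0020 (ST Advanced) command set from spin_lock_bh()/spin_unlock_bh() on a spinlock pointer to a struct mutex embedded in struct flchip, so the code paths that sleep on chip->wq may hold a sleeping lock. A minimal sketch of that wait-loop pattern, using a hypothetical my_chip structure in place of struct flchip:

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_chip {                        /* stand-in for struct flchip */
	struct mutex mutex;             /* was: spinlock_t *mutex */
	wait_queue_head_t wq;
	int state;
};

static void my_chip_init(struct my_chip *chip)
{
	mutex_init(&chip->mutex);       /* was: spin_lock_init() plus pointer setup */
	init_waitqueue_head(&chip->wq);
}

/* Wait until the chip leaves a busy state; sleeping requires a mutex, not a spinlock. */
static void my_chip_wait(struct my_chip *chip, int busy_state)
{
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&chip->mutex);
	while (chip->state == busy_state) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);     /* drop the lock before sleeping */
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);       /* retake it and recheck the state */
	}
	mutex_unlock(&chip->mutex);
}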
View File

@ -158,6 +158,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
#ifdef DEBUG_CFI
@ -181,29 +182,6 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
/* Note we put the device back into Read Mode BEFORE going into Auto
* Select Mode, as some devices support nesting of modes, others
* don't. This way should always work.
* On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
* so should be treated as nops or illegal (and so put the device
* back into Read Mode, which is a nop in this case).
*/
cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
cfi->mfr = cfi_read_query16(map, base);
cfi->id = cfi_read_query16(map, base + ofs_factor);
/* Get AMD/Spansion extended JEDEC ID */
if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
cfi_read_query(map, base + 0xf * ofs_factor);
/* Put it back into Read Mode */
cfi_qry_mode_off(base, map, cfi);
xip_allowed(base, map);
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@ -228,6 +206,35 @@ static int __xipram cfi_chip_setup(struct map_info *map,
#endif
}
if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
addr_unlock1 = 0x5555;
addr_unlock2 = 0x2AAA;
}
/*
* Note we put the device back into Read Mode BEFORE going into Auto
* Select Mode, as some devices support nesting of modes, others
* don't. This way should always work.
* On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
* so should be treated as nops or illegal (and so put the device
* back into Read Mode, which is a nop in this case).
*/
cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi->mfr = cfi_read_query16(map, base);
cfi->id = cfi_read_query16(map, base + ofs_factor);
/* Get AMD/Spansion extended JEDEC ID */
if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
cfi_read_query(map, base + 0xf * ofs_factor);
/* Put it back into Read Mode */
cfi_qry_mode_off(base, map, cfi);
xip_allowed(base, map);
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8);
@ -269,6 +276,9 @@ static char *vendorname(__u16 vendor)
case P_ID_SST_PAGE:
return "SST Page Write";
case P_ID_SST_OLD:
return "SST 39VF160x/39VF320x";
case P_ID_INTEL_PERFORMANCE:
return "Intel Performance Code";

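The cfi_probe.c hunk above moves the Auto Select sequence until after the CFI query data is read, so chips reporting the old SST primary command-set ID can be unlocked at 0x5555/0x2AAA instead of the usual 0x555/0x2AA. A hedged sketch of just the address selection; the P_ID_SST_OLD value is an assumption here, and a hypothetical helper stands in for the cfi_send_gen_cmd() calls:

#include <stdint.h>

#define P_ID_SST_OLD 0x0701     /* assumed ID for the old SST 39xF160x/320x command set */

struct unlock_addrs {
	uint32_t addr1;
	uint32_t addr2;
};

/* Pick the JEDEC unlock addresses from the CFI primary command-set ID. */
static struct unlock_addrs pick_unlock_addrs(uint16_t primary_id)
{
	struct unlock_addrs a = { 0x555, 0x2AA };       /* AMD/Fujitsu default */

	if (primary_id == P_ID_SST_OLD) {
		a.addr1 = 0x5555;       /* old SST parts need the long-form addresses */
		a.addr2 = 0x2AAA;
	}
	return a;
}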
View File

@ -104,10 +104,11 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
int i;
struct cfi_extquery *extp = NULL;
printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
if (!adr)
goto out;
printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
extp = kmalloc(size, GFP_KERNEL);
if (!extp) {
printk(KERN_ERR "Failed to allocate memory\n");

View File

@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
* to flash memory - that means that we don't have to check status
* and timeout.
*/
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Done and happy. */
chip->state = chip->oldstate;
put_chip(map, chip, adr);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return 0;
}

View File

@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
pchip->start = (i << cfi.chipshift);
pchip->state = FL_READY;
init_waitqueue_head(&pchip->wq);
spin_lock_init(&pchip->_spinlock);
pchip->mutex = &pchip->_spinlock;
mutex_init(&pchip->mutex);
}
}
@ -242,17 +241,19 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
/* We need these for the !CONFIG_MODULES case,
because symbol_get() doesn't work there */
#ifdef CONFIG_MTD_CFI_INTELEXT
case 0x0001:
case 0x0003:
case 0x0200:
case P_ID_INTEL_EXT:
case P_ID_INTEL_STD:
case P_ID_INTEL_PERFORMANCE:
return cfi_cmdset_0001(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_AMDSTD
case 0x0002:
case P_ID_AMD_STD:
case P_ID_SST_OLD:
case P_ID_WINBOND:
return cfi_cmdset_0002(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_STAA
case 0x0020:
case P_ID_ST_ADV:
return cfi_cmdset_0020(map, primary);
#endif
default:

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
#
# linux/drivers/devices/Makefile
# linux/drivers/mtd/devices/Makefile
#
obj-$(CONFIG_MTD_DOC2000) += doc2000.o

View File

@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Setup the MTD structure */
/* make the name contain the block device in */
name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1,
GFP_KERNEL);
name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
goto devinit_err;
sprintf(name, "block2mtd: %s", devname);
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;

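The block2mtd hunk replaces a hand-sized kmalloc() followed by sprintf() with kasprintf(), which computes the length and formats the buffer in a single call. A small sketch of the idiom, assuming the same "block2mtd: <devname>" naming:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Build the MTD name for a backing block device; the caller must kfree() it. */
static char *block2mtd_name(const char *devname)
{
	return kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
}

kasprintf() returns NULL on allocation failure, so the existing !name check in add_device() still applies unchanged.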
View File

@ -668,7 +668,7 @@ static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
int count, found = 0;
int found = 0;
struct mtd_info *mtd;
u32 length = 0;
@ -695,7 +695,7 @@ static int __init init_pmc551(void)
/*
* PCU-bus chipset probe.
*/
for (count = 0; count < MAX_MTD_DEVICES; count++) {
for (;;) {
if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
PCI_DEVICE_ID_V3_SEMI_V370PDC,

View File

@ -73,15 +73,25 @@ static struct flash_info __initdata sst25l_flash_info[] = {
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
unsigned char command, response;
struct spi_message m;
struct spi_transfer t;
unsigned char cmd_resp[2];
int err;
command = SST25L_CMD_RDSR;
err = spi_write_then_read(flash->spi, &command, 1, &response, 1);
spi_message_init(&m);
memset(&t, 0, sizeof(struct spi_transfer));
cmd_resp[0] = SST25L_CMD_RDSR;
cmd_resp[1] = 0xff;
t.tx_buf = cmd_resp;
t.rx_buf = cmd_resp;
t.len = sizeof(cmd_resp);
spi_message_add_tail(&t, &m);
err = spi_sync(flash->spi, &m);
if (err < 0)
return err;
*status = response;
*status = cmd_resp[1];
return 0;
}
@ -328,33 +338,32 @@ out:
static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
unsigned char command[4], response;
struct spi_message m;
struct spi_transfer t;
unsigned char cmd_resp[6];
int i, err;
uint16_t id;
command[0] = SST25L_CMD_READ_ID;
command[1] = 0;
command[2] = 0;
command[3] = 0;
err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
spi_message_init(&m);
memset(&t, 0, sizeof(struct spi_transfer));
cmd_resp[0] = SST25L_CMD_READ_ID;
cmd_resp[1] = 0;
cmd_resp[2] = 0;
cmd_resp[3] = 0;
cmd_resp[4] = 0xff;
cmd_resp[5] = 0xff;
t.tx_buf = cmd_resp;
t.rx_buf = cmd_resp;
t.len = sizeof(cmd_resp);
spi_message_add_tail(&t, &m);
err = spi_sync(spi, &m);
if (err < 0) {
dev_err(&spi->dev, "error reading device id msb\n");
dev_err(&spi->dev, "error reading device id\n");
return NULL;
}
id = response << 8;
command[0] = SST25L_CMD_READ_ID;
command[1] = 0;
command[2] = 0;
command[3] = 1;
err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
if (err < 0) {
dev_err(&spi->dev, "error reading device id lsb\n");
return NULL;
}
id |= response;
id = (cmd_resp[4] << 8) | cmd_resp[5];
for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
if (sst25l_flash_info[i].device_id == id)
@ -411,17 +420,6 @@ static int __init sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
if (flash->mtd.numeraseregions)
for (i = 0; i < flash->mtd.numeraseregions; i++)
DEBUG(MTD_DEBUG_LEVEL2,
"mtd.eraseregions[%d] = { .offset = 0x%llx, "
".erasesize = 0x%.8x (%uKiB), "
".numblocks = %d }\n",
i, (long long)flash->mtd.eraseregions[i].offset,
flash->mtd.eraseregions[i].erasesize,
flash->mtd.eraseregions[i].erasesize / 1024,
flash->mtd.eraseregions[i].numblocks);
if (mtd_has_partitions()) {
struct mtd_partition *parts = NULL;
int nr_parts = 0;

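Both sst25l hunks above replace spi_write_then_read(), which issues two half-duplex transfers, with a single full-duplex spi_sync() transfer whose buffer carries the command bytes followed by dummy bytes that are overwritten by the response. A hedged sketch of the pattern for the status-register read; the 0x05 RDSR opcode is an assumption standing in for the driver's SST25L_CMD_RDSR:

#include <linux/spi/spi.h>

#define MY_CMD_RDSR 0x05        /* assumed opcode: read status register */

/* Read the status register with one full-duplex transfer. */
static int my_read_status(struct spi_device *spi, u8 *status)
{
	struct spi_message m;
	struct spi_transfer t = { };
	u8 cmd_resp[2] = { MY_CMD_RDSR, 0xff };
	int err;

	spi_message_init(&m);
	t.tx_buf = cmd_resp;            /* byte 0: command, byte 1: dummy */
	t.rx_buf = cmd_resp;            /* response lands in byte 1 */
	t.len = sizeof(cmd_resp);
	spi_message_add_tail(&t, &m);

	err = spi_sync(spi, &m);
	if (err < 0)
		return err;

	*status = cmd_resp[1];
	return 0;
}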
View File

@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
ftl_freepart((partition_t *)dev);
kfree(dev);
}
static struct mtd_blktrans_ops ftl_tr = {

View File

@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
kfree(inftl->PUtable);
kfree(inftl->VUtable);
kfree(inftl);
}
/*

View File

@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl)
}
/* To be safer with BIOS, also use erase mark as discriminant */
if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize +
SECTORSIZE + 8, 8, &retlen,
(char *)&h1) < 0)) {
ret = inftl_read_oob(mtd,
block * inftl->EraseSize + SECTORSIZE + 8,
8, &retlen,(char *)&h1);
if (ret < 0) {
printk(KERN_WARNING "INFTL: ANAND header found at "
"0x%x in mtd%d, but OOB data read failed "
"(err %d)\n", block * inftl->EraseSize,

View File

@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
spin_lock_init(&chip->_spinlock);
chip->mutex = &chip->_spinlock;
mutex_init(&chip->mutex);
chip++;
}
}
@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
cond_resched();
timeo--;
}
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
}
if (chip->erase_suspended || chip->write_suspended) {
/* Suspend has occured while sleep: reset timeout */
@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
ret = spin_trylock(contender->mutex);
ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, mode);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender);
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
goto retry;
}
spin_unlock(contender->mutex);
mutex_unlock(&contender->mutex);
}
/* Check if we have suspended erase on this chip.
@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
goto retry;
}
@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip)
if (shared->writing && shared->writing != chip) {
/* give back the ownership */
struct flchip *loaner = shared->writing;
spin_lock(loaner->mutex);
mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
put_chip(map, loaner);
spin_lock(chip->mutex);
spin_unlock(loaner->mutex);
mutex_lock(&chip->mutex);
mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
wbufsize = 1 << lpddr->qinfo->BufSizeShift;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
/* Figure out the number of words to write */
@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
}
out: put_chip(map, chip);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
struct flchip *chip = &lpddr->chips[chipnum];
int ret;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_ERASING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
goto out;
}
out: put_chip(map, chip);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_READY);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
*retlen = len;
put_chip(map, chip);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
else
thislen = len;
/* get the chip */
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_POINT);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
if (ret)
break;
@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
else
thislen = len;
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
"pointed region\n", map->name);
put_chip(map, chip);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_LOCKING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
goto out;
}
out: put_chip(map, chip);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
spin_lock(chip->mutex);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}
@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
}
out: put_chip(map, chip);
spin_unlock(chip->mutex);
mutex_unlock(&chip->mutex);
return ret;
}

View File

@ -134,13 +134,12 @@ out:
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
if (!lpddr->qinfo) {
printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
map->name);
return 0;
}
memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
@ -185,13 +184,11 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
retlpddr = kmalloc(sizeof(struct lpddr_private) +
retlpddr = kzalloc(sizeof(struct lpddr_private) +
numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!retlpddr)
return NULL;
memset(retlpddr, 0, sizeof(struct lpddr_private) +
numvirtchips * sizeof(struct flchip));
memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
retlpddr->numchips = numvirtchips;

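The qinfo_probe.c hunks swap kmalloc() plus memset(..., 0, ...) for kzalloc(), which allocates and zeroes in one step. A minimal sketch with a hypothetical structure standing in for struct qinfo_chip:

#include <linux/slab.h>

struct my_qinfo {                       /* stand-in for struct qinfo_chip */
	unsigned int burst_read_size;
	unsigned int hw_parts_num;
};

static struct my_qinfo *my_qinfo_alloc(void)
{
	/* One call replaces kmalloc() + memset(); the memory comes back zeroed. */
	return kzalloc(sizeof(struct my_qinfo), GFP_KERNEL);
}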
View File

@ -435,7 +435,7 @@ config MTD_PCI
config MTD_PCMCIA
tristate "PCMCIA MTD driver"
depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN
depends on PCMCIA && MTD_COMPLEX_MAPPINGS
help
Map driver for accessing PCMCIA linear flash memory cards. These
cards are usually around 4-16MiB in size. This does not include

View File

@ -70,7 +70,7 @@ static void switch_back(struct async_state *state)
local_irq_restore(state->irq_flags);
}
static map_word bfin_read(struct map_info *map, unsigned long ofs)
static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t word;
@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs)
return test;
}
static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s
switch_back(state);
}
static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t d;
@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
switch_back(state);
}
static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENOMEM;
state->map.name = DRIVER_NAME;
state->map.read = bfin_read;
state->map.copy_from = bfin_copy_from;
state->map.write = bfin_write;
state->map.copy_to = bfin_copy_to;
state->map.read = bfin_flash_read;
state->map.copy_from = bfin_flash_copy_from;
state->map.write = bfin_flash_write;
state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
state->map.size = memory->end - memory->start + 1;
state->map.virt = (void __iomem *)memory->start;

View File

@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
static int __init clps_setup_flash(void)
{
int nr;
int nr = 0;
#ifdef CONFIG_ARCH_CEIVA
if (machine_is_ceiva()) {

View File

@ -165,12 +165,11 @@ static int ixp2000_flash_probe(struct platform_device *dev)
return -EIO;
}
info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
memset(info, 0, sizeof(struct ixp2000_flash_info));
platform_set_drvdata(dev, info);

View File

@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
return;
if (from & 1) {
*dest++ = BYTE1(flash_read16(src));
src++;
*dest++ = BYTE1(flash_read16(src-1));
src++;
--len;
}
@ -196,12 +196,11 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
memset(info, 0, sizeof(struct ixp4xx_flash_info));
platform_set_drvdata(dev, info);

View File

@ -40,10 +40,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
static const int debug = 0;
#endif
#define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg)
#define DRIVER_DESC "PCMCIA Flash memory card driver"
@ -99,7 +96,9 @@ module_param(mem_type, int, 0);
MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
/* read/write{8,16} copy_{from,to} routines with window remapping
* to access whole card
*/
static caddr_t remap_window(struct map_info *map, unsigned long to)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
@ -136,7 +135,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(addr);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
return d;
}
@ -151,7 +150,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(addr);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
return d;
}
@ -161,7 +160,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
caddr_t addr;
@ -189,7 +188,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
@ -200,7 +199,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
@ -210,7 +209,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
caddr_t addr;
@ -244,7 +243,8 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(win_base + ofs);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
ofs, win_base + ofs, d.x[0]);
return d;
}
@ -258,7 +258,8 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(win_base + ofs);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
ofs, win_base + ofs, d.x[0]);
return d;
}
@ -270,32 +271,34 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
if(DEV_REMOVED(map))
return;
DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr)
static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d);
writeb(d, win_base + adr);
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
adr, win_base + adr, d.x[0]);
writeb(d.x[0], win_base + adr);
}
static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr)
static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d);
writew(d, win_base + adr);
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
adr, win_base + adr, d.x[0]);
writew(d.x[0], win_base + adr);
}
@ -306,7 +309,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
if(DEV_REMOVED(map))
return;
DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
memcpy_toio(win_base + to, from, len);
}
@ -375,7 +378,8 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
DEBUG(2, "JEDEC: 0x%02x 0x%02x",
t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
@ -431,7 +435,7 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
}
static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
{
int i;
@ -476,7 +480,8 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link,
}
DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
dev->pcmcia_map.size,
dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
@ -489,7 +494,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
struct mtd_info *mtd = NULL;
cs_status_t status;
win_req_t req;
int ret;
int i;
@ -513,9 +517,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(setvpp == 1)
dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
/* Request a memory window for PCMCIA. Some architeures can map windows upto the maximum
that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the
whole card - otherwise we try smaller windows until we succeed */
/* Request a memory window for PCMCIA. Some architectures can map windows
* up to the maximum that PCMCIA can support (64MiB) - this is ideal and

* we aim for a window the size of the whole card - otherwise we try
* smaller windows until we succeed
*/
req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
@ -543,7 +549,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "dev->win_size = %d", dev->win_size);
if(!dev->win_size) {
err("Cant allocate memory window");
dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
@ -553,7 +559,8 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win);
dev->win_base = ioremap(req.Base, req.Size);
if(!dev->win_base) {
err("ioremap(%lu, %u) failed", req.Base, req.Size);
dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n",
req.Base, req.Size);
pcmciamtd_release(link);
return -ENODEV;
}
@ -564,7 +571,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
dev->vpp = (vpp) ? vpp : link->socket.socket.Vpp;
dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
link->conf.Attributes = 0;
if(setvpp == 2) {
link->conf.Vpp = dev->vpp;
@ -600,7 +607,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
if(!mtd) {
DEBUG(1, "Cant find an MTD");
DEBUG(1, "Can not find an MTD");
pcmciamtd_release(link);
return -ENODEV;
}
@ -611,8 +618,9 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(new_name) {
int size = 0;
char unit = ' ';
/* Since we are using a default name, make it better by adding in the
size */
/* Since we are using a default name, make it better by adding
* in the size
*/
if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
size = mtd->size >> 10;
unit = 'K';
@ -642,15 +650,15 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(add_mtd_device(mtd)) {
map_destroy(mtd);
dev->mtd_info = NULL;
err("Couldnt register MTD device");
dev_err(&dev->p_dev->dev,
"Could not register the MTD device\n");
pcmciamtd_release(link);
return -ENODEV;
}
info("mtd%d: %s", mtd->index, mtd->name);
dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
return 0;
failed:
err("CS Error, exiting");
dev_err(&dev->p_dev->dev, "CS Error, exiting\n");
pcmciamtd_release(link);
return -ENODEV;
}
@ -689,8 +697,9 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
if(dev->mtd_info) {
del_mtd_device(dev->mtd_info);
dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
dev->mtd_info->index);
map_destroy(dev->mtd_info);
info("mtd%d: Removed", dev->mtd_info->index);
}
pcmciamtd_release(link);
@ -734,8 +743,11 @@ static struct pcmcia_device_id pcmciamtd_ids[] = {
PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
/* the following was commented out in pcmcia-cs-3.2.7 */

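Many of the pcmciamtd hunks only adjust format strings: map_word.x[] elements are unsigned long, so %02x/%04x become %02lx/%04lx, and the copy_from/copy_to lengths are ssize_t, printed with %zd. A short sketch matching specifiers to types, with a hypothetical debug macro playing the role of the driver's DEBUG():

#include <linux/kernel.h>
#include <linux/mtd/map.h>

/* Hypothetical debug helper; the driver's DEBUG() macro plays this role. */
#define my_dbg(fmt, ...) printk(KERN_DEBUG "pcmciamtd: " fmt "\n", ##__VA_ARGS__)

static void debug_access(unsigned long ofs, map_word d, ssize_t len)
{
	my_dbg("ofs = 0x%08lx data = 0x%02lx", ofs, d.x[0]);    /* x[0] is unsigned long */
	my_dbg("len = %zd", len);                               /* ssize_t wants %zd */
}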
View File

@ -264,8 +264,11 @@ static int __init physmap_init(void)
err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
if (err == 0)
platform_device_register(&physmap_flash);
if (err == 0) {
err = platform_device_register(&physmap_flash);
if (err)
platform_driver_unregister(&physmap_flash_driver);
}
#endif
return err;

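The physmap init hunk makes a platform_device_register() failure unwind the driver registration instead of being silently ignored. A hedged sketch of the register-then-unwind pattern, with hypothetical my_driver/my_device objects assumed to be defined elsewhere:

#include <linux/platform_device.h>

static struct platform_driver my_driver;        /* assumed to be filled in elsewhere */
static struct platform_device my_device;        /* assumed to be filled in elsewhere */

static int __init my_init(void)
{
	int err = platform_driver_register(&my_driver);

	if (err)
		return err;

	err = platform_device_register(&my_device);
	if (err)
		platform_driver_unregister(&my_driver); /* undo on failure */

	return err;
}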
View File

@ -173,12 +173,53 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
}
}
#ifdef CONFIG_MTD_PARTITIONS
/* When partitions are set we look for a linux,part-probe property which
specifies the list of partition probers to use. If none is given then the
default list is used. These take precedence over other device tree
information. */
static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
static const char ** __devinit of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
unsigned int l;
unsigned int count;
const char **res;
cp = of_get_property(dp, "linux,part-probe", &cplen);
if (cp == NULL)
return part_probe_types_def;
count = 0;
for (l = 0; l != cplen; l++)
if (cp[l] == 0)
count++;
res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
count = 0;
while (cplen > 0) {
res[count] = cp;
l = strlen(cp) + 1;
cp += l;
cplen -= l;
count++;
}
return res;
}
static void __devinit of_free_probes(const char **probes)
{
if (probes != part_probe_types_def)
kfree(probes);
}
#endif
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
const char **part_probe_types;
#endif
struct device_node *dp = dev->node;
struct resource res;
@ -218,7 +259,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
if (!mtd_list)
goto err_flash_remove;
@ -307,12 +348,14 @@ static int __devinit of_flash_probe(struct of_device *dev,
goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
if (err < 0)
if (err < 0) {
of_free_probes(part_probe_types);
return err;
}
of_free_probes(part_probe_types);
#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {

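of_get_probes() above turns the linux,part-probe property - a sequence of NUL-terminated strings - into a NULL-terminated pointer array for parse_mtd_partitions(). A plain-C sketch of that two-pass parse (count the strings, then fill the table), using calloc() in place of kzalloc() so it can run in user space:

#include <stdlib.h>
#include <string.h>

/* Split a buffer of consecutive NUL-terminated strings into a NULL-terminated
 * pointer array.  The pointers alias the input buffer; only the array itself
 * is freed by the caller. */
static const char **split_string_list(const char *buf, int buflen)
{
	const char **res;
	int count = 0, i;

	for (i = 0; i < buflen; i++)            /* pass 1: count the strings */
		if (buf[i] == '\0')
			count++;

	res = calloc(count + 1, sizeof(*res));  /* +1 for the NULL terminator */
	if (!res)
		return NULL;

	count = 0;
	while (buflen > 0) {                    /* pass 2: record each string */
		int l = strlen(buf) + 1;

		res[count++] = buf;
		buf += l;
		buflen -= l;
	}
	return res;
}

Unlike the kernel hunk, this sketch checks the allocation result; of_get_probes() above does not, which is a separate (if unlikely) concern from the parsing itself.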
View File

@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
/* FIXME: set_vpp needs saner arguments */
pismo_setvpp_remove_fix(pismo);
i2c_set_clientdata(client, NULL);
kfree(pismo);
return 0;
@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client,
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
return ret;
goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client,
pdata->cs_addrs[i]);
return 0;
exit_free:
i2c_set_clientdata(client, NULL);
kfree(pismo);
return ret;
}
static const struct i2c_device_id pismo_id[] = {

View File

@ -63,11 +63,10 @@ static int __init pxa2xx_flash_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
memset(info, 0, sizeof(struct pxa2xx_flash_info));
info->map.name = (char *) flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;

View File

@ -14,7 +14,6 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@ -25,12 +24,42 @@
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);
void blktrans_dev_release(struct kref *kref)
{
struct mtd_blktrans_dev *dev =
container_of(kref, struct mtd_blktrans_dev, ref);
dev->disk->private_data = NULL;
blk_cleanup_queue(dev->rq);
put_disk(dev->disk);
list_del(&dev->list);
kfree(dev);
}
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
struct mtd_blktrans_dev *dev;
mutex_lock(&blktrans_ref_mutex);
dev = disk->private_data;
if (!dev)
goto unlock;
kref_get(&dev->ref);
unlock:
mutex_unlock(&blktrans_ref_mutex);
return dev;
}
void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
mutex_lock(&blktrans_ref_mutex);
kref_put(&dev->ref, blktrans_dev_release);
mutex_unlock(&blktrans_ref_mutex);
}
struct mtd_blkcore_priv {
struct task_struct *thread;
struct request_queue *rq;
spinlock_t queue_lock;
};
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
return -EIO;
rq_flush_dcache_pages(req);
return 0;
case WRITE:
if (!tr->writesect)
return -EIO;
@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return -EIO;
@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
struct mtd_blktrans_dev *dev = arg;
struct request_queue *rq = dev->rq;
struct request *req = NULL;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
struct mtd_blktrans_dev *dev;
int res;
if (!req && !(req = blk_fetch_request(rq))) {
@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg)
continue;
}
dev = req->rq_disk->private_data;
tr = dev->tr;
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
res = do_blktrans_request(tr, dev, req);
res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg)
static void mtd_blktrans_request(struct request_queue *rq)
{
struct mtd_blktrans_ops *tr = rq->queuedata;
wake_up_process(tr->blkcore_priv->thread);
}
struct mtd_blktrans_dev *dev;
struct request *req = NULL;
dev = rq->queuedata;
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
else
wake_up_process(dev->thread);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
struct mtd_blktrans_ops *tr = dev->tr;
int ret = -ENODEV;
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
int ret;
if (!get_mtd_device(NULL, dev->mtd->index))
goto out;
if (!dev)
return -ERESTARTSYS;
if (!try_module_get(tr->owner))
goto out_tr;
mutex_lock(&dev->lock);
/* FIXME: Locking. A hot pluggable device can go away
(del_mtd_device can be called for it) without its module
being unloaded. */
dev->mtd->usecount++;
ret = 0;
if (tr->open && (ret = tr->open(dev))) {
dev->mtd->usecount--;
put_mtd_device(dev->mtd);
out_tr:
module_put(tr->owner);
if (!dev->mtd) {
ret = -ENXIO;
goto unlock;
}
out:
ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
/* Take another reference on the device so it won't go away till
last release */
if (!ret)
kref_get(&dev->ref);
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
struct mtd_blktrans_dev *dev = disk->private_data;
struct mtd_blktrans_ops *tr = dev->tr;
int ret = 0;
struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
int ret = -ENXIO;
if (tr->release)
ret = tr->release(dev);
if (!dev)
return ret;
if (!ret) {
dev->mtd->usecount--;
put_mtd_device(dev->mtd);
module_put(tr->owner);
}
mutex_lock(&dev->lock);
/* Release one reference; we are sure it is not the last one here */
kref_put(&dev->ref, blktrans_dev_release);
if (!dev->mtd)
goto unlock;
ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
int ret = -ENXIO;
if (dev->tr->getgeo)
return dev->tr->getgeo(dev, geo);
return -ENOTTY;
if (!dev)
return ret;
mutex_lock(&dev->lock);
if (!dev->mtd)
goto unlock;
ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
struct mtd_blktrans_ops *tr = dev->tr;
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
int ret = -ENXIO;
if (!dev)
return ret;
mutex_lock(&dev->lock);
if (!dev->mtd)
goto unlock;
switch (cmd) {
case BLKFLSBUF:
if (tr->flush)
return tr->flush(dev);
/* The core code did the work, we had nothing to do. */
return 0;
ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
default:
return -ENOTTY;
ret = -ENOTTY;
}
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
}
static const struct block_device_operations mtd_blktrans_ops = {
@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
int ret;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
mutex_lock(&blktrans_ref_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
mutex_unlock(&blktrans_ref_mutex);
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
last_devnum = d->devnum;
}
ret = -EBUSY;
if (new->devnum == -1)
new->devnum = last_devnum+1;
if ((new->devnum << tr->part_bits) > 256) {
return -EBUSY;
/* Check that the device and any partitions will get valid
* minor numbers and that the disk naming code below can cope
* with this number. */
if (new->devnum > (MINORMASK >> tr->part_bits) ||
(tr->part_bits && new->devnum >= 27 * 26)) {
mutex_unlock(&blktrans_ref_mutex);
goto error1;
}
list_add_tail(&new->list, &tr->devs);
added:
mutex_unlock(&blktrans_ref_mutex);
mutex_init(&new->lock);
kref_init(&new->ref);
if (!tr->writesect)
new->readonly = 1;
/* Create gendisk */
ret = -ENOMEM;
gd = alloc_disk(1 << tr->part_bits);
if (!gd) {
list_del(&new->list);
return -ENOMEM;
}
if (!gd)
goto error2;
new->disk = gd;
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->fops = &mtd_blktrans_ops;
@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
/* 2.5 has capacity in units of 512 bytes while still
having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
/* Create the request queue */
spin_lock_init(&new->queue_lock);
new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
if (!new->rq)
goto error3;
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
if (tr->discard)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
new->rq);
gd->queue = new->rq;
__get_mtd_device(new->mtd);
__module_get(tr->owner);
/* Create processing thread */
/* TODO: workqueue ? */
new->thread = kthread_run(mtd_blktrans_thread, new,
"%s%d", tr->name, new->mtd->index);
if (IS_ERR(new->thread)) {
ret = PTR_ERR(new->thread);
goto error4;
}
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
add_disk(gd);
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
new->disk_attributes);
WARN_ON(ret);
}
return 0;
error4:
module_put(tr->owner);
__put_mtd_device(new->mtd);
blk_cleanup_queue(new->rq);
error3:
put_disk(new->disk);
error2:
list_del(&new->list);
error1:
kfree(new);
return ret;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
list_del(&old->list);
/* Stop new requests to arrive */
del_gendisk(old->disk);
del_gendisk(old->blkcore_priv);
put_disk(old->blkcore_priv);
if (old->disk_attributes)
sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
old->disk_attributes);
/* Stop the thread */
kthread_stop(old->thread);
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
old->rq->queuedata = NULL;
blk_start_queue(old->rq);
spin_unlock_irqrestore(&old->queue_lock, flags);
/* Ask trans driver for release to the mtd device */
mutex_lock(&old->lock);
if (old->open && old->tr->release) {
old->tr->release(old);
old->open = 0;
}
__put_mtd_device(old->mtd);
module_put(old->tr->owner);
/* At that point, we don't touch the mtd anymore */
old->mtd = NULL;
mutex_unlock(&old->lock);
blktrans_dev_put(old);
return 0;
}
@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = {
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
int ret, i;
struct mtd_info *mtd;
int ret;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
if (!tr->blkcore_priv)
return -ENOMEM;
mutex_lock(&mtd_table_mutex);
@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
spin_lock_init(&tr->blkcore_priv->queue_lock);
tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
if (!tr->blkcore_priv->rq) {
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return -ENOMEM;
}
tr->blkcore_priv->rq->queuedata = tr;
blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
if (tr->discard)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
tr->blkcore_priv->rq);
tr->blkshift = ffs(tr->blksize) - 1;
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
"%sd", tr->name);
if (IS_ERR(tr->blkcore_priv->thread)) {
ret = PTR_ERR(tr->blkcore_priv->thread);
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
for (i=0; i<MAX_MTD_DEVICES; i++) {
if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
tr->add_mtd(tr, mtd_table[i]);
}
mtd_for_each_device(mtd)
if (mtd->type != MTD_ABSENT)
tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
return 0;
}
@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
/* Clean up the kernel thread */
kthread_stop(tr->blkcore_priv->thread);
/* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
mutex_unlock(&mtd_table_mutex);
kfree(tr->blkcore_priv);
BUG_ON(!list_empty(&tr->devs));
return 0;
}

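The rewritten mtd_blkdevs.c ties each translation device's lifetime to a kref: lookup paths take a reference through blktrans_dev_get(), and the final blktrans_dev_put() runs a release callback that tears the device down. A hedged sketch of that get/put pattern, with a hypothetical my_blkdev structure in place of struct mtd_blktrans_dev:

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(my_ref_mutex);      /* serialises lookup against the final put */

struct my_blkdev {
	struct kref ref;
	/* ... queue, disk and mtd pointers would live here ... */
};

static void my_blkdev_release(struct kref *kref)
{
	struct my_blkdev *dev = container_of(kref, struct my_blkdev, ref);

	/* Last reference dropped: free the per-device state. */
	kfree(dev);
}

static struct my_blkdev *my_blkdev_get(struct my_blkdev *dev)
{
	mutex_lock(&my_ref_mutex);
	if (dev)
		kref_get(&dev->ref);
	mutex_unlock(&my_ref_mutex);
	return dev;
}

static void my_blkdev_put(struct my_blkdev *dev)
{
	mutex_lock(&my_ref_mutex);
	kref_put(&dev->ref, my_blkdev_release);
	mutex_unlock(&my_ref_mutex);
}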
View File

@ -19,15 +19,15 @@
#include <linux/mutex.h>
static struct mtdblk_dev {
struct mtd_info *mtd;
struct mtdblk_dev {
struct mtd_blktrans_dev mbd;
int count;
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];
};
static struct mutex mtdblks_lock;
@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
struct mtd_info *mtd = mtdblk->mtd;
struct mtd_info *mtd = mtdblk->mbd.mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
struct mtd_info *mtd = mtdblk->mtd;
struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
struct mtd_info *mtd = mtdblk->mtd;
struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk;
struct mtd_info *mtd = mbd->mtd;
int dev = mbd->devnum;
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
mutex_lock(&mtdblks_lock);
if (mtdblks[dev]) {
mtdblks[dev]->count++;
if (mtdblk->count) {
mtdblk->count++;
mutex_unlock(&mtdblks_lock);
return 0;
}
/* OK, it's not open. Create cache info for it */
mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
if (!mtdblk) {
mutex_unlock(&mtdblks_lock);
return -ENOMEM;
}
mtdblk->count = 1;
mtdblk->mtd = mtd;
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
mtdblk->cache_size = mtdblk->mtd->erasesize;
if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
mtdblks[dev] = mtdblk;
mutex_unlock(&mtdblks_lock);
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
int dev = mbd->devnum;
struct mtdblk_dev *mtdblk = mtdblks[dev];
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
/* It was the last usage. Free the device */
mtdblks[dev] = NULL;
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
/* It was the last usage. Free the cache */
if (mbd->mtd->sync)
mbd->mtd->sync(mbd->mtd);
vfree(mtdblk->cache_data);
kfree(mtdblk);
}
mutex_unlock(&mtdblks_lock);
@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
if (dev->mtd->sync)
dev->mtd->sync(dev->mtd);
return 0;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
dev->mtd = mtd;
dev->devnum = mtd->index;
dev->mbd.mtd = mtd;
dev->mbd.devnum = mtd->index;
dev->size = mtd->size >> 9;
dev->tr = tr;
dev->mbd.size = mtd->size >> 9;
dev->mbd.tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
dev->readonly = 1;
dev->mbd.readonly = 1;
add_mtd_blktrans_dev(dev);
if (add_mtd_blktrans_dev(&dev->mbd))
kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {

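mtdblock.c now embeds the generic struct mtd_blktrans_dev as the first member of its private struct mtdblk_dev and recovers the outer structure with container_of(), instead of indexing a global mtdblks[] array by device number. A minimal sketch of the embedding, with hypothetical outer/inner names:

#include <linux/kernel.h>       /* container_of() */

struct inner {                  /* stand-in for struct mtd_blktrans_dev */
	int devnum;
};

struct outer {                  /* stand-in for struct mtdblk_dev */
	struct inner mbd;       /* embedded generic part */
	int count;              /* driver-private state */
};

static struct outer *to_outer(struct inner *mbd)
{
	/* Walk back from the embedded member to the containing structure. */
	return container_of(mbd, struct outer, mbd);
}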
View File

@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->tr = tr;
dev->readonly = 1;
add_mtd_blktrans_dev(dev);
if (add_mtd_blktrans_dev(dev))
kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {

View File

@ -15,12 +15,15 @@
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <asm/uaccess.h>
#define MTD_INODE_FS_MAGIC 0x11307854
static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
@ -28,6 +31,7 @@
*/
struct mtd_file_info {
struct mtd_info *mtd;
struct inode *ino;
enum mtd_file_modes mode;
};
@ -64,12 +68,10 @@ static int mtd_open(struct inode *inode, struct file *file)
int ret = 0;
struct mtd_info *mtd;
struct mtd_file_info *mfi;
struct inode *mtd_ino;
DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
if (devnum >= MAX_MTD_DEVICES)
return -ENODEV;
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
@ -88,11 +90,23 @@ static int mtd_open(struct inode *inode, struct file *file)
goto out;
}
if (mtd->backing_dev_info)
file->f_mapping->backing_dev_info = mtd->backing_dev_info;
mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
if (!mtd_ino) {
put_mtd_device(mtd);
ret = -ENOMEM;
goto out;
}
if (mtd_ino->i_state & I_NEW) {
mtd_ino->i_private = mtd;
mtd_ino->i_mode = S_IFCHR;
mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
unlock_new_inode(mtd_ino);
}
file->f_mapping = mtd_ino->i_mapping;
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
iput(mtd_ino);
put_mtd_device(mtd);
ret = -EACCES;
goto out;
@ -100,10 +114,12 @@ static int mtd_open(struct inode *inode, struct file *file)
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
iput(mtd_ino);
put_mtd_device(mtd);
ret = -ENOMEM;
goto out;
}
mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
@ -125,6 +141,8 @@ static int mtd_close(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && mtd->sync)
mtd->sync(mtd);
iput(mfi->ino);
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
@ -373,7 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (!mtd->write_oob)
ret = -EOPNOTSUPP;
else
ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
if (ret)
return ret;
@ -482,7 +500,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
uint32_t ur_idx;
struct mtd_erase_region_info *kr;
struct region_info_user *ur = (struct region_info_user *) argp;
struct region_info_user __user *ur = argp;
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
@ -954,22 +972,81 @@ static const struct file_operations mtd_fops = {
#endif
};
static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data,
struct vfsmount *mnt)
{
return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
mnt);
}
static struct file_system_type mtd_inodefs_type = {
.name = "mtd_inodefs",
.get_sb = mtd_inodefs_get_sb,
.kill_sb = kill_anon_super,
};
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}
static void mtdchar_notify_remove(struct mtd_info *mtd)
{
struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
if (mtd_ino) {
/* Destroy the inode if it exists */
mtd_ino->i_nlink = 0;
iput(mtd_ino);
}
}
static struct mtd_notifier mtdchar_notifier = {
.add = mtdchar_notify_add,
.remove = mtdchar_notify_remove,
};
static int __init init_mtdchar(void)
{
int status;
int ret;
status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
if (status < 0) {
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
MTD_CHAR_MAJOR);
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
"mtd", &mtd_fops);
if (ret < 0) {
pr_notice("Can't allocate major number %d for "
"Memory Technology Devices.\n", MTD_CHAR_MAJOR);
return ret;
}
return status;
ret = register_filesystem(&mtd_inodefs_type);
if (ret) {
pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
goto err_unregister_chdev;
}
mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
if (IS_ERR(mtd_inode_mnt)) {
ret = PTR_ERR(mtd_inode_mnt);
pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
goto err_unregister_filesystem;
}
register_mtd_user(&mtdchar_notifier);
return ret;
err_unregister_filesystem:
unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
return ret;
}
static void __exit cleanup_mtdchar(void)
{
unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
unregister_mtd_user(&mtdchar_notifier);
mntput(mtd_inode_mnt);
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
module_init(init_mtdchar);

View File

@ -183,10 +183,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);
entry_low = 0;
for (i = 0; i < concat->num_subdev; i++) {

View File

@ -19,7 +19,9 @@
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
@ -63,13 +65,18 @@ static struct class mtd_class = {
.resume = mtd_cls_resume,
};
static DEFINE_IDR(mtd_idr);
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
struct mtd_info *mtd_table[MAX_MTD_DEVICES];
EXPORT_SYMBOL_GPL(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table);
struct mtd_info *__mtd_next_device(int i)
{
return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
@ -265,13 +272,13 @@ static struct device_type mtd_devtype = {
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or 1 on failure, which currently will only happen
* if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
* or there's a sysfs error.
* if there is insufficient memory or a sysfs error.
*/
int add_mtd_device(struct mtd_info *mtd)
{
int i;
struct mtd_notifier *not;
int i, error;
if (!mtd->backing_dev_info) {
switch (mtd->type) {
@ -290,70 +297,73 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
for (i=0; i < MAX_MTD_DEVICES; i++)
if (!mtd_table[i]) {
struct mtd_notifier *not;
do {
if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
goto fail_locked;
error = idr_get_new(&mtd_idr, mtd, &i);
} while (error == -EAGAIN);
mtd_table[i] = mtd;
mtd->index = i;
mtd->usecount = 0;
if (error)
goto fail_locked;
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
mtd->erasesize_shift = 0;
mtd->index = i;
mtd->usecount = 0;
if (is_power_of_2(mtd->writesize))
mtd->writesize_shift = ffs(mtd->writesize) - 1;
else
mtd->writesize_shift = 0;
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
mtd->erasesize_shift = 0;
mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
if (is_power_of_2(mtd->writesize))
mtd->writesize_shift = ffs(mtd->writesize) - 1;
else
mtd->writesize_shift = 0;
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
&& (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
if (mtd->unlock(mtd, 0, mtd->size))
printk(KERN_WARNING
"%s: unlock failed, "
"writes may not work\n",
mtd->name);
}
mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Caller should have set dev.parent to match the
* physical device.
*/
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
if (device_register(&mtd->dev) != 0) {
mtd_table[i] = NULL;
break;
}
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
&& (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
if (mtd->unlock(mtd, 0, mtd->size))
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
}
if (MTD_DEVT(i))
device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
/* Caller should have set dev.parent to match the
* physical device.
*/
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
if (device_register(&mtd->dev) != 0)
goto fail_added;
DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->add(mtd);
if (MTD_DEVT(i))
device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
mutex_unlock(&mtd_table_mutex);
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
return 0;
}
DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->add(mtd);
mutex_unlock(&mtd_table_mutex);
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
return 0;
fail_added:
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
return 1;
}
@ -371,31 +381,34 @@ int add_mtd_device(struct mtd_info *mtd)
int del_mtd_device (struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
if (mtd_table[mtd->index] != mtd) {
if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
} else if (mtd->usecount) {
goto out_error;
}
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
if (mtd->usecount) {
printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
mtd->index, mtd->name, mtd->usecount);
ret = -EBUSY;
} else {
struct mtd_notifier *not;
device_unregister(&mtd->dev);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
mtd_table[mtd->index] = NULL;
idr_remove(&mtd_idr, mtd->index);
module_put(THIS_MODULE);
ret = 0;
}
out_error:
mutex_unlock(&mtd_table_mutex);
return ret;
}
@ -411,7 +424,7 @@ int del_mtd_device (struct mtd_info *mtd)
void register_mtd_user (struct mtd_notifier *new)
{
int i;
struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
@ -419,9 +432,8 @@ void register_mtd_user (struct mtd_notifier *new)
__module_get(THIS_MODULE);
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i])
new->add(mtd_table[i]);
mtd_for_each_device(mtd)
new->add(mtd);
mutex_unlock(&mtd_table_mutex);
}
@ -438,15 +450,14 @@ void register_mtd_user (struct mtd_notifier *new)
int unregister_mtd_user (struct mtd_notifier *old)
{
int i;
struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i])
old->remove(mtd_table[i]);
mtd_for_each_device(mtd)
old->remove(mtd);
list_del(&old->list);
mutex_unlock(&mtd_table_mutex);
@ -468,42 +479,56 @@ int unregister_mtd_user (struct mtd_notifier *old)
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL;
int i, err = -ENODEV;
struct mtd_info *ret = NULL, *other;
int err = -ENODEV;
mutex_lock(&mtd_table_mutex);
if (num == -1) {
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i] == mtd)
ret = mtd_table[i];
} else if (num >= 0 && num < MAX_MTD_DEVICES) {
ret = mtd_table[num];
mtd_for_each_device(other) {
if (other == mtd) {
ret = mtd;
break;
}
}
} else if (num >= 0) {
ret = idr_find(&mtd_idr, num);
if (mtd && mtd != ret)
ret = NULL;
}
if (!ret)
goto out_unlock;
if (!try_module_get(ret->owner))
goto out_unlock;
if (ret->get_device) {
err = ret->get_device(ret);
if (err)
goto out_put;
if (!ret) {
ret = ERR_PTR(err);
goto out;
}
ret->usecount++;
err = __get_mtd_device(ret);
if (err)
ret = ERR_PTR(err);
out:
mutex_unlock(&mtd_table_mutex);
return ret;
}
out_put:
module_put(ret->owner);
out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
int __get_mtd_device(struct mtd_info *mtd)
{
int err;
if (!try_module_get(mtd->owner))
return -ENODEV;
if (mtd->get_device) {
err = mtd->get_device(mtd);
if (err) {
module_put(mtd->owner);
return err;
}
}
mtd->usecount++;
return 0;
}
/**
@ -517,14 +542,14 @@ out_unlock:
struct mtd_info *get_mtd_device_nm(const char *name)
{
int i, err = -ENODEV;
struct mtd_info *mtd = NULL;
int err = -ENODEV;
struct mtd_info *mtd = NULL, *other;
mutex_lock(&mtd_table_mutex);
for (i = 0; i < MAX_MTD_DEVICES; i++) {
if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
mtd = mtd_table[i];
mtd_for_each_device(other) {
if (!strcmp(name, other->name)) {
mtd = other;
break;
}
}
@ -554,14 +579,19 @@ out_unlock:
void put_mtd_device(struct mtd_info *mtd)
{
int c;
mutex_lock(&mtd_table_mutex);
c = --mtd->usecount;
__put_mtd_device(mtd);
mutex_unlock(&mtd_table_mutex);
}
void __put_mtd_device(struct mtd_info *mtd)
{
--mtd->usecount;
BUG_ON(mtd->usecount < 0);
if (mtd->put_device)
mtd->put_device(mtd);
mutex_unlock(&mtd_table_mutex);
BUG_ON(c < 0);
module_put(mtd->owner);
}
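For reference, the usual calling sequence for the lookup helpers reworked here, as a minimal sketch (the device name "boot-flash" is purely illustrative):

struct mtd_info *mtd = get_mtd_device_nm("boot-flash");

if (IS_ERR(mtd))
	return PTR_ERR(mtd);
/* ... use mtd: read, write, erase ... */
put_mtd_device(mtd);	/* drops usecount and the owner module reference */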
@ -599,7 +629,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device);
EXPORT_SYMBOL_GPL(del_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
EXPORT_SYMBOL_GPL(__get_mtd_device);
EXPORT_SYMBOL_GPL(put_mtd_device);
EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
@ -611,14 +643,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
static inline int mtd_proc_info (char *buf, int i)
static inline int mtd_proc_info(char *buf, struct mtd_info *this)
{
struct mtd_info *this = mtd_table[i];
if (!this)
return 0;
return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
(unsigned long long)this->size,
this->erasesize, this->name);
}
@ -626,15 +653,15 @@ static inline int mtd_proc_info (char *buf, int i)
static int mtd_read_proc (char *page, char **start, off_t off, int count,
int *eof, void *data_unused)
{
int len, l, i;
struct mtd_info *mtd;
int len, l;
off_t begin = 0;
mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
for (i=0; i< MAX_MTD_DEVICES; i++) {
l = mtd_proc_info(page + len, i);
mtd_for_each_device(mtd) {
l = mtd_proc_info(page + len, mtd);
len += l;
if (len+begin > off+count)
goto done;

View File

@ -8,4 +8,9 @@
should not use them for _anything_ else */
extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[MAX_MTD_DEVICES];
extern struct mtd_info *__mtd_next_device(int i);
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
(mtd) != NULL; \
(mtd) = __mtd_next_device(mtd->index + 1))
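A minimal usage sketch for the new iterator, mirroring how register_mtd_user() and the /proc code walk the devices; the caller is assumed to hold mtd_table_mutex while iterating:

struct mtd_info *mtd;

mutex_lock(&mtd_table_mutex);
mtd_for_each_device(mtd)
	pr_info("mtd%d: %s\n", mtd->index, mtd->name);
mutex_unlock(&mtd_table_mutex);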

View File

@ -429,11 +429,6 @@ static int __init mtdoops_init(void)
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
if (cxt->mtd_index > MAX_MTD_DEVICES) {
printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
mtd_index);
return -EINVAL;
}
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {

View File

@ -152,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
mtd = get_mtd_device(NULL, mtdnr);
if (!IS_ERR(mtd)) {
if (!strcmp(mtd->name, dev_name + 4))
return get_sb_mtd_aux(
fs_type, flags,
dev_name, data, mtd,
fill_super, mnt);
put_mtd_device(mtd);
}
}
mtd = get_mtd_device_nm(dev_name + 4);
if (!IS_ERR(mtd))
return get_sb_mtd_aux(
fs_type, flags,
dev_name, data, mtd,
fill_super, mnt);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",

View File

@ -2,11 +2,23 @@ menuconfig MTD_NAND
tristate "NAND Device Support"
depends on MTD
select MTD_NAND_IDS
select MTD_NAND_ECC
help
This enables support for accessing all types of NAND flash
devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
config MTD_NAND_ECC
tristate
config MTD_NAND_ECC_SMC
bool "NAND ECC Smart Media byte order"
depends on MTD_NAND_ECC
default n
help
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
if MTD_NAND
config MTD_NAND_VERIFY_WRITE
@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
config MTD_NAND_ECC_SMC
bool "NAND ECC Smart Media byte order"
config MTD_SM_COMMON
tristate
default n
help
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
config MTD_NAND_MUSEUM_IDS
bool "Enable chip ids for obsolete ancient NAND devices"
@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12
This enables the driver for the autronix autcpu12 board to
access the SmartMediaCard.
config MTD_NAND_DENALI
depends on PCI
tristate "Support Denali NAND controller on Intel Moorestown"
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
config MTD_NAND_DENALI_SCRATCH_REG_ADDR
hex "Denali NAND size scratch register address"
default "0xFF108018"
help
Some platforms place the NAND chip size in a scratch register
because (some versions of) the driver aren't able to automatically
determine the size of certain chips. Set the address of the
scratch register here to enable this feature. On Intel Moorestown
boards, the scratch register is at 0xFF108018.
config MTD_NAND_EDB7312
tristate "Support for Cirrus Logic EBD7312 evaluation board"
depends on ARCH_EDB7312
@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
or in DMA interrupt mode.
Say y for DMA mode or MPU mode will be used
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
help
Support for NAND flash on Technologic Systems TS-7250 platform.
config MTD_NAND_IDS
tristate
config MTD_NAND_RICOH
tristate "Ricoh xD card reader"
default n
depends on PCI
select MTD_SM_COMMON
help
Enable support for the Ricoh R5C852 xD card reader.
You also need to enable either the
'NAND SSFDC (SmartMedia) read only translation layer' or the new,
experimental, read-write
'SmartMedia/xD new translation layer'.
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on SOC_AU1200 || SOC_AU1550
@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE
If unsure, say N
endchoice
endchoice
config MTD_NAND_PXA3xx
@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
config MTD_NAND_MPC5121_NFC
tristate "MPC5121 built-in NAND Flash Controller support"
depends on PPC_MPC512x
help
This enables the driver for the NAND flash controller on the
MPC5121 SoC.
config MTD_NAND_MXC
tristate "MXC NAND support"
depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
config MTD_NAND_W90P910
tristate "Support for NAND on w90p910 evaluation board."
config MTD_NAND_NUC900
tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
depends on ARCH_W90X900 && MTD_PARTITIONS
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910.
on w90p910 / NUC9xx.
endif # MTD_NAND

View File

@ -2,13 +2,16 @@
# linux/drivers/nand/Makefile
#
obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
obj-$(CONFIG_MTD_NAND) += nand.o
obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
nand-objs := nand_base.o nand_bbt.o

View File

@ -49,7 +49,7 @@
#define TIMEOUT HZ
static struct usb_device_id alauda_table [] = {
static const struct usb_device_id alauda_table[] = {
{ USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
{ USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
{ }

View File

@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto err_scan_ident;
}

View File

@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
u32 nand_phys;
/* Allocate memory for MTD device structure and private data */
au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!au1550_mtd) {
printk("Unable to allocate NAND MTD dev structure.\n");
return -ENOMEM;
@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
/* Get pointer to private data */
this = (struct nand_chip *)(&au1550_mtd[1]);
/* Initialize structures */
memset(au1550_mtd, 0, sizeof(struct mtd_info));
memset(this, 0, sizeof(struct nand_chip));
/* Link the private data with the MTD structure */
au1550_mtd->priv = this;
au1550_mtd->owner = THIS_MODULE;
@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
}
nand_phys = (mem_staddr << 4) & 0xFFFC0000;
p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
p_nand = ioremap(nand_phys, 0x1000);
/* make controller and MTD agree */
if (NAND_CS == 0)
@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
return 0;
outio:
iounmap((void *)p_nand);
iounmap(p_nand);
outmem:
kfree(au1550_mtd);
@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
kfree(au1550_mtd);
/* Unmap */
iounmap((void *)p_nand);
iounmap(p_nand);
}
module_exit(au1550_cleanup);

View File

@ -13,7 +13,6 @@
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
* layout we'll be using.
*/
err = nand_scan_ident(board_mtd, 1);
err = nand_scan_ident(board_mtd, 1, NULL);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);

View File

@ -68,6 +68,27 @@
#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
/* NFC_STAT Masks */
#define NBUSY 0x01 /* Not Busy */
#define WB_FULL 0x02 /* Write Buffer Full */
#define PG_WR_STAT 0x04 /* Page Write Pending */
#define PG_RD_STAT 0x08 /* Page Read Pending */
#define WB_EMPTY 0x10 /* Write Buffer Empty */
/* NFC_IRQSTAT Masks */
#define NBUSYIRQ 0x01 /* Not Busy IRQ */
#define WB_OVF 0x02 /* Write Buffer Overflow */
#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
#define RD_RDY 0x08 /* Read Data Ready */
#define WR_DONE 0x10 /* Page Write Done */
/* NFC_RST Masks */
#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
/* NFC_PGCTL Masks */
#define PG_RD_START 0x01 /* Page Read Start */
#define PG_WR_START 0x02 /* Page Write Start */
#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
static int hardware_ecc = 1;
#else
@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
* transferred to generate the correct ECC register
* values.
*/
bfin_write_NFC_RST(0x1);
bfin_write_NFC_RST(ECC_RST);
SSYNC();
disable_dma(CH_NFC);
@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
/* The DMAs have different size on BF52x and BF54x */
/* The DMAs have different size on BF52x and BF54x */
#ifdef CONFIG_BF52x
set_dma_x_count(CH_NFC, (page_size >> 1));
set_dma_x_modify(CH_NFC, 2);
@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* Start PAGE read/write operation */
if (is_read)
bfin_write_NFC_PGCTL(0x1);
bfin_write_NFC_PGCTL(PG_RD_START);
else
bfin_write_NFC_PGCTL(0x2);
bfin_write_NFC_PGCTL(PG_WR_START);
wait_for_completion(&info->dma_completion);
}

View File

@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Scan to find existence of the device */
if (nand_scan_ident(mtd, 2)) {
if (nand_scan_ident(mtd, 2, NULL)) {
err = -ENXIO;
goto out_irq;
}
@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
kfree(mtd);
}
static struct pci_device_id cafe_nand_tbl[] = {
static const struct pci_device_id cafe_nand_tbl[] = {
{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
PCI_ANY_ID, PCI_ANY_ID },
{ }

View File

@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
vaddr = ioremap(res1->start, res1->end - res1->start);
base = ioremap(res2->start, res2->end - res2->start);
vaddr = ioremap(res1->start, resource_size(res1));
base = ioremap(res2->start, resource_size(res2));
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;

drivers/mtd/nand/denali.c (new file, 2134 lines): diff suppressed because it is too large

drivers/mtd/nand/denali.h (new file, 816 lines)
View File

@ -0,0 +1,816 @@
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/mtd/nand.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK0 0x0001
#define DEVICE_RESET__BANK1 0x0002
#define DEVICE_RESET__BANK2 0x0004
#define DEVICE_RESET__BANK3 0x0008
#define TRANSFER_SPARE_REG 0x10
#define TRANSFER_SPARE_REG__FLAG 0x0001
#define LOAD_WAIT_CNT 0x20
#define LOAD_WAIT_CNT__VALUE 0xffff
#define PROGRAM_WAIT_CNT 0x30
#define PROGRAM_WAIT_CNT__VALUE 0xffff
#define ERASE_WAIT_CNT 0x40
#define ERASE_WAIT_CNT__VALUE 0xffff
#define INT_MON_CYCCNT 0x50
#define INT_MON_CYCCNT__VALUE 0xffff
#define RB_PIN_ENABLED 0x60
#define RB_PIN_ENABLED__BANK0 0x0001
#define RB_PIN_ENABLED__BANK1 0x0002
#define RB_PIN_ENABLED__BANK2 0x0004
#define RB_PIN_ENABLED__BANK3 0x0008
#define MULTIPLANE_OPERATION 0x70
#define MULTIPLANE_OPERATION__FLAG 0x0001
#define MULTIPLANE_READ_ENABLE 0x80
#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
#define COPYBACK_DISABLE 0x90
#define COPYBACK_DISABLE__FLAG 0x0001
#define CACHE_WRITE_ENABLE 0xa0
#define CACHE_WRITE_ENABLE__FLAG 0x0001
#define CACHE_READ_ENABLE 0xb0
#define CACHE_READ_ENABLE__FLAG 0x0001
#define PREFETCH_MODE 0xc0
#define PREFETCH_MODE__PREFETCH_EN 0x0001
#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
#define CHIP_ENABLE_DONT_CARE 0xd0
#define CHIP_EN_DONT_CARE__FLAG 0x01
#define ECC_ENABLE 0xe0
#define ECC_ENABLE__FLAG 0x0001
#define GLOBAL_INT_ENABLE 0xf0
#define GLOBAL_INT_EN_FLAG 0x01
#define WE_2_RE 0x100
#define WE_2_RE__VALUE 0x003f
#define ADDR_2_DATA 0x110
#define ADDR_2_DATA__VALUE 0x003f
#define RE_2_WE 0x120
#define RE_2_WE__VALUE 0x003f
#define ACC_CLKS 0x130
#define ACC_CLKS__VALUE 0x000f
#define NUMBER_OF_PLANES 0x140
#define NUMBER_OF_PLANES__VALUE 0x0007
#define PAGES_PER_BLOCK 0x150
#define PAGES_PER_BLOCK__VALUE 0xffff
#define DEVICE_WIDTH 0x160
#define DEVICE_WIDTH__VALUE 0x0003
#define DEVICE_MAIN_AREA_SIZE 0x170
#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
#define DEVICE_SPARE_AREA_SIZE 0x180
#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
#define TWO_ROW_ADDR_CYCLES 0x190
#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
#define MULTIPLANE_ADDR_RESTRICT 0x1a0
#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE 0x001f
#define READ_MODE 0x1c0
#define READ_MODE__VALUE 0x000f
#define WRITE_MODE 0x1d0
#define WRITE_MODE__VALUE 0x000f
#define COPYBACK_MODE 0x1e0
#define COPYBACK_MODE__VALUE 0x000f
#define RDWR_EN_LO_CNT 0x1f0
#define RDWR_EN_LO_CNT__VALUE 0x001f
#define RDWR_EN_HI_CNT 0x200
#define RDWR_EN_HI_CNT__VALUE 0x001f
#define MAX_RD_DELAY 0x210
#define MAX_RD_DELAY__VALUE 0x000f
#define CS_SETUP_CNT 0x220
#define CS_SETUP_CNT__VALUE 0x001f
#define SPARE_AREA_SKIP_BYTES 0x230
#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
#define SPARE_AREA_MARKER 0x240
#define SPARE_AREA_MARKER__VALUE 0xffff
#define DEVICES_CONNECTED 0x250
#define DEVICES_CONNECTED__VALUE 0x0007
#define DIE_MASK 0x260
#define DIE_MASK__VALUE 0x00ff
#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
#define WRITE_PROTECT 0x280
#define WRITE_PROTECT__FLAG 0x0001
#define RE_2_RE 0x290
#define RE_2_RE__VALUE 0x003f
#define MANUFACTURER_ID 0x300
#define MANUFACTURER_ID__VALUE 0x00ff
#define DEVICE_ID 0x310
#define DEVICE_ID__VALUE 0x00ff
#define DEVICE_PARAM_0 0x320
#define DEVICE_PARAM_0__VALUE 0x00ff
#define DEVICE_PARAM_1 0x330
#define DEVICE_PARAM_1__VALUE 0x00ff
#define DEVICE_PARAM_2 0x340
#define DEVICE_PARAM_2__VALUE 0x00ff
#define LOGICAL_PAGE_DATA_SIZE 0x350
#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
#define LOGICAL_PAGE_SPARE_SIZE 0x360
#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
#define REVISION 0x370
#define REVISION__VALUE 0xffff
#define ONFI_DEVICE_FEATURES 0x380
#define ONFI_DEVICE_FEATURES__VALUE 0x003f
#define ONFI_OPTIONAL_COMMANDS 0x390
#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
#define ONFI_TIMING_MODE 0x3a0
#define ONFI_TIMING_MODE__VALUE 0x003f
#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
#define FEATURES 0x3f0
#define FEATURES__N_BANKS 0x0003
#define FEATURES__ECC_MAX_ERR 0x003c
#define FEATURES__DMA 0x0040
#define FEATURES__CMD_DMA 0x0080
#define FEATURES__PARTITION 0x0100
#define FEATURES__XDMA_SIDEBAND 0x0200
#define FEATURES__GPREG 0x0400
#define FEATURES__INDEX_ADDR 0x0800
#define TRANSFER_MODE 0x400
#define TRANSFER_MODE__VALUE 0x0003
#define INTR_STATUS0 0x410
#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS0__ECC_ERR 0x0002
#define INTR_STATUS0__DMA_CMD_COMP 0x0004
#define INTR_STATUS0__TIME_OUT 0x0008
#define INTR_STATUS0__PROGRAM_FAIL 0x0010
#define INTR_STATUS0__ERASE_FAIL 0x0020
#define INTR_STATUS0__LOAD_COMP 0x0040
#define INTR_STATUS0__PROGRAM_COMP 0x0080
#define INTR_STATUS0__ERASE_COMP 0x0100
#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS0__LOCKED_BLK 0x0400
#define INTR_STATUS0__UNSUP_CMD 0x0800
#define INTR_STATUS0__INT_ACT 0x1000
#define INTR_STATUS0__RST_COMP 0x2000
#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
#define INTR_STATUS0__PAGE_XFER_INC 0x8000
#define INTR_EN0 0x420
#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN0__ECC_ERR 0x0002
#define INTR_EN0__DMA_CMD_COMP 0x0004
#define INTR_EN0__TIME_OUT 0x0008
#define INTR_EN0__PROGRAM_FAIL 0x0010
#define INTR_EN0__ERASE_FAIL 0x0020
#define INTR_EN0__LOAD_COMP 0x0040
#define INTR_EN0__PROGRAM_COMP 0x0080
#define INTR_EN0__ERASE_COMP 0x0100
#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN0__LOCKED_BLK 0x0400
#define INTR_EN0__UNSUP_CMD 0x0800
#define INTR_EN0__INT_ACT 0x1000
#define INTR_EN0__RST_COMP 0x2000
#define INTR_EN0__PIPE_CMD_ERR 0x4000
#define INTR_EN0__PAGE_XFER_INC 0x8000
#define PAGE_CNT0 0x430
#define PAGE_CNT0__VALUE 0x00ff
#define ERR_PAGE_ADDR0 0x440
#define ERR_PAGE_ADDR0__VALUE 0xffff
#define ERR_BLOCK_ADDR0 0x450
#define ERR_BLOCK_ADDR0__VALUE 0xffff
#define INTR_STATUS1 0x460
#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS1__ECC_ERR 0x0002
#define INTR_STATUS1__DMA_CMD_COMP 0x0004
#define INTR_STATUS1__TIME_OUT 0x0008
#define INTR_STATUS1__PROGRAM_FAIL 0x0010
#define INTR_STATUS1__ERASE_FAIL 0x0020
#define INTR_STATUS1__LOAD_COMP 0x0040
#define INTR_STATUS1__PROGRAM_COMP 0x0080
#define INTR_STATUS1__ERASE_COMP 0x0100
#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS1__LOCKED_BLK 0x0400
#define INTR_STATUS1__UNSUP_CMD 0x0800
#define INTR_STATUS1__INT_ACT 0x1000
#define INTR_STATUS1__RST_COMP 0x2000
#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
#define INTR_STATUS1__PAGE_XFER_INC 0x8000
#define INTR_EN1 0x470
#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN1__ECC_ERR 0x0002
#define INTR_EN1__DMA_CMD_COMP 0x0004
#define INTR_EN1__TIME_OUT 0x0008
#define INTR_EN1__PROGRAM_FAIL 0x0010
#define INTR_EN1__ERASE_FAIL 0x0020
#define INTR_EN1__LOAD_COMP 0x0040
#define INTR_EN1__PROGRAM_COMP 0x0080
#define INTR_EN1__ERASE_COMP 0x0100
#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN1__LOCKED_BLK 0x0400
#define INTR_EN1__UNSUP_CMD 0x0800
#define INTR_EN1__INT_ACT 0x1000
#define INTR_EN1__RST_COMP 0x2000
#define INTR_EN1__PIPE_CMD_ERR 0x4000
#define INTR_EN1__PAGE_XFER_INC 0x8000
#define PAGE_CNT1 0x480
#define PAGE_CNT1__VALUE 0x00ff
#define ERR_PAGE_ADDR1 0x490
#define ERR_PAGE_ADDR1__VALUE 0xffff
#define ERR_BLOCK_ADDR1 0x4a0
#define ERR_BLOCK_ADDR1__VALUE 0xffff
#define INTR_STATUS2 0x4b0
#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS2__ECC_ERR 0x0002
#define INTR_STATUS2__DMA_CMD_COMP 0x0004
#define INTR_STATUS2__TIME_OUT 0x0008
#define INTR_STATUS2__PROGRAM_FAIL 0x0010
#define INTR_STATUS2__ERASE_FAIL 0x0020
#define INTR_STATUS2__LOAD_COMP 0x0040
#define INTR_STATUS2__PROGRAM_COMP 0x0080
#define INTR_STATUS2__ERASE_COMP 0x0100
#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS2__LOCKED_BLK 0x0400
#define INTR_STATUS2__UNSUP_CMD 0x0800
#define INTR_STATUS2__INT_ACT 0x1000
#define INTR_STATUS2__RST_COMP 0x2000
#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
#define INTR_STATUS2__PAGE_XFER_INC 0x8000
#define INTR_EN2 0x4c0
#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN2__ECC_ERR 0x0002
#define INTR_EN2__DMA_CMD_COMP 0x0004
#define INTR_EN2__TIME_OUT 0x0008
#define INTR_EN2__PROGRAM_FAIL 0x0010
#define INTR_EN2__ERASE_FAIL 0x0020
#define INTR_EN2__LOAD_COMP 0x0040
#define INTR_EN2__PROGRAM_COMP 0x0080
#define INTR_EN2__ERASE_COMP 0x0100
#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN2__LOCKED_BLK 0x0400
#define INTR_EN2__UNSUP_CMD 0x0800
#define INTR_EN2__INT_ACT 0x1000
#define INTR_EN2__RST_COMP 0x2000
#define INTR_EN2__PIPE_CMD_ERR 0x4000
#define INTR_EN2__PAGE_XFER_INC 0x8000
#define PAGE_CNT2 0x4d0
#define PAGE_CNT2__VALUE 0x00ff
#define ERR_PAGE_ADDR2 0x4e0
#define ERR_PAGE_ADDR2__VALUE 0xffff
#define ERR_BLOCK_ADDR2 0x4f0
#define ERR_BLOCK_ADDR2__VALUE 0xffff
#define INTR_STATUS3 0x500
#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS3__ECC_ERR 0x0002
#define INTR_STATUS3__DMA_CMD_COMP 0x0004
#define INTR_STATUS3__TIME_OUT 0x0008
#define INTR_STATUS3__PROGRAM_FAIL 0x0010
#define INTR_STATUS3__ERASE_FAIL 0x0020
#define INTR_STATUS3__LOAD_COMP 0x0040
#define INTR_STATUS3__PROGRAM_COMP 0x0080
#define INTR_STATUS3__ERASE_COMP 0x0100
#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS3__LOCKED_BLK 0x0400
#define INTR_STATUS3__UNSUP_CMD 0x0800
#define INTR_STATUS3__INT_ACT 0x1000
#define INTR_STATUS3__RST_COMP 0x2000
#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
#define INTR_STATUS3__PAGE_XFER_INC 0x8000
#define INTR_EN3 0x510
#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN3__ECC_ERR 0x0002
#define INTR_EN3__DMA_CMD_COMP 0x0004
#define INTR_EN3__TIME_OUT 0x0008
#define INTR_EN3__PROGRAM_FAIL 0x0010
#define INTR_EN3__ERASE_FAIL 0x0020
#define INTR_EN3__LOAD_COMP 0x0040
#define INTR_EN3__PROGRAM_COMP 0x0080
#define INTR_EN3__ERASE_COMP 0x0100
#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN3__LOCKED_BLK 0x0400
#define INTR_EN3__UNSUP_CMD 0x0800
#define INTR_EN3__INT_ACT 0x1000
#define INTR_EN3__RST_COMP 0x2000
#define INTR_EN3__PIPE_CMD_ERR 0x4000
#define INTR_EN3__PAGE_XFER_INC 0x8000
#define PAGE_CNT3 0x520
#define PAGE_CNT3__VALUE 0x00ff
#define ERR_PAGE_ADDR3 0x530
#define ERR_PAGE_ADDR3__VALUE 0xffff
#define ERR_BLOCK_ADDR3 0x540
#define ERR_BLOCK_ADDR3__VALUE 0xffff
#define DATA_INTR 0x550
#define DATA_INTR__WRITE_SPACE_AV 0x0001
#define DATA_INTR__READ_DATA_AV 0x0002
#define DATA_INTR_EN 0x560
#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
#define DATA_INTR_EN__READ_DATA_AV 0x0002
#define GPREG_0 0x570
#define GPREG_0__VALUE 0xffff
#define GPREG_1 0x580
#define GPREG_1__VALUE 0xffff
#define GPREG_2 0x590
#define GPREG_2__VALUE 0xffff
#define GPREG_3 0x5a0
#define GPREG_3__VALUE 0xffff
#define ECC_THRESHOLD 0x600
#define ECC_THRESHOLD__VALUE 0x03ff
#define ECC_ERROR_BLOCK_ADDRESS 0x610
#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
#define ECC_ERROR_PAGE_ADDRESS 0x620
#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
#define ERR_CORRECTION_INFO 0x640
#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
#define DMA_ENABLE 0x700
#define DMA_ENABLE__FLAG 0x0001
#define IGNORE_ECC_DONE 0x710
#define IGNORE_ECC_DONE__FLAG 0x0001
#define DMA_INTR 0x720
#define DMA_INTR__TARGET_ERROR 0x0001
#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
#define DMA_INTR_EN 0x730
#define DMA_INTR_EN__TARGET_ERROR 0x0001
#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
#define TARGET_ERR_ADDR_LO 0x740
#define TARGET_ERR_ADDR_LO__VALUE 0xffff
#define TARGET_ERR_ADDR_HI 0x750
#define TARGET_ERR_ADDR_HI__VALUE 0xffff
#define CHNL_ACTIVE 0x760
#define CHNL_ACTIVE__CHANNEL0 0x0001
#define CHNL_ACTIVE__CHANNEL1 0x0002
#define CHNL_ACTIVE__CHANNEL2 0x0004
#define CHNL_ACTIVE__CHANNEL3 0x0008
#define ACTIVE_SRC_ID 0x800
#define ACTIVE_SRC_ID__VALUE 0x00ff
#define PTN_INTR 0x810
#define PTN_INTR__CONFIG_ERROR 0x0001
#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR__REG_ACCESS_ERROR 0x0020
#define PTN_INTR_EN 0x820
#define PTN_INTR_EN__CONFIG_ERROR 0x0001
#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
#define PERM_SRC_ID_0 0x830
#define PERM_SRC_ID_0__SRCID 0x00ff
#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_0 0x840
#define MIN_BLK_ADDR_0__VALUE 0xffff
#define MAX_BLK_ADDR_0 0x850
#define MAX_BLK_ADDR_0__VALUE 0xffff
#define MIN_MAX_BANK_0 0x860
#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
#define PERM_SRC_ID_1 0x870
#define PERM_SRC_ID_1__SRCID 0x00ff
#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_1 0x880
#define MIN_BLK_ADDR_1__VALUE 0xffff
#define MAX_BLK_ADDR_1 0x890
#define MAX_BLK_ADDR_1__VALUE 0xffff
#define MIN_MAX_BANK_1 0x8a0
#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
#define PERM_SRC_ID_2 0x8b0
#define PERM_SRC_ID_2__SRCID 0x00ff
#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_2 0x8c0
#define MIN_BLK_ADDR_2__VALUE 0xffff
#define MAX_BLK_ADDR_2 0x8d0
#define MAX_BLK_ADDR_2__VALUE 0xffff
#define MIN_MAX_BANK_2 0x8e0
#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
#define PERM_SRC_ID_3 0x8f0
#define PERM_SRC_ID_3__SRCID 0x00ff
#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_3 0x900
#define MIN_BLK_ADDR_3__VALUE 0xffff
#define MAX_BLK_ADDR_3 0x910
#define MAX_BLK_ADDR_3__VALUE 0xffff
#define MIN_MAX_BANK_3 0x920
#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
#define PERM_SRC_ID_4 0x930
#define PERM_SRC_ID_4__SRCID 0x00ff
#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_4 0x940
#define MIN_BLK_ADDR_4__VALUE 0xffff
#define MAX_BLK_ADDR_4 0x950
#define MAX_BLK_ADDR_4__VALUE 0xffff
#define MIN_MAX_BANK_4 0x960
#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
#define PERM_SRC_ID_5 0x970
#define PERM_SRC_ID_5__SRCID 0x00ff
#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_5 0x980
#define MIN_BLK_ADDR_5__VALUE 0xffff
#define MAX_BLK_ADDR_5 0x990
#define MAX_BLK_ADDR_5__VALUE 0xffff
#define MIN_MAX_BANK_5 0x9a0
#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
#define PERM_SRC_ID_6 0x9b0
#define PERM_SRC_ID_6__SRCID 0x00ff
#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_6 0x9c0
#define MIN_BLK_ADDR_6__VALUE 0xffff
#define MAX_BLK_ADDR_6 0x9d0
#define MAX_BLK_ADDR_6__VALUE 0xffff
#define MIN_MAX_BANK_6 0x9e0
#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
#define PERM_SRC_ID_7 0x9f0
#define PERM_SRC_ID_7__SRCID 0x00ff
#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_7 0xa00
#define MIN_BLK_ADDR_7__VALUE 0xffff
#define MAX_BLK_ADDR_7 0xa10
#define MAX_BLK_ADDR_7__VALUE 0xffff
#define MIN_MAX_BANK_7 0xa20
#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
/* flash.h */
struct device_info_tag {
uint16_t wDeviceMaker;
uint16_t wDeviceID;
uint8_t bDeviceParam0;
uint8_t bDeviceParam1;
uint8_t bDeviceParam2;
uint32_t wDeviceType;
uint32_t wSpectraStartBlock;
uint32_t wSpectraEndBlock;
uint32_t wTotalBlocks;
uint16_t wPagesPerBlock;
uint16_t wPageSize;
uint16_t wPageDataSize;
uint16_t wPageSpareSize;
uint16_t wNumPageSpareFlag;
uint16_t wECCBytesPerSector;
uint32_t wBlockSize;
uint32_t wBlockDataSize;
uint32_t wDataBlockNum;
uint8_t bPlaneNum;
uint16_t wDeviceMainAreaSize;
uint16_t wDeviceSpareAreaSize;
uint16_t wDevicesConnected;
uint16_t wDeviceWidth;
uint16_t wHWRevision;
uint16_t wHWFeatures;
uint16_t wONFIDevFeatures;
uint16_t wONFIOptCommands;
uint16_t wONFITimingMode;
uint16_t wONFIPgmCacheTimingMode;
uint16_t MLCDevice;
uint16_t wSpareSkipBytes;
uint8_t nBitsInPageNumber;
uint8_t nBitsInPageDataSize;
uint8_t nBitsInBlockDataSize;
};
/* ffsdefs.h */
#define CLEAR 0 /*use this to clear a field instead of "fail"*/
#define SET 1 /*use this to set a field instead of "pass"*/
#define FAIL 1 /*failed flag*/
#define PASS 0 /*success flag*/
#define ERR -1 /*error flag*/
/* lld.h */
#define GOOD_BLOCK 0
#define DEFECTIVE_BLOCK 1
#define READ_ERROR 2
#define CLK_X 5
#define CLK_MULTI 4
/* ffsport.h */
#define VERBOSE 1
#define NAND_DBG_WARN 1
#define NAND_DBG_DEBUG 2
#define NAND_DBG_TRACE 3
#ifdef VERBOSE
#define nand_dbg_print(level, args...) \
do { \
if (level <= nand_debug_level) \
printk(KERN_ALERT args); \
} while (0)
#else
#define nand_dbg_print(level, args...)
#endif
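A short usage sketch for the macro above; it assumes nand_debug_level is defined elsewhere in the driver, and the denali pointer name is illustrative only:

nand_dbg_print(NAND_DBG_DEBUG, "%s: selected flash bank %d\n",
	       __func__, denali->flash_bank);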
/* spectraswconfig.h */
#define CMD_DMA 0
#define SPECTRA_PARTITION_ID 0
/**** Block Table and Reserved Block Parameters *****/
#define SPECTRA_START_BLOCK 3
#define NUM_FREE_BLOCKS_GATE 30
/* KBV - Updated to LNW scratch register address */
#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
#define SCRATCH_REG_SIZE 64
#define GLOB_HWCTL_DEFAULT_BLKS 2048
#define SUPPORT_15BITECC 1
#define SUPPORT_8BITECC 1
#define CUSTOM_CONF_PARAMS 0
#define ONFI_BLOOM_TIME 1
#define MODE5_WORKAROUND 0
/* lld_nand.h */
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _LLD_NAND_
#define _LLD_NAND_
#define MODE_00 0x00000000
#define MODE_01 0x04000000
#define MODE_10 0x08000000
#define MODE_11 0x0C000000
#define DATA_TRANSFER_MODE 0
#define PROTECTION_PER_BLOCK 1
#define LOAD_WAIT_COUNT 2
#define PROGRAM_WAIT_COUNT 3
#define ERASE_WAIT_COUNT 4
#define INT_MONITOR_CYCLE_COUNT 5
#define READ_BUSY_PIN_ENABLED 6
#define MULTIPLANE_OPERATION_SUPPORT 7
#define PRE_FETCH_MODE 8
#define CE_DONT_CARE_SUPPORT 9
#define COPYBACK_SUPPORT 10
#define CACHE_WRITE_SUPPORT 11
#define CACHE_READ_SUPPORT 12
#define NUM_PAGES_IN_BLOCK 13
#define ECC_ENABLE_SELECT 14
#define WRITE_ENABLE_2_READ_ENABLE 15
#define ADDRESS_2_DATA 16
#define READ_ENABLE_2_WRITE_ENABLE 17
#define TWO_ROW_ADDRESS_CYCLES 18
#define MULTIPLANE_ADDRESS_RESTRICT 19
#define ACC_CLOCKS 20
#define READ_WRITE_ENABLE_LOW_COUNT 21
#define READ_WRITE_ENABLE_HIGH_COUNT 22
#define ECC_SECTOR_SIZE 512
#define LLD_MAX_FLASH_BANKS 4
#define DENALI_BUF_SIZE NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE
struct nand_buf
{
int head;
int tail;
uint8_t buf[DENALI_BUF_SIZE];
dma_addr_t dma_buf;
};
#define INTEL_CE4100 1
#define INTEL_MRST 2
struct denali_nand_info {
struct mtd_info mtd;
struct nand_chip nand;
struct device_info_tag dev_info;
int flash_bank; /* currently selected chip */
int status;
int platform;
struct nand_buf buf;
struct pci_dev *dev;
int total_used_banks;
uint32_t block; /* stored for future use */
uint16_t page;
void __iomem *flash_reg; /* Mapped io reg base address */
void __iomem *flash_mem; /* Mapped io reg base address */
/* elements used by ISR */
struct completion complete;
spinlock_t irq_lock;
uint32_t irq_status;
int irq_debug_array[32];
int idx;
};
static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali);
static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali);
static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE);
#endif /*_LLD_NAND_*/

View File

@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
priv->ctrl = ctrl;
priv->dev = ctrl->dev;
priv->vbase = ioremap(res.start, res.end - res.start + 1);
priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(ctrl->dev, "failed to map chip region\n");
ret = -ENOMEM;
@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
if (ret)
goto err;
ret = nand_scan_ident(&priv->mtd, 1);
ret = nand_scan_ident(&priv->mtd, 1, NULL);
if (ret)
goto err;

View File

@ -49,7 +49,10 @@ struct fsl_upm_nand {
uint32_t wait_flags;
};
#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
{
return container_of(mtdinfo, struct fsl_upm_nand, mtd);
}
static int fun_chip_ready(struct mtd_info *mtd)
{
@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
FSL_UPM_WAIT_WRITE_BYTE;
fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
io_res.end - io_res.start + 1);
resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
goto err2;
@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev)
return 0;
}
static struct of_device_id of_fun_match[] = {
static const struct of_device_id of_fun_match[] = {
{ .compatible = "fsl,upm-nand" },
{},
};

View File

@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
release_mem_region(res->start, res->end - res->start + 1);
release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
release_mem_region(res->start, res->end - res->start + 1);
release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
{
void __iomem *ptr;
if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
release_mem_region(res->start, res->end - res->start + 1);
release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
@ -338,10 +338,10 @@ err_nwp:
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
release_mem_region(res1->start, res1->end - res1->start + 1);
release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
release_mem_region(res0->start, res0->end - res0->start + 1);
release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;

View File

@ -0,0 +1,917 @@
/*
* Copyright 2004-2008 Freescale Semiconductor, Inc.
* Copyright 2009 Semihalf.
*
* Approved as OSADL project by a majority of OSADL members and funded
* by OSADL membership fees in 2009; for details see www.osadl.org.
*
* Based on original driver from Freescale Semiconductor
* written by John Rigby <jrigby@freescale.com> on basis
* of drivers/mtd/nand/mxc_nand.c. Reworked and extended
* by Piotr Ziecik <kosmo@semihalf.com>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/mpc5121.h>
/* Addresses for NFC MAIN RAM BUFFER areas */
#define NFC_MAIN_AREA(n) ((n) * 0x200)
/* Addresses for NFC SPARE BUFFER areas */
#define NFC_SPARE_BUFFERS 8
#define NFC_SPARE_LEN 0x40
#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
/* MPC5121 NFC registers */
#define NFC_BUF_ADDR 0x1E04
#define NFC_FLASH_ADDR 0x1E06
#define NFC_FLASH_CMD 0x1E08
#define NFC_CONFIG 0x1E0A
#define NFC_ECC_STATUS1 0x1E0C
#define NFC_ECC_STATUS2 0x1E0E
#define NFC_SPAS 0x1E10
#define NFC_WRPROT 0x1E12
#define NFC_NF_WRPRST 0x1E18
#define NFC_CONFIG1 0x1E1A
#define NFC_CONFIG2 0x1E1C
#define NFC_UNLOCKSTART_BLK0 0x1E20
#define NFC_UNLOCKEND_BLK0 0x1E22
#define NFC_UNLOCKSTART_BLK1 0x1E24
#define NFC_UNLOCKEND_BLK1 0x1E26
#define NFC_UNLOCKSTART_BLK2 0x1E28
#define NFC_UNLOCKEND_BLK2 0x1E2A
#define NFC_UNLOCKSTART_BLK3 0x1E2C
#define NFC_UNLOCKEND_BLK3 0x1E2E
/* Bit Definitions: NFC_BUF_ADDR */
#define NFC_RBA_MASK (7 << 0)
#define NFC_ACTIVE_CS_SHIFT 5
#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
/* Bit Definitions: NFC_CONFIG */
#define NFC_BLS_UNLOCKED (1 << 1)
/* Bit Definitions: NFC_CONFIG1 */
#define NFC_ECC_4BIT (1 << 0)
#define NFC_FULL_PAGE_DMA (1 << 1)
#define NFC_SPARE_ONLY (1 << 2)
#define NFC_ECC_ENABLE (1 << 3)
#define NFC_INT_MASK (1 << 4)
#define NFC_BIG_ENDIAN (1 << 5)
#define NFC_RESET (1 << 6)
#define NFC_CE (1 << 7)
#define NFC_ONE_CYCLE (1 << 8)
#define NFC_PPB_32 (0 << 9)
#define NFC_PPB_64 (1 << 9)
#define NFC_PPB_128 (2 << 9)
#define NFC_PPB_256 (3 << 9)
#define NFC_PPB_MASK (3 << 9)
#define NFC_FULL_PAGE_INT (1 << 11)
/* Bit Definitions: NFC_CONFIG2 */
#define NFC_COMMAND (1 << 0)
#define NFC_ADDRESS (1 << 1)
#define NFC_INPUT (1 << 2)
#define NFC_OUTPUT (1 << 3)
#define NFC_ID (1 << 4)
#define NFC_STATUS (1 << 5)
#define NFC_CMD_FAIL (1 << 15)
#define NFC_INT (1 << 15)
/* Bit Definitions: NFC_WRPROT */
#define NFC_WPC_LOCK_TIGHT (1 << 0)
#define NFC_WPC_LOCK (1 << 1)
#define NFC_WPC_UNLOCK (1 << 2)
#define DRV_NAME "mpc5121_nfc"
/* Timeouts */
#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
struct mpc5121_nfc_prv {
struct mtd_info mtd;
struct nand_chip chip;
int irq;
void __iomem *regs;
struct clk *clk;
wait_queue_head_t irq_waitq;
uint column;
int spareonly;
void __iomem *csreg;
struct device *dev;
};
static void mpc5121_nfc_done(struct mtd_info *mtd);
#ifdef CONFIG_MTD_PARTITIONS
static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
#endif
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
return in_be16(prv->regs + reg);
}
/* Write NFC register */
static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
out_be16(prv->regs + reg, val);
}
/* Set bits in NFC register */
static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
{
nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
}
/* Clear bits in NFC register */
static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
{
nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
}
/* Invoke address cycle */
static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
{
nfc_write(mtd, NFC_FLASH_ADDR, addr);
nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
mpc5121_nfc_done(mtd);
}
/* Invoke command cycle */
static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
{
nfc_write(mtd, NFC_FLASH_CMD, cmd);
nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
mpc5121_nfc_done(mtd);
}
/* Send data from NFC buffers to NAND flash */
static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
mpc5121_nfc_done(mtd);
}
/* Receive data from NAND flash */
static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
mpc5121_nfc_done(mtd);
}
/* Receive ID from NAND flash */
static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_ID);
mpc5121_nfc_done(mtd);
}
/* Receive status from NAND flash */
static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
mpc5121_nfc_done(mtd);
}
/* NFC interrupt handler */
static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
{
struct mtd_info *mtd = data;
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
wake_up(&prv->irq_waitq);
return IRQ_HANDLED;
}
/* Wait for operation complete */
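/*
 * Flow: the interrupt handler above sets NFC_INT_MASK and wakes this
 * waiter; here the mask bit is cleared again, the code waits (with a
 * timeout) for NFC_INT in NFC_CONFIG2, and the status bit is cleared
 * before returning.
 */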
static void mpc5121_nfc_done(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
int rv;
if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
rv = wait_event_timeout(prv->irq_waitq,
(nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
if (!rv)
dev_warn(prv->dev,
"Timeout while waiting for interrupt.\n");
}
nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
}
/* Do address cycle(s) */
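/*
 * Example, derived from the code below: on a large-page device the
 * column is sent as two address cycles (low byte, then high byte) and
 * the page number as one byte per cycle until chip->pagemask is
 * exhausted, e.g. three row cycles for a 24-bit page mask.
 */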
static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
{
struct nand_chip *chip = mtd->priv;
u32 pagemask = chip->pagemask;
if (column != -1) {
mpc5121_nfc_send_addr(mtd, column);
if (mtd->writesize > 512)
mpc5121_nfc_send_addr(mtd, column >> 8);
}
if (page != -1) {
do {
mpc5121_nfc_send_addr(mtd, page & 0xFF);
page >>= 8;
pagemask >>= 8;
} while (pagemask);
}
}
/* Control chip select signals */
static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
{
if (chip < 0) {
nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
return;
}
nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
NFC_ACTIVE_CS_MASK);
nfc_set(mtd, NFC_CONFIG1, NFC_CE);
}
/* Init external chip select logic on ADS5121 board */
static int ads5121_chipselect_init(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
struct device_node *dn;
dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
if (dn) {
prv->csreg = of_iomap(dn, 0);
of_node_put(dn);
if (!prv->csreg)
return -ENOMEM;
/* CPLD Register 9 controls NAND /CE Lines */
prv->csreg += 9;
return 0;
}
return -EINVAL;
}
/* Control chips select signal on ADS5121 board */
static void ads5121_select_chip(struct mtd_info *mtd, int chip)
{
struct nand_chip *nand = mtd->priv;
struct mpc5121_nfc_prv *prv = nand->priv;
u8 v;
v = in_8(prv->csreg);
v |= 0x0F;
if (chip >= 0) {
mpc5121_nfc_select_chip(mtd, 0);
v &= ~(1 << chip);
} else
mpc5121_nfc_select_chip(mtd, -1);
out_8(prv->csreg, v);
}
/* Read NAND Ready/Busy signal */
static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
{
/*
* NFC handles ready/busy signal internally. Therefore, this function
* always returns status as ready.
*/
return 1;
}
/* Write command to NAND flash */
static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
int column, int page)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
prv->column = (column >= 0) ? column : 0;
prv->spareonly = 0;
switch (command) {
case NAND_CMD_PAGEPROG:
mpc5121_nfc_send_prog_page(mtd);
break;
/*
* NFC does not support sub-page reads and writes,
* so emulate them using full page transfers.
*/
case NAND_CMD_READ0:
column = 0;
break;
case NAND_CMD_READ1:
prv->column += 256;
command = NAND_CMD_READ0;
column = 0;
break;
case NAND_CMD_READOOB:
prv->spareonly = 1;
command = NAND_CMD_READ0;
column = 0;
break;
case NAND_CMD_SEQIN:
mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
column = 0;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_READID:
case NAND_CMD_STATUS:
break;
default:
return;
}
mpc5121_nfc_send_cmd(mtd, command);
mpc5121_nfc_addr_cycle(mtd, column, page);
switch (command) {
case NAND_CMD_READ0:
if (mtd->writesize > 512)
mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
mpc5121_nfc_send_read_page(mtd);
break;
case NAND_CMD_READID:
mpc5121_nfc_send_read_id(mtd);
break;
case NAND_CMD_STATUS:
mpc5121_nfc_send_read_status(mtd);
if (chip->options & NAND_BUSWIDTH_16)
prv->column = 1;
else
prv->column = 0;
break;
}
}
/* Copy data from/to NFC spare buffers. */
static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
u8 *buffer, uint size, int wr)
{
struct nand_chip *nand = mtd->priv;
struct mpc5121_nfc_prv *prv = nand->priv;
uint o, s, sbsize, blksize;
/*
* NAND spare area is available through NFC spare buffers.
* The NFC divides spare area into (page_size / 512) chunks.
* Each chunk is placed into separate spare memory area, using
* first (spare_size / num_of_chunks) bytes of the buffer.
*
* For NAND device in which the spare area is not divided fully
* by the number of chunks, number of used bytes in each spare
* buffer is rounded down to the nearest even number of bytes,
* and all remaining bytes are added to the last used spare area.
*
* For more information read section 26.6.10 of MPC5121e
* Microcontroller Reference Manual, Rev. 3.
*/
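/*
 * Worked example of the formula below: with a 4096-byte page and a
 * 218-byte spare area there are 8 chunks, so sbsize = (218 / 8) & ~1
 * = 26; spare buffers 0..6 hold 26 bytes each and buffer 7 holds the
 * remaining 36 bytes.
 */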
/* Calculate number of valid bytes in each spare buffer */
sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
while (size) {
/* Calculate spare buffer number */
s = offset / sbsize;
if (s > NFC_SPARE_BUFFERS - 1)
s = NFC_SPARE_BUFFERS - 1;
/*
* Calculate offset to requested data block in selected spare
* buffer and its size.
*/
o = offset - (s * sbsize);
blksize = min(sbsize - o, size);
if (wr)
memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
buffer, blksize);
else
memcpy_fromio(buffer,
prv->regs + NFC_SPARE_AREA(s) + o, blksize);
buffer += blksize;
offset += blksize;
size -= blksize;
}
}
/* Copy data from/to NFC main and spare buffers */
static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
int wr)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
uint c = prv->column;
uint l;
/* Handle spare area access */
if (prv->spareonly || c >= mtd->writesize) {
/* Calculate offset from beginning of spare area */
if (c >= mtd->writesize)
c -= mtd->writesize;
prv->column += len;
mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
return;
}
/*
* Handle main area access - limit copy length to prevent
* crossing main/spare boundary.
*/
l = min((uint)len, mtd->writesize - c);
prv->column += l;
if (wr)
memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
else
memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
/* Handle crossing main/spare boundary */
if (l != len) {
buf += l;
len -= l;
mpc5121_nfc_buf_copy(mtd, buf, len, wr);
}
}
/* Read data from NFC buffers */
static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
mpc5121_nfc_buf_copy(mtd, buf, len, 0);
}
/* Write data to NFC buffers */
static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
const u_char *buf, int len)
{
mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
}
/* Compare buffer with NAND flash */
static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
const u_char *buf, int len)
{
u_char tmp[256];
uint bsize;
while (len) {
bsize = min(len, 256);
mpc5121_nfc_read_buf(mtd, tmp, bsize);
if (memcmp(buf, tmp, bsize))
return 1;
buf += bsize;
len -= bsize;
}
return 0;
}
/* Read byte from NFC buffers */
static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
{
u8 tmp;
mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
return tmp;
}
/* Read word from NFC buffers */
static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
{
u16 tmp;
mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
return tmp;
}
/*
* Read NFC configuration from Reset Config Word
*
* NFC is configured during reset in basis of information stored
* in Reset Config Word. There is no other way to set NAND block
* size, spare size and bus width.
*/
static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
struct mpc512x_reset_module *rm;
struct device_node *rmnode;
uint rcw_pagesize = 0;
uint rcw_sparesize = 0;
uint rcw_width;
uint rcwh;
uint romloc, ps;
rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
if (!rmnode) {
dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
"node in device tree!\n");
return -ENODEV;
}
rm = of_iomap(rmnode, 0);
if (!rm) {
dev_err(prv->dev, "Error mapping reset module node!\n");
return -EBUSY;
}
rcwh = in_be32(&rm->rcwhr);
/* Bit 6: NFC bus width */
rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
/* Bit 7: NFC Page/Spare size */
ps = (rcwh >> 7) & 0x1;
/* Bits [22:21]: ROM Location */
romloc = (rcwh >> 21) & 0x3;
/* Decode RCW bits */
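/*
 * Example: ps = 1, romloc = 0 selects case 0x04 below, i.e. a
 * 2048-byte page with a 64-byte spare; ps = 0, romloc = 2 selects
 * case 0x02, i.e. a 4096-byte page with a 128-byte spare.
 */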
switch ((ps << 2) | romloc) {
case 0x00:
case 0x01:
rcw_pagesize = 512;
rcw_sparesize = 16;
break;
case 0x02:
case 0x03:
rcw_pagesize = 4096;
rcw_sparesize = 128;
break;
case 0x04:
case 0x05:
rcw_pagesize = 2048;
rcw_sparesize = 64;
break;
case 0x06:
case 0x07:
rcw_pagesize = 4096;
rcw_sparesize = 218;
break;
}
mtd->writesize = rcw_pagesize;
mtd->oobsize = rcw_sparesize;
if (rcw_width == 2)
chip->options |= NAND_BUSWIDTH_16;
dev_notice(prv->dev, "Configured for "
"%u-bit NAND, page size %u "
"with %u spare.\n",
rcw_width * 8, rcw_pagesize,
rcw_sparesize);
iounmap(rm);
of_node_put(rmnode);
return 0;
}
/* Free driver resources */
static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
if (prv->clk) {
clk_disable(prv->clk);
clk_put(prv->clk);
}
if (prv->csreg)
iounmap(prv->csreg);
}
static int __devinit mpc5121_nfc_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *rootnode, *dn = op->node;
struct device *dev = &op->dev;
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const uint *chips_no;
int resettime = 0;
int retval = 0;
int rev, len;
/*
* Check SoC revision. This driver supports only NFC
* in MPC5121 revision 2 and MPC5123 revision 3.
*/
rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
if ((rev != 2) && (rev != 3)) {
dev_err(dev, "SoC revision %u is not supported!\n", rev);
return -ENXIO;
}
prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
if (!prv) {
dev_err(dev, "Memory exhausted!\n");
return -ENOMEM;
}
mtd = &prv->mtd;
chip = &prv->chip;
mtd->priv = chip;
chip->priv = prv;
prv->dev = dev;
/* Read NFC configuration from Reset Config Word */
retval = mpc5121_nfc_read_hw_config(mtd);
if (retval) {
dev_err(dev, "Unable to read NFC config!\n");
return retval;
}
prv->irq = irq_of_parse_and_map(dn, 0);
if (prv->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
return -EINVAL;
}
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
return retval;
}
chips_no = of_get_property(dn, "chips", &len);
if (!chips_no || len != sizeof(*chips_no)) {
dev_err(dev, "Invalid/missing 'chips' property!\n");
return -EINVAL;
}
regs_paddr = res.start;
regs_size = res.end - res.start + 1;
if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
return -EBUSY;
}
prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
if (!prv->regs) {
dev_err(dev, "Error mapping memory region!\n");
return -ENOMEM;
}
mtd->name = "MPC5121 NAND";
chip->dev_ready = mpc5121_nfc_dev_ready;
chip->cmdfunc = mpc5121_nfc_command;
chip->read_byte = mpc5121_nfc_read_byte;
chip->read_word = mpc5121_nfc_read_word;
chip->read_buf = mpc5121_nfc_read_buf;
chip->write_buf = mpc5121_nfc_write_buf;
chip->verify_buf = mpc5121_nfc_verify_buf;
chip->select_chip = mpc5121_nfc_select_chip;
chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
chip->ecc.mode = NAND_ECC_SOFT;
/* Support external chip-select logic on ADS5121 board */
rootnode = of_find_node_by_path("/");
if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
retval = ads5121_chipselect_init(mtd);
if (retval) {
dev_err(dev, "Chipselect init error!\n");
of_node_put(rootnode);
return retval;
}
chip->select_chip = ads5121_select_chip;
}
of_node_put(rootnode);
/* Enable NFC clock */
prv->clk = clk_get(dev, "nfc_clk");
if (!prv->clk) {
dev_err(dev, "Unable to acquire NFC clock!\n");
retval = -ENODEV;
goto error;
}
clk_enable(prv->clk);
/* Reset NAND Flash controller */
nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
if (resettime++ >= NFC_RESET_TIMEOUT) {
dev_err(dev, "Timeout while resetting NFC!\n");
retval = -EINVAL;
goto error;
}
udelay(1);
}
/* Enable write to NFC memory */
nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
/* Enable write to all NAND pages */
nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
/*
* Setup NFC:
* - Big Endian transfers,
* - Interrupt after full page read/write.
*/
nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
NFC_FULL_PAGE_INT);
/* Set spare area size */
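/* (NFC_SPAS apparently takes the spare size in 16-bit words, hence the shift) */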
nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
init_waitqueue_head(&prv->irq_waitq);
retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
mtd);
if (retval) {
dev_err(dev, "Error requesting IRQ!\n");
goto error;
}
/* Detect NAND chips */
if (nand_scan(mtd, *chips_no)) {
dev_err(dev, "NAND Flash not found !\n");
devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
/* Set erase block size */
switch (mtd->erasesize / mtd->writesize) {
case 32:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
break;
case 64:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
break;
case 128:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
break;
case 256:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
break;
default:
dev_err(dev, "Unsupported NAND flash!\n");
devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
#ifdef CONFIG_MTD_PARTITIONS
retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
if (retval == 0)
retval = of_mtd_parse_partitions(dev, dn, &parts);
#endif
if (retval < 0) {
dev_err(dev, "Error parsing MTD partitions!\n");
devm_free_irq(dev, prv->irq, mtd);
retval = -EINVAL;
goto error;
}
if (retval > 0)
retval = add_mtd_partitions(mtd, parts, retval);
else
#endif
retval = add_mtd_device(mtd);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
devm_free_irq(dev, prv->irq, mtd);
goto error;
}
return 0;
error:
mpc5121_nfc_free(dev, mtd);
return retval;
}
static int __devexit mpc5121_nfc_remove(struct of_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
struct nand_chip *chip = mtd->priv;
struct mpc5121_nfc_prv *prv = chip->priv;
nand_release(mtd);
devm_free_irq(dev, prv->irq, mtd);
mpc5121_nfc_free(dev, mtd);
return 0;
}
static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
static struct of_platform_driver mpc5121_nfc_driver = {
.match_table = mpc5121_nfc_match,
.probe = mpc5121_nfc_probe,
.remove = __devexit_p(mpc5121_nfc_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init mpc5121_nfc_init(void)
{
return of_register_platform_driver(&mpc5121_nfc_driver);
}
module_init(mpc5121_nfc_init);
static void __exit mpc5121_nfc_cleanup(void)
{
of_unregister_platform_driver(&mpc5121_nfc_driver);
}
module_exit(mpc5121_nfc_cleanup);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
MODULE_LICENSE("GPL");

View File

@ -38,7 +38,7 @@
#define DRIVER_NAME "mxc_nand"
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
/* Addresses for NFC registers */
#define NFC_BUF_SIZE 0xE00
@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
struct mxc_nand_host *host = dev_id;
uint16_t tmp;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_INT_MSK; /* Disable interrupt */
writew(tmp, host->regs + NFC_CONFIG1);
disable_irq_nosync(irq);
wake_up(&host->irq_waitq);
@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
*/
static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
uint32_t tmp;
int max_retries = 2000;
uint16_t tmp;
int max_retries = 8000;
if (useirq) {
if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
tmp = readw(host->regs + NFC_CONFIG1);
tmp &= ~NFC_INT_MSK; /* Enable interrupt */
writew(tmp, host->regs + NFC_CONFIG1);
enable_irq(host->irq);
wait_event(host->irq_waitq,
readw(host->regs + NFC_CONFIG2) & NFC_INT);
@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, host->regs + NFC_FLASH_CMD);
writew(NFC_CMD, host->regs + NFC_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, useirq);
if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
int max_retries = 100;
/* Reset completion is indicated by NFC_CONFIG2 */
/* being set to 0 */
while (max_retries-- > 0) {
if (readw(host->regs + NFC_CONFIG2) == 0) {
break;
}
udelay(1);
}
if (max_retries < 0)
DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
__func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
}
}
/* This function sends an address (or partial address) to the
@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}
static void preset(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
uint16_t tmp;
/* enable interrupt, disable spare enable */
tmp = readw(host->regs + NFC_CONFIG1);
tmp &= ~NFC_INT_MSK;
tmp &= ~NFC_SP_EN;
if (nand_chip->ecc.mode == NAND_ECC_HW) {
tmp |= NFC_ECC_EN;
} else {
tmp &= ~NFC_ECC_EN;
}
writew(tmp, host->regs + NFC_CONFIG1);
/* preset operation */
/* Unlock the internal RAM Buffer */
writew(0x2, host->regs + NFC_CONFIG);
/* Blocks to be unlocked */
if (nfc_is_v21()) {
writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
} else if (nfc_is_v1()) {
writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
} else
BUG();
/* Unlock Block Command for given address range */
writew(0x4, host->regs + NFC_WRPROT);
}
/* Used by the upper layer to write command to NAND Flash for
* different operations to be carried out on NAND Flash */
static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
case NAND_CMD_RESET:
send_cmd(host, command, false);
preset(mtd);
break;
case NAND_CMD_STATUS:
host->buf_start = 0;
@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
uint16_t tmp;
int err = 0, nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->spare_len = 64;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
host->regs = host->base;
host->spare0 = host->base + 0x800;
host->spare_len = 16;
oob_smallpage = &nandv1_hw_eccoob_smallpage;
oob_largepage = &nandv1_hw_eccoob_largepage;
} else
BUG();
/* disable interrupt and spare enable */
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_INT_MSK;
tmp &= ~NFC_SP_EN;
writew(tmp, host->regs + NFC_CONFIG1);
init_waitqueue_head(&host->irq_waitq);
host->irq = platform_get_irq(pdev, 0);
err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
if (err)
goto eirq;
/* Reset NAND */
this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
/* preset operation */
/* Unlock the internal RAM Buffer */
writew(0x2, host->regs + NFC_CONFIG);
/* Blocks to be unlocked */
if (nfc_is_v21()) {
writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
this->ecc.bytes = 3;
} else
BUG();
/* Unlock Block Command for given address range */
writew(0x4, host->regs + NFC_WRPROT);
this->ecc.size = 512;
this->ecc.layout = oob_smallpage;
@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.hwctl = mxc_nand_enable_hwecc;
this->ecc.correct = mxc_nand_correct_data;
this->ecc.mode = NAND_ECC_HW;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
} else {
this->ecc.mode = NAND_ECC_SOFT;
tmp = readw(host->regs + NFC_CONFIG1);
tmp &= ~NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
}
/* NAND bus width determines access functions used by upper layer */
@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_USE_FLASH_BBT;
}
init_waitqueue_head(&host->irq_waitq);
host->irq = platform_get_irq(pdev, 0);
err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
if (err)
goto eirq;
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
goto escan;
}
@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
if (mtd) {
ret = mtd->suspend(mtd);
/* Disable the NFC clock */
clk_disable(host->clk);
}
ret = mtd->suspend(mtd);
/*
* nand_suspend locks the device for exclusive access, so
* the clock must already be off.
*/
BUG_ON(!ret && host->clk_act);
return ret;
}
@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev)
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
if (mtd) {
/* Enable the NFC clock */
clk_enable(host->clk);
mtd->resume(mtd);
}
mtd->resume(mtd);
return ret;
}

View File

@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
static int check_offs_len(struct mtd_info *mtd,
loff_t ofs, uint64_t len)
{
struct nand_chip *chip = mtd->priv;
int ret = 0;
/* Start address must align on block boundary */
if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
if (len & ((1 << chip->phys_erase_shift) - 1)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
__func__);
ret = -EINVAL;
}
/* Do not allow past end of device */
if (ofs + len > mtd->size) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
__func__);
ret = -EINVAL;
}
return ret;
}
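/*
 * Example: with 128 KiB erase blocks (phys_erase_shift == 17), both ofs
 * and len must be multiples of 0x20000 and ofs + len must not extend
 * past mtd->size.
 */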
/**
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
if (chip->options & NAND_BB_LAST_PAGE)
ofs += mtd->erasesize - mtd->writesize;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
if (getchip) {
@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
if ((bad & 0xFF) != 0xff)
res = 1;
else
bad &= 0xFF;
} else {
chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
if (chip->read_byte(mtd) != 0xff)
res = 1;
bad = chip->read_byte(mtd);
}
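/*
 * A marker byte counts as good only if it has at least 'badblockbits'
 * bits set, so a few bitflips in an erased (0xff) marker no longer make
 * a good block look bad.
 */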
if (likely(chip->badblockbits == 8))
res = bad != 0xFF;
else
res = hweight8(bad) < chip->badblockbits;
if (getchip)
nand_release_device(mtd);
@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int block, ret;
if (chip->options & NAND_BB_LAST_PAGE)
ofs += mtd->erasesize - mtd->writesize;
/* Get block number */
block = (int)(ofs >> chip->bbt_erase_shift);
if (chip->bbt)
@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
/* broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
chip->state = FL_PM_SUSPENDED;
spin_unlock(lock);
return 0;
} else {
spin_unlock(lock);
return -EAGAIN;
}
}
set_current_state(TASK_UNINTERRUPTIBLE);
@ -834,6 +875,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
return status;
}
/**
* __nand_unlock - [REPLACEABLE] unlocks the specified locked blocks
*
* @param mtd - mtd info
* @param ofs - offset to start unlock from
* @param len - length to unlock
* @invert - when = 0, unlock the range of blocks within the lower and
* upper boundary address
* when = 1, unlock the range of blocks outside the boundaries
* of the lower and upper boundary address
*
* @return - unlock status
*/
static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
uint64_t len, int invert)
{
int ret = 0;
int status, page;
struct nand_chip *chip = mtd->priv;
/* Submit address of first page to unlock */
page = ofs >> chip->page_shift;
chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
/* Submit address of last page to unlock */
page = (ofs + len) >> chip->page_shift;
chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
(page | invert) & chip->pagemask);
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
udelay(1000);
/* See if device thinks it succeeded */
if (status & 0x01) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
__func__, status);
ret = -EIO;
}
return ret;
}
/**
* nand_unlock - [REPLACEABLE] unlocks the specified locked blocks
*
* @param mtd - mtd info
* @param ofs - offset to start unlock from
* @param len - length to unlock
*
* @return - unlock status
*/
int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret = 0;
int chipnr;
struct nand_chip *chip = mtd->priv;
DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
ret = -EINVAL;
/* Align to last block address if size addresses end of the device */
if (ofs + len == mtd->size)
len -= mtd->erasesize;
nand_get_device(chip, mtd, FL_UNLOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
__func__);
ret = -EIO;
goto out;
}
ret = __nand_unlock(mtd, ofs, len, 0);
out:
/* de-select the NAND device */
chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
}
/**
* nand_lock - [REPLACEABLE] locks all blocks present in the device
*
* @param mtd - mtd info
* @param ofs - offset to start unlock from
* @param len - length to unlock
*
* @return - lock status
*
* This feature is not supported in many NAND parts. 'Micron' NAND parts
* do have this feature, but it only allows locking all blocks, not a
* specified range of blocks.
*
* Implementing 'lock' feature by making use of 'unlock', for now.
*/
int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret = 0;
int chipnr, status, page;
struct nand_chip *chip = mtd->priv;
DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
ret = -EINVAL;
nand_get_device(chip, mtd, FL_LOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
__func__);
status = MTD_ERASE_FAILED;
ret = -EIO;
goto out;
}
/* Submit address of first page to lock */
page = ofs >> chip->page_shift;
chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
udelay(1000);
/* See if device thinks it succeeded */
if (status & 0x01) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
__func__, status);
ret = -EIO;
goto out;
}
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
/* de-select the NAND device */
chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
}
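/*
 * Hypothetical usage sketch (not taken from an in-tree caller): a board
 * driver could call nand_unlock(mtd, 0, mtd->size) after nand_scan() to
 * make the whole device writable, and nand_lock(mtd, 0, mtd->size) to
 * lock it again before shutdown.
 */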
/**
* nand_read_page_raw - [Intern] read raw page data without ecc
* @mtd: mtd info structure
@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
mtd->oobavail : mtd->oobsize;
uint8_t *bufpoi, *oob, *buf;
stats = mtd->ecc_stats;
@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf += bytes;
if (unlikely(oob)) {
/* Raw mode does data:oob:data:oob */
if (ops->mode != MTD_OOB_RAW) {
int toread = min(oobreadlen,
chip->ecc.layout->oobavail);
if (toread) {
oob = nand_transfer_oob(chip,
oob, ops, toread);
oobreadlen -= toread;
}
} else
buf = nand_transfer_oob(chip,
buf, ops, mtd->oobsize);
int toread = min(oobreadlen, max_oobsize);
if (toread) {
oob = nand_transfer_oob(chip,
oob, ops, toread);
oobreadlen -= toread;
}
}
if (!(chip->options & NAND_NO_READRDY)) {
@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* @oob: oob data buffer
* @ops: oob ops structure
*/
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops)
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
struct mtd_oob_ops *ops)
{
size_t len = ops->ooblen;
switch(ops->mode) {
case MTD_OOB_PLACE:
@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int chipnr, realpage, page, blockmask, column;
struct nand_chip *chip = mtd->priv;
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
mtd->oobavail : mtd->oobsize;
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (likely(!oob))
memset(chip->oob_poi, 0xff, mtd->oobsize);
/* Don't allow multipage oob writes with offset */
if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
return -EINVAL;
while(1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
wbuf = chip->buffers->databuf;
}
if (unlikely(oob))
oob = nand_fill_oob(chip, oob, ops);
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
oob = nand_fill_oob(chip, oob, len, ops);
oobwritelen -= len;
}
ret = chip->write_page(mtd, chip, wbuf, page, cached,
(ops->mode == MTD_OOB_RAW));
@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
memset(chip->oob_poi, 0xff, mtd->oobsize);
nand_fill_oob(chip, ops->oobbuf, ops);
nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
memset(chip->oob_poi, 0xff, mtd->oobsize);
@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
/* Start address must align on block boundary */
if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
}
/* Length must align on block boundary */
if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
__func__);
return -EINVAL;
}
/* Do not allow erase past end of device */
if ((instr->len + instr->addr) > mtd->size) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
__func__);
return -EINVAL;
}
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
int busw, int *maf_id)
int busw, int *maf_id,
struct nand_flash_dev *type)
{
struct nand_flash_dev *type = NULL;
int i, dev_id, maf_idx;
int tmp_id, tmp_manf;
u8 id_data[8];
/* Select the device */
chip->select_chip(mtd, 0);
@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
/* Read entire ID string */
tmp_manf = chip->read_byte(mtd);
tmp_id = chip->read_byte(mtd);
for (i = 0; i < 8; i++)
id_data[i] = chip->read_byte(mtd);
if (tmp_manf != *maf_id || tmp_id != dev_id) {
if (id_data[0] != *maf_id || id_data[1] != dev_id) {
printk(KERN_INFO "%s: second ID read did not match "
"%02x,%02x against %02x,%02x\n", __func__,
*maf_id, dev_id, tmp_manf, tmp_id);
*maf_id, dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
/* Lookup the flash id */
for (i = 0; nand_flash_ids[i].name != NULL; i++) {
if (dev_id == nand_flash_ids[i].id) {
type = &nand_flash_ids[i];
break;
}
}
if (!type)
type = nand_flash_ids;
for (; type->name != NULL; type++)
if (dev_id == type->id)
break;
if (!type->name)
return ERR_PTR(-ENODEV);
if (!mtd->name)
@ -2644,21 +2838,45 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (!type->pagesize) {
int extid;
/* The 3rd id byte holds MLC / multichip data */
chip->cellinfo = chip->read_byte(mtd);
chip->cellinfo = id_data[2];
/* The 4th id byte is the important one */
extid = chip->read_byte(mtd);
/* Calc pagesize */
mtd->writesize = 1024 << (extid & 0x3);
extid >>= 2;
/* Calc oobsize */
mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
extid >>= 2;
/* Calc blocksize. Blocksize is multiples of 64KiB */
mtd->erasesize = (64 * 1024) << (extid & 0x03);
extid >>= 2;
/* Get buswidth information */
busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
extid = id_data[3];
/*
* Field definitions are in the following datasheets:
* Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
* New style (6 byte ID): Samsung K9GAG08U0D (p.40)
*
* Check for wraparound + Samsung ID + nonzero 6th byte
* to decide what to do.
*/
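/*
 * Worked example of the new-style decode below, derived from the
 * shifts rather than a datasheet: extid = 0x55 yields a
 * 2048 << 1 = 4096-byte page, a 128-byte OOB (field value 0x01) and a
 * 128 KiB << 1 = 256 KiB erase block.
 */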
if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
id_data[0] == NAND_MFR_SAMSUNG &&
id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
extid >>= 2;
/* Calc oobsize */
mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
extid >>= 2;
/* Calc blocksize */
mtd->erasesize = (128 * 1024) <<
(((extid >> 1) & 0x04) | (extid & 0x03));
busw = 0;
} else {
/* Calc pagesize */
mtd->writesize = 1024 << (extid & 0x03);
extid >>= 2;
/* Calc oobsize */
mtd->oobsize = (8 << (extid & 0x01)) *
(mtd->writesize >> 9);
extid >>= 2;
/* Calc blocksize. Blocksize is multiples of 64KiB */
mtd->erasesize = (64 * 1024) << (extid & 0x03);
extid >>= 2;
/* Get buswidth information */
busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
}
} else {
/*
* Old devices have chip data hardcoded in the device id table
@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Set the bad block position */
chip->badblockpos = mtd->writesize > 512 ?
NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
chip->badblockbits = 8;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
/*
* Bad block marker is stored in the last page of each block
* on Samsung and Hynix MLC devices
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX))
chip->options |= NAND_BB_LAST_PAGE;
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
chip->erase_cmd = multi_erase_cmd;
@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
* nand_scan_ident - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: Number of chips to scan for
* @table: Alternative NAND ID table
*
* This is the first phase of the normal nand_scan() function. It
* reads the flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips)
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
{
int i, busw, nand_maf_id;
struct nand_chip *chip = mtd->priv;
@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
nand_set_defaults(chip, busw);
/* Read the flash type */
type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
MTD_CAP_NANDFLASH;
mtd->erase = nand_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
BUG();
}
ret = nand_scan_ident(mtd, maxchips);
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd)
kfree(chip->buffers);
}
EXPORT_SYMBOL_GPL(nand_lock);
EXPORT_SYMBOL_GPL(nand_unlock);
EXPORT_SYMBOL_GPL(nand_scan);
EXPORT_SYMBOL_GPL(nand_scan_ident);
EXPORT_SYMBOL_GPL(nand_scan_tail);

View File

@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
ops.datbuf = buf;
ops.len = len;
return mtd->read_oob(mtd, offs, &ops);
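/*
 * The loop below issues one read_oob call per page: each call reads at
 * most mtd->writesize data bytes plus that page's OOB into consecutive
 * regions of buf, instead of requesting the whole range in one call.
 */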
while (len > 0) {
if (len <= mtd->writesize) {
ops.oobbuf = buf + len;
ops.datbuf = buf;
ops.len = len;
return mtd->read_oob(mtd, offs, &ops);
} else {
ops.oobbuf = buf + mtd->writesize;
ops.datbuf = buf;
ops.len = mtd->writesize;
res = mtd->read_oob(mtd, offs, &ops);
if (res)
return res;
}
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
}
return 0;
}
/*
@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
if (this->options & NAND_BB_LAST_PAGE)
from += mtd->erasesize - (mtd->writesize * len);
for (i = startblock; i < numblocks;) {
int ret;

View File

@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
int numToRead = 16; /* There are 16 bytes per sector in the OOB */
/* ECC is already paused when this function is called */
if (pageSize != NAND_DATA_ACCESS_SIZE) {
/* skip BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
while (numToRead > numEccBytes) {
/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
if (pageSize == NAND_DATA_ACCESS_SIZE) {
while (numToRead > numEccBytes) {
/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
/* read ECC bytes before BI */
nand_bcm_umi_bch_resume_read_ecc_calc();
@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
}
nand_bcm_umi_bch_pause_read_ecc_calc();
@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
numToRead--;
}
/* read ECC bytes */
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead) {
}
/* read ECC bytes */
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp = REG_NAND_DATA8;
eccCalc[eccPos++] = *oobp;
oobp++;
*oobp = REG_NAND_DATA8;
eccCalc[eccPos++] = *oobp;
oobp++;
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
}
} else {
/* skip BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
while (numToRead > numEccBytes) {
/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
/* read ECC bytes */
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp = REG_NAND_DATA8;
eccCalc[eccPos++] = *oobp;
oobp++;
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
}
}
}

View File

@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = {
/* 1 Gigabit */
{"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
{"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},

View File

@ -80,6 +80,9 @@
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
#ifndef CONFIG_NANDSIM_MAX_PARTS
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif
static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
static unsigned long parts[MAX_MTD_DEVICES];
static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)");
MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
@ -288,7 +291,7 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
struct mtd_partition partitions[MAX_MTD_DEVICES];
struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
@ -312,7 +315,7 @@ struct nandsim {
union ns_mem buf;
/* NAND flash "geometry" */
struct nandsin_geometry {
struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
@ -331,7 +334,7 @@ struct nandsim {
} geom;
/* NAND flash internal registers */
struct nandsim_regs {
struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
@ -342,7 +345,7 @@ struct nandsim {
} regs;
/* NAND flash lines state */
struct ns_lines_status {
struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */

View File

@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
ret = -EIO;
goto err_unmap;
}
host->addr_va = ioremap(res->start, res->end - res->start + 1);
host->addr_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
if (!res) {
ret = -EIO;
goto err_unmap;
}
host->data_va = ioremap(res->start, res->end - res->start + 1);
host->data_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res) {
ret = -EIO;
goto err_unmap;
}
host->cmd_va = ioremap(res->start, res->end - res->start + 1);
host->cmd_va = ioremap(res->start, resource_size(res));
if (!host->addr_va || !host->data_va || !host->cmd_va) {
ret = -ENOMEM;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009 Nuvoton technology corporation.
* Copyright © 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
@ -55,7 +55,7 @@
#define write_addr_reg(dev, val) \
__raw_writel((val), (dev)->reg + REG_SMADDR)
struct w90p910_nand {
struct nuc900_nand {
struct mtd_info mtd;
struct nand_chip chip;
void __iomem *reg;
@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
}
};
static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
unsigned char ret;
struct w90p910_nand *nand;
struct nuc900_nand *nand;
nand = container_of(mtd, struct w90p910_nand, mtd);
nand = container_of(mtd, struct nuc900_nand, mtd);
ret = (unsigned char)read_data_reg(nand);
return ret;
}
static void w90p910_nand_read_buf(struct mtd_info *mtd,
unsigned char *buf, int len)
static void nuc900_nand_read_buf(struct mtd_info *mtd,
unsigned char *buf, int len)
{
int i;
struct w90p910_nand *nand;
struct nuc900_nand *nand;
nand = container_of(mtd, struct w90p910_nand, mtd);
nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
static void w90p910_nand_write_buf(struct mtd_info *mtd,
const unsigned char *buf, int len)
static void nuc900_nand_write_buf(struct mtd_info *mtd,
const unsigned char *buf, int len)
{
int i;
struct w90p910_nand *nand;
struct nuc900_nand *nand;
nand = container_of(mtd, struct w90p910_nand, mtd);
nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
}
static int w90p910_verify_buf(struct mtd_info *mtd,
const unsigned char *buf, int len)
static int nuc900_verify_buf(struct mtd_info *mtd,
const unsigned char *buf, int len)
{
int i;
struct w90p910_nand *nand;
struct nuc900_nand *nand;
nand = container_of(mtd, struct w90p910_nand, mtd);
nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++) {
if (buf[i] != (unsigned char)read_data_reg(nand))
@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
return 0;
}
static int w90p910_check_rb(struct w90p910_nand *nand)
static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
return val;
}
static int w90p910_nand_devready(struct mtd_info *mtd)
static int nuc900_nand_devready(struct mtd_info *mtd)
{
struct w90p910_nand *nand;
struct nuc900_nand *nand;
int ready;
nand = container_of(mtd, struct w90p910_nand, mtd);
nand = container_of(mtd, struct nuc900_nand, mtd);
ready = (w90p910_check_rb(nand)) ? 1 : 0;
ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
static void w90p910_nand_command_lp(struct mtd_info *mtd,
unsigned int command, int column, int page_addr)
static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
{
register struct nand_chip *chip = mtd->priv;
struct w90p910_nand *nand;
struct nuc900_nand *nand;
nand = container_of(mtd, struct w90p910_nand, mtd);
nand = container_of(mtd, struct nuc900_nand, mtd);
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
while (!w90p910_check_rb(nand))
while (!nuc900_check_rb(nand))
;
return;
@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
}
static void w90p910_nand_enable(struct w90p910_nand *nand)
static void nuc900_nand_enable(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
spin_unlock(&nand->lock);
}
static int __devinit w90p910_nand_probe(struct platform_device *pdev)
static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
struct w90p910_nand *w90p910_nand;
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
int retval;
struct resource *res;
retval = 0;
w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
if (!w90p910_nand)
nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
if (!nuc900_nand)
return -ENOMEM;
chip = &(w90p910_nand->chip);
chip = &(nuc900_nand->chip);
w90p910_nand->mtd.priv = chip;
w90p910_nand->mtd.owner = THIS_MODULE;
spin_lock_init(&w90p910_nand->lock);
nuc900_nand->mtd.priv = chip;
nuc900_nand->mtd.owner = THIS_MODULE;
spin_lock_init(&nuc900_nand->lock);
w90p910_nand->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(w90p910_nand->clk)) {
nuc900_nand->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(nuc900_nand->clk)) {
retval = -ENOENT;
goto fail1;
}
clk_enable(w90p910_nand->clk);
clk_enable(nuc900_nand->clk);
chip->cmdfunc = w90p910_nand_command_lp;
chip->dev_ready = w90p910_nand_devready;
chip->read_byte = w90p910_nand_read_byte;
chip->write_buf = w90p910_nand_write_buf;
chip->read_buf = w90p910_nand_read_buf;
chip->verify_buf = w90p910_verify_buf;
chip->cmdfunc = nuc900_nand_command_lp;
chip->dev_ready = nuc900_nand_devready;
chip->read_byte = nuc900_nand_read_byte;
chip->write_buf = nuc900_nand_write_buf;
chip->read_buf = nuc900_nand_read_buf;
chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
goto fail1;
}
w90p910_nand->reg = ioremap(res->start, resource_size(res));
if (!w90p910_nand->reg) {
nuc900_nand->reg = ioremap(res->start, resource_size(res));
if (!nuc900_nand->reg) {
retval = -ENOMEM;
goto fail2;
}
w90p910_nand_enable(w90p910_nand);
nuc900_nand_enable(nuc900_nand);
if (nand_scan(&(w90p910_nand->mtd), 1)) {
if (nand_scan(&(nuc900_nand->mtd), 1)) {
retval = -ENXIO;
goto fail3;
}
add_mtd_partitions(&(w90p910_nand->mtd), partitions,
add_mtd_partitions(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, w90p910_nand);
platform_set_drvdata(pdev, nuc900_nand);
return retval;
fail3: iounmap(w90p910_nand->reg);
fail3: iounmap(nuc900_nand->reg);
fail2: release_mem_region(res->start, resource_size(res));
fail1: kfree(w90p910_nand);
fail1: kfree(nuc900_nand);
return retval;
}
static int __devexit w90p910_nand_remove(struct platform_device *pdev)
static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
iounmap(w90p910_nand->reg);
iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
clk_disable(w90p910_nand->clk);
clk_put(w90p910_nand->clk);
clk_disable(nuc900_nand->clk);
clk_put(nuc900_nand->clk);
kfree(w90p910_nand);
kfree(nuc900_nand);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver w90p910_nand_driver = {
.probe = w90p910_nand_probe,
.remove = __devexit_p(w90p910_nand_remove),
static struct platform_driver nuc900_nand_driver = {
.probe = nuc900_nand_probe,
.remove = __devexit_p(nuc900_nand_remove),
.driver = {
.name = "w90p910-fmi",
.name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
static int __init w90p910_nand_init(void)
static int __init nuc900_nand_init(void)
{
return platform_driver_register(&w90p910_nand_driver);
return platform_driver_register(&nuc900_nand_driver);
}
static void __exit w90p910_nand_exit(void)
static void __exit nuc900_nand_exit(void)
{
platform_driver_unregister(&w90p910_nand_driver);
platform_driver_unregister(&nuc900_nand_driver);
}
module_init(w90p910_nand_init);
module_exit(w90p910_nand_exit);
module_init(nuc900_nand_init);
module_exit(nuc900_nand_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 nand driver!");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:w90p910-fmi");
MODULE_ALIAS("platform:nuc900-fmi");

View File

@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
u32 *p = (u32 *)buf;
/* take care of subpage reads */
for (; len % 4 != 0; ) {
*buf++ = __raw_readb(info->nand.IO_ADDR_R);
len--;
if (len % 4) {
if (info->nand.options & NAND_BUSWIDTH_16)
omap_read_buf16(mtd, buf, len % 4);
else
omap_read_buf8(mtd, buf, len % 4);
p = (u32 *) (buf + len % 4);
len -= len % 4;
}
p = (u32 *) buf;
/* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_write_buf_pref(mtd, buf, len);
else
/* start transfer in DMA mode */
omap_nand_dma_transfer(mtd, buf, len, 0x1);
omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
/**
@ -1028,7 +1031,8 @@ out_free_info:
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct omap_nand_info *info = mtd->priv;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
platform_set_drvdata(pdev, NULL);
if (use_dma)

View File

@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
struct resource *res;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd = (struct mtd_info *)(nc + 1);
io_base = ioremap(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
goto no_res;
}
io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
@ -120,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
if (board->dev_ready)
nc->dev_ready = board->dev_ready;
platform_set_drvdata(pdev, mtd);
if (nand_scan(mtd, 1)) {

View File

@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
return 0;
}
static struct of_device_id pasemi_nand_match[] =
static const struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",

View File

@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_irq;
}
if (mtd_has_cmdlinepart()) {
static const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
int nr_parts;
nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
if (nr_parts)
return add_mtd_partitions(mtd, parts, nr_parts);
}
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
fail_free_irq:

drivers/mtd/nand/r852.c (new file, 1140 lines)

File diff suppressed because it is too large

drivers/mtd/nand/r852.h (new file, 163 lines)
View File

@ -0,0 +1,163 @@
/*
* Copyright © 2009 - Maxim Levitsky
* driver for Ricoh xD readers
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/mtd/nand.h>
#include <linux/spinlock.h>
/* nand interface + ecc
byte write/read does one cycle on nand data lines.
dword write/read does 4 cycles
if R852_CTL_ECC_ACCESS is set in R852_CTL, then dword read reads
results of ecc correction, if DMA read was done before.
If a write was done, two dword reads return the generated ecc checksums
*/
#define R852_DATALINE 0x00
/* control register */
#define R852_CTL 0x04
#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
#define R852_CTL_ON 0x04 /* only seems to control the hd led, */
/* but has to be set on start...*/
#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
/* card detection status */
#define R852_CARD_STA 0x05
#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
#define R852_CARD_STA_RO 0x02 /* card is readonly */
#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
/* card detection irq status & enable*/
#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
#define R852_CARD_IRQ_MASK 0x1D
/* hardware enable */
#define R852_HW 0x08
#define R852_HW_ENABLED 0x01 /* hw enabled */
#define R852_HW_UNKNOWN 0x80
/* dma capabilities */
#define R852_DMA_CAP 0x09
#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
/* hw is smartmedia */
#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
/* physical DMA address - 32 bit value*/
#define R852_DMA_ADDR 0x0C
/* dma settings */
#define R852_DMA_SETTINGS 0x10
#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
/* dma IRQ status */
#define R852_DMA_IRQ_STA 0x14
/* dma IRQ enable */
#define R852_DMA_IRQ_ENABLE 0x18
#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
/* ECC syndrome format - read from reg #0 will return two copies of these for
each half of the page.
first byte is error byte location, and second, bit location + flags */
#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
#define R852_ECC_CORRECTABLE 0x20 /* correctable error exists */
#define R852_ECC_FAIL 0x40 /* non correctable error detected */
#define R852_DMA_LEN 512
#define DMA_INTERNAL 0
#define DMA_MEMORY 1
struct r852_device {
void __iomem *mmio; /* mmio */
struct mtd_info *mtd; /* mtd backpointer */
struct nand_chip *chip; /* nand chip backpointer */
struct pci_dev *pci_dev; /* pci backpointer */
/* dma area */
dma_addr_t phys_dma_addr; /* bus address of buffer*/
struct completion dma_done; /* data transfer done */
dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
uint8_t *bounce_buffer; /* virtual address of bounce buffer */
int dma_dir; /* 1 = read, 0 = write */
int dma_stage; /* 0 - idle, 1 - first step,
2 - second step */
int dma_state; /* 0 = internal, 1 = memory */
int dma_error; /* dma errors */
int dma_usable; /* is it possible to use dma */
/* card status area */
struct delayed_work card_detect_work;
struct workqueue_struct *card_workqueue;
int card_registred; /* card registered with mtd */
int card_detected; /* card detected in slot */
int card_unstable; /* card was just inserted; its state is not yet known */
int readonly; /* card is readonly */
int sm; /* Is card smartmedia */
/* interrupt handling */
spinlock_t irqlock; /* IRQ protecting lock */
int irq; /* irq num */
int insuspend; /* device is suspended */
/* misc */
void *tmp_buffer; /* temporary buffer */
uint8_t ctlreg; /* cached contents of control reg */
};
#define DRV_NAME "r852"
#define dbg(format, ...) \
if (debug) \
printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
#define dbg_verbose(format, ...) \
if (debug > 1) \
printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
#define message(format, ...) \
printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
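
A hypothetical decoder for the syndrome format documented above: "loc" and "flags" are the two bytes read from reg #0 for one half of the page (error byte offset, then error bit plus status flags), and "half_page" is that half's data buffer. decode_ecc_syndrome() is an illustrative name only; the driver's real correction code is in r852.c, whose diff is suppressed above.

static int decode_ecc_syndrome(uint8_t loc, uint8_t flags, uint8_t *half_page)
{
	if (flags & R852_ECC_FAIL)
		return -1;	/* uncorrectable error in this half */

	if (flags & R852_ECC_CORRECTABLE)
		/* flip the failing bit at the reported byte offset */
		half_page[loc] ^= 1 << (flags & R852_ECC_ERR_BIT_MSK);

	return 0;		/* clean or corrected */
}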

View File

@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
info = kmalloc(sizeof(*info), GFP_KERNEL);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
memset(info, 0, sizeof(*info));
platform_set_drvdata(pdev, info);
spin_lock_init(&info->controller.lock);
@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* currently we assume we have the one resource */
res = pdev->resource;
size = res->end - res->start + 1;
size = resource_size(res);
info->area = request_mem_region(res->start, size, pdev->name);
@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
info->mtds = kmalloc(size, GFP_KERNEL);
info->mtds = kzalloc(size, GFP_KERNEL);
if (info->mtds == NULL) {
dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
memset(info->mtds, 0, size);
/* initialise all possible chips */
nmtd = info->mtds;
@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_init_chip(info, nmtd, sets);
nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
(sets) ? sets->nr_chips : 1);
(sets) ? sets->nr_chips : 1,
NULL);
if (nmtd->scan_res == 0) {
s3c2410_nand_update_chip(info, nmtd);

View File

@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
ret = nand_scan_ident(flctl_mtd, 1);
ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err;

View File

@ -0,0 +1,148 @@
/*
* Copyright © 2009 - Maxim Levitsky
* Common routines & support for xD format
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
.eccbytes = 6,
.eccpos = {8, 9, 10, 13, 14, 15},
.oobfree = {
{.offset = 0 , .length = 4}, /* reserved */
{.offset = 6 , .length = 2}, /* LBA1 */
{.offset = 11, .length = 2} /* LBA2 */
}
};
/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have page dependent oob layout */
/* However, it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
/* If you do not, then you break SmartMedia compliance anyway */
static struct nand_ecclayout nand_oob_sm_small = {
.eccbytes = 3,
.eccpos = {0, 1, 2},
.oobfree = {
{.offset = 3 , .length = 2}, /* reserved */
{.offset = 6 , .length = 2}, /* LBA1 */
}
};
static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_oob_ops ops;
struct sm_oob oob;
int ret, error = 0;
memset(&oob, -1, SM_OOB_SIZE);
oob.block_status = 0x0F;
/* As long as this function is called on erase block boundaries
it will work correctly for 256 byte nand */
ops.mode = MTD_OOB_PLACE;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = (void *)&oob;
ops.datbuf = NULL;
ret = mtd->write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
(int)ofs);
error = -EIO;
} else
mtd->ecc_stats.badblocks++;
return error;
}
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
{"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
{"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
{"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
{"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
{"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
{"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
{"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
{"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
{"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
{"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
{"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
{"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
{"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
{"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
{"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
{"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
{"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
{"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
{"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
{"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
{"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
{"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
{NULL,}
};
#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
static struct nand_flash_dev nand_xd_flash_ids[] = {
{"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
{"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
{"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
{"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
{"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
{"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
{"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
{"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
{NULL,}
};
int sm_register_device(struct mtd_info *mtd, int smartmedia)
{
struct nand_chip *chip = (struct nand_chip *)mtd->priv;
int ret;
chip->options |= NAND_SKIP_BBTSCAN;
/* Scan for card properties */
ret = nand_scan_ident(mtd, 1, smartmedia ?
nand_smartmedia_flash_ids : nand_xd_flash_ids);
if (ret)
return ret;
/* Bad block marker position */
chip->badblockpos = 0x05;
chip->badblockbits = 7;
chip->block_markbad = sm_block_markbad;
/* ECC layout */
if (mtd->writesize == SM_SECTOR_SIZE)
chip->ecc.layout = &nand_oob_sm;
else if (mtd->writesize == SM_SMALL_PAGE)
chip->ecc.layout = &nand_oob_sm_small;
else
return -ENODEV;
ret = nand_scan_tail(mtd);
if (ret)
return ret;
return add_mtd_device(mtd);
}
EXPORT_SYMBOL_GPL(sm_register_device);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Common SmartMedia/xD functions");
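
A minimal sketch of how a host controller driver is expected to hand its chip to sm_register_device(); the real caller added in this series is r852.c (diff suppressed above). "example_host" is a placeholder structure, the nand_chip's I/O callbacks are assumed to be set up already, and error handling is elided.

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include "sm_common.h"

struct example_host {			/* placeholder host-controller state */
	struct mtd_info mtd;
	struct nand_chip chip;
};

static int example_register(struct example_host *host, int is_smartmedia)
{
	host->mtd.priv = &host->chip;	/* sm_register_device() expects this link */
	host->mtd.owner = THIS_MODULE;

	/* scans against the SmartMedia or xD ID table, installs the oob
	 * layout and bad-block marking defined above, then finishes the
	 * scan and registers the MTD device */
	return sm_register_device(&host->mtd, is_smartmedia);
}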

View File

@ -0,0 +1,61 @@
/*
* Copyright © 2009 - Maxim Levitsky
* Common routines & support for SmartMedia/xD format
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
/* Full oob structure as written on the flash */
struct sm_oob {
uint32_t reserved;
uint8_t data_status;
uint8_t block_status;
uint8_t lba_copy1[2];
uint8_t ecc2[3];
uint8_t lba_copy2[2];
uint8_t ecc1[3];
} __attribute__((packed));
/* one sector is always 512 bytes, but it can consist of two nand pages */
#define SM_SECTOR_SIZE 512
/* oob area is also 16 bytes, but might be from two pages */
#define SM_OOB_SIZE 16
/* This is the maximum zone size, and all devices that have more than one zone
have this size */
#define SM_MAX_ZONE_SIZE 1024
/* support for small page nand */
#define SM_SMALL_PAGE 256
#define SM_SMALL_OOB_SIZE 8
extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
static inline int sm_sector_valid(struct sm_oob *oob)
{
return hweight16(oob->data_status) >= 5;
}
static inline int sm_block_valid(struct sm_oob *oob)
{
return hweight16(oob->block_status) >= 7;
}
static inline int sm_block_erased(struct sm_oob *oob)
{
static const uint32_t erased_pattern[4] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
/* First test for erased block */
if (!memcmp(oob, erased_pattern, sizeof(*oob)))
return 1;
return 0;
}
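
A short sketch of how the helpers above might be used when classifying a block from its oob; the policy shown (erased means free, invalid block status means skip) is an illustration under those assumptions, not code from sm_ftl.c.

static int example_classify(struct sm_oob *oob)
{
	if (sm_block_erased(oob))
		return 0;	/* fully erased: block is free for use */

	if (!sm_block_valid(oob))
		return -1;	/* block status says bad: skip it */

	if (!sm_sector_valid(oob))
		return -2;	/* sector data flagged invalid */

	return 1;		/* valid, mapped sector */
}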

View File

@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, host);
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto out;
}
@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
return 0;
}
static struct of_device_id socrates_nand_match[] =
static const struct of_device_id socrates_nand_match[] =
{
{
.compatible = "abb,socrates-nand",

View File

@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mfd_cell *cell = dev_get_platdata(&dev->dev);
int ret;
if (cell->enable) {
@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mfd_cell *cell = dev_get_platdata(&dev->dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mfd_cell *cell = dev_get_platdata(&dev->dev);
struct tmio_nand_data *data = cell->driver_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mfd_cell *cell = dev_get_platdata(&dev->dev);
if (cell->suspend)
cell->suspend(dev);
@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mfd_cell *cell = dev_get_platdata(&dev->dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
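
The tmio hunks above all make the same substitution: fetch the mfd_cell via dev_get_platdata() instead of casting dev->dev.platform_data by hand. A sketch of the pattern; "example_suspend" is an illustrative name, not part of the patch.

#include <linux/platform_device.h>
#include <linux/mfd/core.h>

static int example_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell *cell = dev_get_platdata(&dev->dev);

	if (cell->suspend)
		cell->suspend(dev);	/* run the MFD cell's own suspend hook */
	return 0;
}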

View File

@ -1,207 +0,0 @@
/*
* drivers/mtd/nand/ts7250.c
*
* Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
*
* Derived from drivers/mtd/nand/edb7312.c
* Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
*
* Derived from drivers/mtd/nand/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Overview:
* This is a device driver for the NAND flash device found on the
* TS-7250 board which utilizes a Samsung 32 Mbyte part.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/ts72xx.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
/*
* MTD structure for TS7250 board
*/
static struct mtd_info *ts7250_mtd = NULL;
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#define NUM_PARTITIONS 3
/*
* Define static partitions for flash device
*/
static struct mtd_partition partition_info32[] = {
{
.name = "TS-BOOTROM",
.offset = 0x00000000,
.size = 0x00004000,
}, {
.name = "Linux",
.offset = 0x00004000,
.size = 0x01d00000,
}, {
.name = "RedBoot",
.offset = 0x01d04000,
.size = 0x002fc000,
},
};
/*
* Define static partitions for flash device
*/
static struct mtd_partition partition_info128[] = {
{
.name = "TS-BOOTROM",
.offset = 0x00000000,
.size = 0x00004000,
}, {
.name = "Linux",
.offset = 0x00004000,
.size = 0x07d00000,
}, {
.name = "RedBoot",
.offset = 0x07d04000,
.size = 0x002fc000,
},
};
#endif
/*
* hardware specific access to control-lines
*
* ctrl:
* NAND_NCE: bit 0 -> bit 2
* NAND_CLE: bit 1 -> bit 1
* NAND_ALE: bit 2 -> bit 0
*/
static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *chip = mtd->priv;
if (ctrl & NAND_CTRL_CHANGE) {
unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
unsigned char bits;
bits = (ctrl & NAND_NCE) << 2;
bits |= ctrl & NAND_CLE;
bits |= (ctrl & NAND_ALE) >> 2;
__raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
}
if (cmd != NAND_CMD_NONE)
writeb(cmd, chip->IO_ADDR_W);
}
/*
* read device ready pin
*/
static int ts7250_device_ready(struct mtd_info *mtd)
{
return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
}
/*
* Main initialization routine
*/
static int __init ts7250_init(void)
{
struct nand_chip *this;
const char *part_type = 0;
int mtd_parts_nb = 0;
struct mtd_partition *mtd_parts = 0;
if (!machine_is_ts72xx() || board_is_ts7200())
return -ENXIO;
/* Allocate memory for MTD device structure and private data */
ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!ts7250_mtd) {
printk("Unable to allocate TS7250 NAND MTD device structure.\n");
return -ENOMEM;
}
/* Get pointer to private data */
this = (struct nand_chip *)(&ts7250_mtd[1]);
/* Initialize structures */
memset(ts7250_mtd, 0, sizeof(struct mtd_info));
memset(this, 0, sizeof(struct nand_chip));
/* Link the private data with the MTD structure */
ts7250_mtd->priv = this;
ts7250_mtd->owner = THIS_MODULE;
/* insert callbacks */
this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
this->cmd_ctrl = ts7250_hwcontrol;
this->dev_ready = ts7250_device_ready;
this->chip_delay = 15;
this->ecc.mode = NAND_ECC_SOFT;
printk("Searching for NAND flash...\n");
/* Scan to find existence of the device */
if (nand_scan(ts7250_mtd, 1)) {
kfree(ts7250_mtd);
return -ENXIO;
}
#ifdef CONFIG_MTD_PARTITIONS
ts7250_mtd->name = "ts7250-nand";
mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
#endif
if (mtd_parts_nb == 0) {
mtd_parts = partition_info32;
if (ts7250_mtd->size >= (128 * 0x100000))
mtd_parts = partition_info128;
mtd_parts_nb = NUM_PARTITIONS;
part_type = "static";
}
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
}
module_init(ts7250_init);
/*
* Clean up routine
*/
static void __exit ts7250_cleanup(void)
{
/* Unregister the device */
del_mtd_device(ts7250_mtd);
/* Free the MTD device structure */
kfree(ts7250_mtd);
}
module_exit(ts7250_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");

View File

@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
int ret;
ret = nand_scan_ident(mtd, 1);
ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
chip->ecc.size = mtd->writesize;

View File

@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
kfree(nftl);
}
/*

View File

@ -30,6 +30,13 @@ config MTD_ONENAND_OMAP2
Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
via the GPMC memory controller.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210)
help
Support for a OneNAND flash device connected to an Samsung SOC
S3C64XX/S5PC1XX controller.
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
select HAVE_MTD_OTP

View File

@ -8,6 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
# Simulator
obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o

View File

@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
if (in_interrupt())
if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
if (in_interrupt())
if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
if (*done)
break;
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
return 0;
}
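
The fix in the hunks above is about which address gets checked and unmapped: dma_mapping_error() and dma_unmap_single() must operate on the handle returned by dma_map_single() (dma_src here), not on the device-side target address. A minimal sketch of the corrected pattern; "example_push" is an illustrative name and the actual transfer is elided.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_push(struct device *dev, void *buf, size_t count,
			dma_addr_t dev_dst)
{
	dma_addr_t src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, src))
		return -ENOMEM;		/* nothing was mapped, nothing to unmap */

	/* ... program the DMA engine to copy from src to dev_dst,
	 *     then wait for completion ... */

	dma_unmap_single(dev, src, count, DMA_TO_DEVICE);
	return 0;
}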

View File

@ -397,7 +397,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@ -426,7 +427,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
if (ONENAND_IS_MLC(this))
if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
@ -466,11 +467,11 @@ static inline int onenand_read_ecc(struct onenand_chip *this)
{
int ecc, i, result = 0;
if (!FLEXONENAND(this))
if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
for (i = 0; i < 4; i++) {
ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
if (likely(!ecc))
continue;
if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
@ -1425,7 +1426,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
ret = ONENAND_IS_MLC(this) ?
ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
@ -1460,7 +1461,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
ret = ONENAND_IS_MLC(this) ?
ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
@ -1634,7 +1635,6 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
void __iomem *dataram;
int ret = 0;
int thislen, column;
@ -1654,10 +1654,9 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
onenand_update_bufferram(mtd, addr, 1);
dataram = this->base + ONENAND_DATARAM;
dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM);
this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
if (memcmp(buf, dataram + column, thislen))
if (memcmp(buf, this->verify_buf, thislen))
return -EBADMSG;
len -= thislen;
@ -1926,7 +1925,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
* 2 PLANE, MLC, and Flex-OneNAND do not support
* write-while-program feature.
*/
if (!ONENAND_IS_2PLANE(this) && !first) {
if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
ONENAND_SET_PREV_BUFFERRAM(this);
ret = this->wait(mtd, FL_WRITING);
@ -1957,7 +1956,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
*/
if (ONENAND_IS_2PLANE(this)) {
if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
ret = this->wait(mtd, FL_WRITING);
/* In partial page write we don't update bufferram */
@ -2084,7 +2083,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
if (ONENAND_IS_MLC(this)) {
if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
/* Set main area of DataRAM to 0xff*/
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
@ -3027,7 +3026,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
ret = ONENAND_IS_MLC(this) ?
ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
@ -3372,7 +3371,10 @@ static void onenand_check_features(struct mtd_info *mtd)
/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_4Gb:
this->options |= ONENAND_HAS_2PLANE;
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
else
this->options |= ONENAND_HAS_4KB_PAGE;
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
@ -3393,7 +3395,7 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
if (ONENAND_IS_MLC(this))
if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
@ -3407,6 +3409,8 @@ static void onenand_check_features(struct mtd_info *mtd)
printk(KERN_DEBUG "Chip support all block unlock\n");
if (this->options & ONENAND_HAS_2PLANE)
printk(KERN_DEBUG "Chip has 2 plane\n");
if (this->options & ONENAND_HAS_4KB_PAGE)
printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
}
/**
@ -3759,6 +3763,12 @@ static int onenand_probe(struct mtd_info *mtd)
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
/* Workaround */
if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) {
bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
}
/* Check manufacturer ID */
if (onenand_check_maf(bram_maf_id))
return -ENXIO;
@ -3778,6 +3788,9 @@ static int onenand_probe(struct mtd_info *mtd)
this->device_id = dev_id;
this->version_id = ver_id;
/* Check OneNAND features */
onenand_check_features(mtd);
density = onenand_get_density(dev_id);
if (FLEXONENAND(this)) {
this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
@ -3799,7 +3812,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
if (ONENAND_IS_MLC(this))
if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
mtd->oobsize = mtd->writesize >> 5;
@ -3829,9 +3842,6 @@ static int onenand_probe(struct mtd_info *mtd)
else
mtd->size = this->chipsize;
/* Check OneNAND features */
onenand_check_features(mtd);
/*
* We emulate the 4KiB page and 256KiB erase block size
* But oobsize is still 64 bytes.
@ -3926,6 +3936,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
__func__);
return -ENOMEM;
}
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
if (!this->verify_buf) {
kfree(this->page_buf);
return -ENOMEM;
}
#endif
this->options |= ONENAND_PAGEBUF_ALLOC;
}
if (!this->oob_buf) {
@ -4053,8 +4070,12 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->bbm);
}
/* Buffers allocated by onenand_scan */
if (this->options & ONENAND_PAGEBUF_ALLOC)
if (this->options & ONENAND_PAGEBUF_ALLOC) {
kfree(this->page_buf);
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
kfree(this->verify_buf);
#endif
}
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
kfree(mtd->eraseregions);
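
Most of the hunks above add ONENAND_IS_4KB_PAGE() next to the existing ONENAND_IS_MLC() test: 4KiB-page parts behave like MLC here in that they use only BufferRAM0 and do not support write-while-program. A sketch of the combined predicate as a hypothetical local helper (the patch itself repeats the open-coded test):

static inline int onenand_has_single_dataram(struct onenand_chip *this)
{
	/* MLC and 4KiB-page chips: always BufferRAM0, no write-while-program */
	return ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this);
}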

File diff suppressed because it is too large

View File

@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
kfree(part);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {

drivers/mtd/sm_ftl.c: new file, 1284 lines (diff suppressed because it is too large)

drivers/mtd/sm_ftl.h: new file, 94 lines
View File

@ -0,0 +1,94 @@
/*
* Copyright © 2009 - Maxim Levitsky
* SmartMedia/xD translation layer
*
* Based loosely on ssfdc.c, which is
* © 2005 Eptar srl
* Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mtd/blktrans.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/mtd/mtd.h>
struct ftl_zone {
int initialized;
int16_t *lba_to_phys_table; /* LBA to physical table */
struct kfifo free_sectors; /* queue of free sectors */
};
struct sm_ftl {
struct mtd_blktrans_dev *trans;
struct mutex mutex; /* protects the structure */
struct ftl_zone *zones; /* FTL tables for each zone */
/* Media information */
int block_size; /* block size in bytes */
int zone_size; /* zone size in blocks */
int zone_count; /* number of zones */
int max_lba; /* maximum lba in a zone */
int smallpagenand; /* 256 bytes/page nand */
int readonly; /* is FS readonly */
int unstable;
int cis_block; /* CIS block location */
int cis_boffset; /* CIS offset in the block */
int cis_page_offset; /* CIS offset in the page */
void *cis_buffer; /* tmp buffer for cis reads */
/* Cache */
int cache_block; /* block number of cached block */
int cache_zone; /* zone of cached block */
unsigned char *cache_data; /* cached block data */
long unsigned int cache_data_invalid_bitmap;
int cache_clean;
struct work_struct flush_work;
struct timer_list timer;
/* Async erase stuff */
struct completion erase_completion;
/* Geometry stuff */
int heads;
int sectors;
int cylinders;
struct attribute_group *disk_attributes;
};
struct chs_entry {
unsigned long size;
unsigned short cyl;
unsigned char head;
unsigned char sec;
};
#define SM_FTL_PARTN_BITS 3
#define sm_printk(format, ...) \
printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
#define dbg(format, ...) \
if (debug) \
printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
#define dbg_verbose(format, ...) \
if (debug > 1) \
printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
static void sm_erase_callback(struct erase_info *self);
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
int put_free);
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
static int sm_recheck_media(struct sm_ftl *ftl);
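
A hypothetical lookup helper, only to illustrate how the per-zone lba_to_phys_table above is intended to be used: each zone maps logical block numbers to physical blocks, and (by assumption here) a negative table entry means the logical block is not mapped yet. This is a sketch, not code from sm_ftl.c, whose diff is suppressed above.

static int example_lba_to_block(struct sm_ftl *ftl, int zone_num, int lba)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];

	if (!zone->initialized || lba < 0 || lba >= ftl->max_lba)
		return -1;			/* zone not scanned or LBA out of range */

	return zone->lba_to_phys_table[lba];	/* physical block, or negative if unmapped */
}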

View File

@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
kfree(ssfdc);
}
static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,

View File

@ -480,12 +480,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
bbt = kmalloc(ebcnt, GFP_KERNEL);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
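
This change (repeated in the other mtd test files below) is a direct substitution: kzalloc(n, flags) returns n zeroed bytes, so the follow-up memset() goes away. A sketch with a placeholder name:

#include <linux/slab.h>

static void *alloc_zeroed_bbt(size_t ebcnt)
{
	/* was: buf = kmalloc(ebcnt, GFP_KERNEL); memset(buf, 0, ebcnt); */
	return kzalloc(ebcnt, GFP_KERNEL);	/* already zero-filled */
}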

View File

@ -141,12 +141,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
bbt = kmalloc(ebcnt, GFP_KERNEL);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)

View File

@ -295,12 +295,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
bbt = kmalloc(ebcnt, GFP_KERNEL);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)

View File

@ -221,12 +221,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
bbt = kmalloc(ebcnt, GFP_KERNEL);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)

View File

@ -354,12 +354,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
bbt = kmalloc(ebcnt, GFP_KERNEL);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {

View File

@ -23,10 +23,9 @@ static int jffs2_garbage_collect_thread(void *);
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
spin_lock(&c->erase_completion_lock);
assert_spin_locked(&c->erase_completion_lock);
if (c->gc_task && jffs2_thread_should_wake(c))
send_sig(SIGHUP, c->gc_task, 1);
spin_unlock(&c->erase_completion_lock);
}
/* This must only ever be called when no GC thread is currently running */
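
The trigger no longer takes erase_completion_lock itself; it now asserts that the caller already holds it. A sketch of the new calling convention, matching the erase.c and gc.c hunks below; "example_kick" is an illustrative name.

static void example_kick(struct jffs2_sb_info *c)
{
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);	/* wakes the GC thread if work is pending */
	spin_unlock(&c->erase_completion_lock);
}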

View File

@ -103,9 +103,10 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
jffs2_erase_failed(c, jeb, bad_offset);
}
void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
int work_done = 0;
mutex_lock(&c->erase_free_sem);
@ -121,6 +122,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
work_done++;
if (!--count) {
D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
goto done;
@ -157,6 +159,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
mutex_unlock(&c->erase_free_sem);
done:
D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
return work_done;
}
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
@ -165,10 +168,11 @@ static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblo
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
/* Wake the GC thread to mark them clean */
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
/* Ensure that kupdated calls us again to mark them clean */
jffs2_erase_pending_trigger(c);
wake_up(&c->erase_wait);
}
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
@ -487,9 +491,9 @@ filebad:
refile:
/* Stick it back on the list from whence it came and come back later */
jffs2_erase_pending_trigger(c);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
list_move(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);

View File

@ -313,8 +313,8 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
case S_IFBLK:
case S_IFCHR:
/* Read the device numbers from the media */
if (f->metadata->size != sizeof(jdev.old) &&
f->metadata->size != sizeof(jdev.new)) {
if (f->metadata->size != sizeof(jdev.old_id) &&
f->metadata->size != sizeof(jdev.new_id)) {
printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
goto error_io;
}
@ -325,10 +325,10 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
goto error;
}
if (f->metadata->size == sizeof(jdev.old))
rdev = old_decode_dev(je16_to_cpu(jdev.old));
if (f->metadata->size == sizeof(jdev.old_id))
rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
else
rdev = new_decode_dev(je32_to_cpu(jdev.new));
rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
case S_IFSOCK:
case S_IFIFO:

View File

@ -214,6 +214,19 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
return ret;
}
/* If there are any blocks which need erasing, erase them now */
if (!list_empty(&c->erase_complete_list) ||
!list_empty(&c->erase_pending_list)) {
spin_unlock(&c->erase_completion_lock);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n"));
if (jffs2_erase_pending_blocks(c, 1)) {
mutex_unlock(&c->alloc_sem);
return 0;
}
D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
spin_lock(&c->erase_completion_lock);
}
/* First, work out which block we're garbage-collecting */
jeb = c->gcblock;
@ -222,7 +235,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
if (!jeb) {
/* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
if (!list_empty(&c->erase_pending_list)) {
if (c->nr_erasing_blocks) {
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -EAGAIN;
@ -435,7 +448,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
list_add_tail(&c->gcblock->list, &c->erase_pending_list);
c->gcblock = NULL;
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
jffs2_garbage_collect_trigger(c);
}
spin_unlock(&c->erase_completion_lock);

Some files were not shown because too many files have changed in this diff