Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge tag 'nand/for-5.7' into mtd/next

Raw NAND core changes:
* Add support for manufacturer specific suspend/resume operations
* Add support for manufacturer specific lock/unlock operations
* Replace zero-length array with flexible-array member
* Fix a typo ("manufecturer")
* Ensure the nand_soft_waitrdy() wait period is long enough

Raw NAND controller driver changes:
* Brcmnand:
  - Add support for flash-edu for DMA transfers (+ bindings)
* Cadence:
  - Reinit completion before executing a new command
  - Change bad block marker size
  - Fix the calculation of the available OOB size
  - Get metadata size from registers
* Qualcomm:
  - Use dma_request_chan() instead of dma_request_slave_channel()
  - Release resources on failure within qcom_nandc_alloc()
* Allwinner:
  - Use dma_request_chan() instead of dma_request_slave_channel()
* Marvell:
  - Use dma_request_chan() instead of dma_request_slave_channel()
  - Release the DMA channel on error
* Freescale:
  - Use dma_request_chan() instead of dma_request_slave_channel()
* Macronix:
  - Add support for the Macronix NAND randomizer (+ bindings)
* Ams-delta:
  - Rename structures and functions to gpio_nand*
  - Make the driver custom I/O ready
  - Drop a useless local variable
  - Support custom driver initialisation
  - Add module device tables
  - Handle more GPIO pins as optional
  - Make read pulses optional
  - Don't hardcode read/write pulse widths
  - Push inversion handling to gpiolib
  - Enable OF partition info support
  - Drop board specific partition info
  - Use struct gpio_nand_platdata
  - Write protect the device during probe
* Ingenic:
  - Use devm_platform_ioremap_resource()
  - Add dependency on MIPS || COMPILE_TEST
* Denali:
  - Deassert the write protect pin
* ST:
  - Use dma_request_chan() instead of dma_request_slave_channel()

Raw NAND chip driver changes:
* Toshiba:
  - Support reading the number of bitflips for BENAND (Built-in ECC NAND)
* Macronix:
  - Add support for deep power down mode
  - Add support for block protection

SPI-NAND core changes:
* Do not erase the block before writing a bad block marker
* Explicitly use MTD_OPS_RAW to write the bad block marker to OOB
* Stop using spinand->oobbuf for buffering bad block markers
* Rework the detect procedure for different READ_ID operations

SPI-NAND driver changes:
* Toshiba:
  - Support for new Kioxia serial NAND
  - Rename functions to change suffix and prefix (8Gbit)
  - Add a comment about the Kioxia ID
* Micron:
  - Add new Micron SPI NAND devices with multiple dies
  - Add M70A series Micron SPI NAND devices
  - Identify SPI NAND devices with Continuous Read mode
  - Add new Micron SPI NAND devices
  - Describe the SPI NAND device MT29F2G01ABAGD
  - Generalize the OOB layout structure and function names
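Several entries above are the same conversion from dma_request_slave_channel() to dma_request_chan(); the acquire_dma_channels() and marvell_nfc_init_dma() hunks further down in this diff show the real changes. As a rough sketch of the pattern only (the "dev", "chan" and "ret" names are placeholders and the "rx-tx" channel name follows the GPMI hunk; this is not any driver's exact code):

	struct dma_chan *chan;
	int ret;

	/*
	 * Old style: returns NULL on failure, so the caller loses the
	 * error code and cannot defer probing.
	 */
	chan = dma_request_slave_channel(dev, "rx-tx");
	if (!chan)
		return -EINVAL;

	/*
	 * New style: returns an ERR_PTR(), so -EPROBE_DEFER and other
	 * error codes can be propagated to the driver core.
	 */
	chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "DMA channel request failed: %d\n", ret);
		return ret;
	}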
commit 699274b1a1
32 changed files with 1479 additions and 461 deletions
@@ -35,11 +35,11 @@ Required properties:
                          (optional) NAND flash cache range (if at non-standard offset)
 - reg-names : a list of the names corresponding to the previous register
               ranges. Should contain "nand" and (optionally)
-              "flash-dma" and/or "nand-cache".
-- interrupts : The NAND CTLRDY interrupt and (if Flash DMA is available)
-  FLASH_DMA_DONE
-- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done", if broken out as
-  individual interrupts.
+              "flash-dma" or "flash-edu" and/or "nand-cache".
+- interrupts : The NAND CTLRDY interrupt, (if Flash DMA is available)
+  FLASH_DMA_DONE and if EDU is avaialble and used FLASH_EDU_DONE
+- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done" or "flash_edu_done",
+  if broken out as individual interrupts.
               May be "nand", if the SoC has the individual NAND
               interrupts multiplexed behind another custom piece of
               hardware
27	Documentation/devicetree/bindings/mtd/nand-macronix.txt (new file)
@@ -0,0 +1,27 @@
+Macronix NANDs Device Tree Bindings
+-----------------------------------
+
+Macronix NANDs support randomizer operation for scrambling user data,
+which can be enabled with a SET_FEATURE. The penalty when using the
+randomizer are subpage accesses prohibited and more time period needed
+for program operation, i.e., tPROG 300us to 340us (randomizer enabled).
+Enabling the randomizer is a one time persistent and non reversible
+operation.
+
+For more high-reliability concern, if subpage write is not available
+with hardware ECC and not enabled at UBI level, then enabling the
+randomizer is recommended by default by adding a new specific property
+in children nodes.
+
+Required NAND chip properties in children mode:
+- randomizer enable: should be "mxic,enable-randomizer-otp"
+
+Example:
+
+	nand: nand-controller@unit-address {
+
+		nand@0 {
+			reg = <0>;
+			mxic,enable-randomizer-otp;
+		};
+	};
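The macronix_nand_onfi_init() hunk later in this diff is what consumes this property; condensed from that hunk (only the property check is shown here, not the full OTP programming sequence):

	struct device_node *dn = nand_get_flash_node(chip);
	int rand_otp = 0;

	/*
	 * The binding is opt-in: the randomizer OTP bits are only
	 * programmed when the DT property above is present on the
	 * NAND chip node.
	 */
	if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
		rand_otp = 1;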
@ -17,6 +17,8 @@
|
|||
#include <linux/input.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/mtd/nand-gpio.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/regulator/fixed.h>
|
||||
|
@ -294,9 +296,42 @@ struct modem_private_data {
|
|||
|
||||
static struct modem_private_data modem_priv;
|
||||
|
||||
/*
|
||||
* Define partitions for flash device
|
||||
*/
|
||||
|
||||
static struct mtd_partition partition_info[] = {
|
||||
{ .name = "Kernel",
|
||||
.offset = 0,
|
||||
.size = 3 * SZ_1M + SZ_512K },
|
||||
{ .name = "u-boot",
|
||||
.offset = 3 * SZ_1M + SZ_512K,
|
||||
.size = SZ_256K },
|
||||
{ .name = "u-boot params",
|
||||
.offset = 3 * SZ_1M + SZ_512K + SZ_256K,
|
||||
.size = SZ_256K },
|
||||
{ .name = "Amstrad LDR",
|
||||
.offset = 4 * SZ_1M,
|
||||
.size = SZ_256K },
|
||||
{ .name = "File system",
|
||||
.offset = 4 * SZ_1M + 1 * SZ_256K,
|
||||
.size = 27 * SZ_1M },
|
||||
{ .name = "PBL reserved",
|
||||
.offset = 32 * SZ_1M - 3 * SZ_256K,
|
||||
.size = 3 * SZ_256K },
|
||||
};
|
||||
|
||||
static struct gpio_nand_platdata nand_platdata = {
|
||||
.parts = partition_info,
|
||||
.num_parts = ARRAY_SIZE(partition_info),
|
||||
};
|
||||
|
||||
static struct platform_device ams_delta_nand_device = {
|
||||
.name = "ams-delta-nand",
|
||||
.id = -1,
|
||||
.dev = {
|
||||
.platform_data = &nand_platdata,
|
||||
},
|
||||
};
|
||||
|
||||
#define OMAP_GPIO_LABEL "gpio-0-15"
|
||||
|
@ -306,10 +341,14 @@ static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
|
|||
.table = {
|
||||
GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_NAND_RB, "rdy",
|
||||
0),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce", 0),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre", 0),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp", 0),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe", 0),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce",
|
||||
GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre",
|
||||
GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp",
|
||||
GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe",
|
||||
GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0),
|
||||
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0),
|
||||
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0),
|
||||
|
|
|
@@ -403,8 +403,8 @@ nand: nand@41b800 {
 		compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
 		#address-cells = <1>;
 		#size-cells = <0>;
-		reg-names = "nand";
-		reg = <0x41b800 0x400>;
+		reg-names = "nand", "flash-edu";
+		reg = <0x41b800 0x400>, <0x41bc00 0x24>;
 		interrupt-parent = <&hif_l2_intc>;
 		interrupts = <24>;
 		status = "disabled";
|
||||
|
|
|
@ -19,15 +19,17 @@
|
|||
#include <linux/delay.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/nand-gpio.h>
|
||||
#include <linux/mtd/rawnand.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
/*
|
||||
* MTD structure for E3 (Delta)
|
||||
*/
|
||||
struct ams_delta_nand {
|
||||
struct gpio_nand {
|
||||
struct nand_controller base;
|
||||
struct nand_chip nand_chip;
|
||||
struct gpio_desc *gpiod_rdy;
|
||||
|
@ -39,41 +41,20 @@ struct ams_delta_nand {
|
|||
struct gpio_desc *gpiod_cle;
|
||||
struct gpio_descs *data_gpiods;
|
||||
bool data_in;
|
||||
unsigned int tRP;
|
||||
unsigned int tWP;
|
||||
u8 (*io_read)(struct gpio_nand *this);
|
||||
void (*io_write)(struct gpio_nand *this, u8 byte);
|
||||
};
|
||||
|
||||
/*
|
||||
* Define partitions for flash devices
|
||||
*/
|
||||
|
||||
static const struct mtd_partition partition_info[] = {
|
||||
{ .name = "Kernel",
|
||||
.offset = 0,
|
||||
.size = 3 * SZ_1M + SZ_512K },
|
||||
{ .name = "u-boot",
|
||||
.offset = 3 * SZ_1M + SZ_512K,
|
||||
.size = SZ_256K },
|
||||
{ .name = "u-boot params",
|
||||
.offset = 3 * SZ_1M + SZ_512K + SZ_256K,
|
||||
.size = SZ_256K },
|
||||
{ .name = "Amstrad LDR",
|
||||
.offset = 4 * SZ_1M,
|
||||
.size = SZ_256K },
|
||||
{ .name = "File system",
|
||||
.offset = 4 * SZ_1M + 1 * SZ_256K,
|
||||
.size = 27 * SZ_1M },
|
||||
{ .name = "PBL reserved",
|
||||
.offset = 32 * SZ_1M - 3 * SZ_256K,
|
||||
.size = 3 * SZ_256K },
|
||||
};
|
||||
|
||||
static void ams_delta_write_commit(struct ams_delta_nand *priv)
|
||||
static void gpio_nand_write_commit(struct gpio_nand *priv)
|
||||
{
|
||||
gpiod_set_value(priv->gpiod_nwe, 0);
|
||||
ndelay(40);
|
||||
gpiod_set_value(priv->gpiod_nwe, 1);
|
||||
ndelay(priv->tWP);
|
||||
gpiod_set_value(priv->gpiod_nwe, 0);
|
||||
}
|
||||
|
||||
static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
|
||||
static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
|
||||
{
|
||||
struct gpio_descs *data_gpiods = priv->data_gpiods;
|
||||
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
|
||||
|
@ -81,10 +62,10 @@ static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
|
|||
gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
|
||||
data_gpiods->info, values);
|
||||
|
||||
ams_delta_write_commit(priv);
|
||||
gpio_nand_write_commit(priv);
|
||||
}
|
||||
|
||||
static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
|
||||
static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
|
||||
{
|
||||
struct gpio_descs *data_gpiods = priv->data_gpiods;
|
||||
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
|
||||
|
@ -94,30 +75,30 @@ static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
|
|||
gpiod_direction_output_raw(data_gpiods->desc[i],
|
||||
test_bit(i, values));
|
||||
|
||||
ams_delta_write_commit(priv);
|
||||
gpio_nand_write_commit(priv);
|
||||
|
||||
priv->data_in = false;
|
||||
}
|
||||
|
||||
static u8 ams_delta_io_read(struct ams_delta_nand *priv)
|
||||
static u8 gpio_nand_io_read(struct gpio_nand *priv)
|
||||
{
|
||||
u8 res;
|
||||
struct gpio_descs *data_gpiods = priv->data_gpiods;
|
||||
DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
|
||||
|
||||
gpiod_set_value(priv->gpiod_nre, 0);
|
||||
ndelay(40);
|
||||
gpiod_set_value(priv->gpiod_nre, 1);
|
||||
ndelay(priv->tRP);
|
||||
|
||||
gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
|
||||
data_gpiods->info, values);
|
||||
|
||||
gpiod_set_value(priv->gpiod_nre, 1);
|
||||
gpiod_set_value(priv->gpiod_nre, 0);
|
||||
|
||||
res = values[0];
|
||||
return res;
|
||||
}
|
||||
|
||||
static void ams_delta_dir_input(struct ams_delta_nand *priv)
|
||||
static void gpio_nand_dir_input(struct gpio_nand *priv)
|
||||
{
|
||||
struct gpio_descs *data_gpiods = priv->data_gpiods;
|
||||
int i;
|
||||
|
@ -128,68 +109,67 @@ static void ams_delta_dir_input(struct ams_delta_nand *priv)
|
|||
priv->data_in = true;
|
||||
}
|
||||
|
||||
static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
|
||||
int len)
|
||||
static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
if (len > 0 && priv->data_in)
|
||||
ams_delta_dir_output(priv, buf[i++]);
|
||||
gpio_nand_dir_output(priv, buf[i++]);
|
||||
|
||||
while (i < len)
|
||||
ams_delta_io_write(priv, buf[i++]);
|
||||
priv->io_write(priv, buf[i++]);
|
||||
}
|
||||
|
||||
static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
|
||||
static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!priv->data_in)
|
||||
ams_delta_dir_input(priv);
|
||||
if (priv->data_gpiods && !priv->data_in)
|
||||
gpio_nand_dir_input(priv);
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
buf[i] = ams_delta_io_read(priv);
|
||||
buf[i] = priv->io_read(priv);
|
||||
}
|
||||
|
||||
static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
|
||||
static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
|
||||
{
|
||||
gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
|
||||
gpiod_set_value(priv->gpiod_nce, assert);
|
||||
}
|
||||
|
||||
static int ams_delta_exec_op(struct nand_chip *this,
|
||||
static int gpio_nand_exec_op(struct nand_chip *this,
|
||||
const struct nand_operation *op, bool check_only)
|
||||
{
|
||||
struct ams_delta_nand *priv = nand_get_controller_data(this);
|
||||
struct gpio_nand *priv = nand_get_controller_data(this);
|
||||
const struct nand_op_instr *instr;
|
||||
int ret = 0;
|
||||
|
||||
if (check_only)
|
||||
return 0;
|
||||
|
||||
ams_delta_ctrl_cs(priv, 1);
|
||||
gpio_nand_ctrl_cs(priv, 1);
|
||||
|
||||
for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
|
||||
switch (instr->type) {
|
||||
case NAND_OP_CMD_INSTR:
|
||||
gpiod_set_value(priv->gpiod_cle, 1);
|
||||
ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
|
||||
gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
|
||||
gpiod_set_value(priv->gpiod_cle, 0);
|
||||
break;
|
||||
|
||||
case NAND_OP_ADDR_INSTR:
|
||||
gpiod_set_value(priv->gpiod_ale, 1);
|
||||
ams_delta_write_buf(priv, instr->ctx.addr.addrs,
|
||||
gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
|
||||
instr->ctx.addr.naddrs);
|
||||
gpiod_set_value(priv->gpiod_ale, 0);
|
||||
break;
|
||||
|
||||
case NAND_OP_DATA_IN_INSTR:
|
||||
ams_delta_read_buf(priv, instr->ctx.data.buf.in,
|
||||
gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
|
||||
instr->ctx.data.len);
|
||||
break;
|
||||
|
||||
case NAND_OP_DATA_OUT_INSTR:
|
||||
ams_delta_write_buf(priv, instr->ctx.data.buf.out,
|
||||
gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
|
||||
instr->ctx.data.len);
|
||||
break;
|
||||
|
||||
|
@ -206,28 +186,61 @@ static int ams_delta_exec_op(struct nand_chip *this,
|
|||
break;
|
||||
}
|
||||
|
||||
ams_delta_ctrl_cs(priv, 0);
|
||||
gpio_nand_ctrl_cs(priv, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct nand_controller_ops ams_delta_ops = {
|
||||
.exec_op = ams_delta_exec_op,
|
||||
static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline,
|
||||
const struct nand_data_interface *cf)
|
||||
{
|
||||
struct gpio_nand *priv = nand_get_controller_data(this);
|
||||
const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
|
||||
struct device *dev = &nand_to_mtd(this)->dev;
|
||||
|
||||
if (IS_ERR(sdr))
|
||||
return PTR_ERR(sdr);
|
||||
|
||||
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
|
||||
return 0;
|
||||
|
||||
if (priv->gpiod_nre) {
|
||||
priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
|
||||
dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
|
||||
}
|
||||
|
||||
priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
|
||||
dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct nand_controller_ops gpio_nand_ops = {
|
||||
.exec_op = gpio_nand_exec_op,
|
||||
.setup_data_interface = gpio_nand_setup_data_interface,
|
||||
};
|
||||
|
||||
/*
|
||||
* Main initialization routine
|
||||
*/
|
||||
static int ams_delta_init(struct platform_device *pdev)
|
||||
static int gpio_nand_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ams_delta_nand *priv;
|
||||
struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
|
||||
const struct mtd_partition *partitions = NULL;
|
||||
int num_partitions = 0;
|
||||
struct gpio_nand *priv;
|
||||
struct nand_chip *this;
|
||||
struct mtd_info *mtd;
|
||||
struct gpio_descs *data_gpiods;
|
||||
int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
|
||||
int err = 0;
|
||||
|
||||
if (pdata) {
|
||||
partitions = pdata->parts;
|
||||
num_partitions = pdata->num_parts;
|
||||
}
|
||||
|
||||
/* Allocate memory for MTD device structure and private data */
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
|
||||
GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
@ -238,6 +251,7 @@ static int ams_delta_init(struct platform_device *pdev)
|
|||
mtd->dev.parent = &pdev->dev;
|
||||
|
||||
nand_set_controller_data(this, priv);
|
||||
nand_set_flash_node(this, pdev->dev.of_node);
|
||||
|
||||
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
|
||||
if (IS_ERR(priv->gpiod_rdy)) {
|
||||
|
@ -251,29 +265,33 @@ static int ams_delta_init(struct platform_device *pdev)
|
|||
|
||||
platform_set_drvdata(pdev, priv);
|
||||
|
||||
/* Set chip enabled, but */
|
||||
priv->gpiod_nwp = devm_gpiod_get(&pdev->dev, "nwp", GPIOD_OUT_HIGH);
|
||||
/* Set chip enabled but write protected */
|
||||
priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
|
||||
GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(priv->gpiod_nwp)) {
|
||||
err = PTR_ERR(priv->gpiod_nwp);
|
||||
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
|
||||
priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(priv->gpiod_nce)) {
|
||||
err = PTR_ERR(priv->gpiod_nce);
|
||||
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
|
||||
priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(priv->gpiod_nre)) {
|
||||
err = PTR_ERR(priv->gpiod_nre);
|
||||
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
|
||||
priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(priv->gpiod_nwe)) {
|
||||
err = PTR_ERR(priv->gpiod_nwe);
|
||||
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
|
||||
|
@ -295,28 +313,62 @@ static int ams_delta_init(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
/* Request array of data pins, initialize them as input */
|
||||
data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
|
||||
if (IS_ERR(data_gpiods)) {
|
||||
err = PTR_ERR(data_gpiods);
|
||||
priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
|
||||
GPIOD_IN);
|
||||
if (IS_ERR(priv->data_gpiods)) {
|
||||
err = PTR_ERR(priv->data_gpiods);
|
||||
dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
priv->data_gpiods = data_gpiods;
|
||||
priv->data_in = true;
|
||||
if (priv->data_gpiods) {
|
||||
if (!priv->gpiod_nwe) {
|
||||
dev_err(&pdev->dev,
|
||||
"mandatory NWE pin not provided by platform\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Initialize the NAND controller object embedded in ams_delta_nand. */
|
||||
priv->base.ops = &ams_delta_ops;
|
||||
priv->io_read = gpio_nand_io_read;
|
||||
priv->io_write = gpio_nand_io_write;
|
||||
priv->data_in = true;
|
||||
}
|
||||
|
||||
if (pdev->id_entry)
|
||||
probe = (void *) pdev->id_entry->driver_data;
|
||||
else
|
||||
probe = of_device_get_match_data(&pdev->dev);
|
||||
if (probe)
|
||||
err = probe(pdev, priv);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (!priv->io_read || !priv->io_write) {
|
||||
dev_err(&pdev->dev, "incomplete device configuration\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Initialize the NAND controller object embedded in gpio_nand. */
|
||||
priv->base.ops = &gpio_nand_ops;
|
||||
nand_controller_init(&priv->base);
|
||||
this->controller = &priv->base;
|
||||
|
||||
/*
|
||||
* FIXME: We should release write protection only after nand_scan() to
|
||||
* be on the safe side but we can't do that until we have a generic way
|
||||
* to assert/deassert WP from the core. Even if the core shouldn't
|
||||
* write things in the nand_scan() path, it should have control on this
|
||||
* pin just in case we ever need to disable write protection during
|
||||
* chip detection/initialization.
|
||||
*/
|
||||
/* Release write protection */
|
||||
gpiod_set_value(priv->gpiod_nwp, 0);
|
||||
|
||||
/* Scan to find existence of the device */
|
||||
err = nand_scan(this, 1);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Register the partitions */
|
||||
err = mtd_device_register(mtd, partition_info,
|
||||
ARRAY_SIZE(partition_info));
|
||||
err = mtd_device_register(mtd, partitions, num_partitions);
|
||||
if (err)
|
||||
goto err_nand_cleanup;
|
||||
|
||||
|
@ -331,26 +383,47 @@ static int ams_delta_init(struct platform_device *pdev)
|
|||
/*
|
||||
* Clean up routine
|
||||
*/
|
||||
static int ams_delta_cleanup(struct platform_device *pdev)
|
||||
static int gpio_nand_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct ams_delta_nand *priv = platform_get_drvdata(pdev);
|
||||
struct gpio_nand *priv = platform_get_drvdata(pdev);
|
||||
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
|
||||
|
||||
/* Apply write protection */
|
||||
gpiod_set_value(priv->gpiod_nwp, 1);
|
||||
|
||||
/* Unregister device */
|
||||
nand_release(mtd_to_nand(mtd));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver ams_delta_nand_driver = {
|
||||
.probe = ams_delta_init,
|
||||
.remove = ams_delta_cleanup,
|
||||
static const struct of_device_id gpio_nand_of_id_table[] = {
|
||||
{
|
||||
/* sentinel */
|
||||
},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
|
||||
|
||||
static const struct platform_device_id gpio_nand_plat_id_table[] = {
|
||||
{
|
||||
.name = "ams-delta-nand",
|
||||
}, {
|
||||
/* sentinel */
|
||||
},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
|
||||
|
||||
static struct platform_driver gpio_nand_driver = {
|
||||
.probe = gpio_nand_probe,
|
||||
.remove = gpio_nand_remove,
|
||||
.id_table = gpio_nand_plat_id_table,
|
||||
.driver = {
|
||||
.name = "ams-delta-nand",
|
||||
.of_match_table = of_match_ptr(gpio_nand_of_id_table),
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(ams_delta_nand_driver);
|
||||
module_platform_driver(gpio_nand_driver);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
|
||||
|
|
|
@ -102,6 +102,45 @@ struct brcm_nand_dma_desc {
|
|||
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
|
||||
#define NAND_POLL_STATUS_TIMEOUT_MS 100
|
||||
|
||||
#define EDU_CMD_WRITE 0x00
|
||||
#define EDU_CMD_READ 0x01
|
||||
#define EDU_STATUS_ACTIVE BIT(0)
|
||||
#define EDU_ERR_STATUS_ERRACK BIT(0)
|
||||
#define EDU_DONE_MASK GENMASK(1, 0)
|
||||
|
||||
#define EDU_CONFIG_MODE_NAND BIT(0)
|
||||
#define EDU_CONFIG_SWAP_BYTE BIT(1)
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
|
||||
#else
|
||||
#define EDU_CONFIG_SWAP_CFG 0
|
||||
#endif
|
||||
|
||||
/* edu registers */
|
||||
enum edu_reg {
|
||||
EDU_CONFIG = 0,
|
||||
EDU_DRAM_ADDR,
|
||||
EDU_EXT_ADDR,
|
||||
EDU_LENGTH,
|
||||
EDU_CMD,
|
||||
EDU_STOP,
|
||||
EDU_STATUS,
|
||||
EDU_DONE,
|
||||
EDU_ERR_STATUS,
|
||||
};
|
||||
|
||||
static const u16 edu_regs[] = {
|
||||
[EDU_CONFIG] = 0x00,
|
||||
[EDU_DRAM_ADDR] = 0x04,
|
||||
[EDU_EXT_ADDR] = 0x08,
|
||||
[EDU_LENGTH] = 0x0c,
|
||||
[EDU_CMD] = 0x10,
|
||||
[EDU_STOP] = 0x14,
|
||||
[EDU_STATUS] = 0x18,
|
||||
[EDU_DONE] = 0x1c,
|
||||
[EDU_ERR_STATUS] = 0x20,
|
||||
};
|
||||
|
||||
/* flash_dma registers */
|
||||
enum flash_dma_reg {
|
||||
FLASH_DMA_REVISION = 0,
|
||||
|
@ -167,6 +206,8 @@ enum {
|
|||
BRCMNAND_HAS_WP = BIT(3),
|
||||
};
|
||||
|
||||
struct brcmnand_host;
|
||||
|
||||
struct brcmnand_controller {
|
||||
struct device *dev;
|
||||
struct nand_controller controller;
|
||||
|
@ -185,17 +226,32 @@ struct brcmnand_controller {
|
|||
|
||||
int cmd_pending;
|
||||
bool dma_pending;
|
||||
bool edu_pending;
|
||||
struct completion done;
|
||||
struct completion dma_done;
|
||||
struct completion edu_done;
|
||||
|
||||
/* List of NAND hosts (one for each chip-select) */
|
||||
struct list_head host_list;
|
||||
|
||||
/* EDU info, per-transaction */
|
||||
const u16 *edu_offsets;
|
||||
void __iomem *edu_base;
|
||||
int edu_irq;
|
||||
int edu_count;
|
||||
u64 edu_dram_addr;
|
||||
u32 edu_ext_addr;
|
||||
u32 edu_cmd;
|
||||
u32 edu_config;
|
||||
|
||||
/* flash_dma reg */
|
||||
const u16 *flash_dma_offsets;
|
||||
struct brcm_nand_dma_desc *dma_desc;
|
||||
dma_addr_t dma_pa;
|
||||
|
||||
int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
|
||||
u32 len, u8 dma_cmd);
|
||||
|
||||
/* in-memory cache of the FLASH_CACHE, used only for some commands */
|
||||
u8 flash_cache[FC_BYTES];
|
||||
|
||||
|
@ -216,6 +272,7 @@ struct brcmnand_controller {
|
|||
u32 nand_cs_nand_xor;
|
||||
u32 corr_stat_threshold;
|
||||
u32 flash_dma_mode;
|
||||
u32 flash_edu_mode;
|
||||
bool pio_poll_mode;
|
||||
};
|
||||
|
||||
|
@ -657,6 +714,22 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
|
|||
__raw_writel(val, ctrl->nand_fc + word * 4);
|
||||
}
|
||||
|
||||
static inline void edu_writel(struct brcmnand_controller *ctrl,
|
||||
enum edu_reg reg, u32 val)
|
||||
{
|
||||
u16 offs = ctrl->edu_offsets[reg];
|
||||
|
||||
brcmnand_writel(val, ctrl->edu_base + offs);
|
||||
}
|
||||
|
||||
static inline u32 edu_readl(struct brcmnand_controller *ctrl,
|
||||
enum edu_reg reg)
|
||||
{
|
||||
u16 offs = ctrl->edu_offsets[reg];
|
||||
|
||||
return brcmnand_readl(ctrl->edu_base + offs);
|
||||
}
|
||||
|
||||
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
|
||||
{
|
||||
|
||||
|
@ -926,6 +999,16 @@ static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
|
|||
return ctrl->flash_dma_base;
|
||||
}
|
||||
|
||||
static inline bool has_edu(struct brcmnand_controller *ctrl)
|
||||
{
|
||||
return ctrl->edu_base;
|
||||
}
|
||||
|
||||
static inline bool use_dma(struct brcmnand_controller *ctrl)
|
||||
{
|
||||
return has_flash_dma(ctrl) || has_edu(ctrl);
|
||||
}
|
||||
|
||||
static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
|
||||
{
|
||||
if (ctrl->pio_poll_mode)
|
||||
|
@ -1299,6 +1382,52 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
|
|||
return tbytes;
|
||||
}
|
||||
|
||||
static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
|
||||
{
|
||||
/* initialize edu */
|
||||
edu_writel(ctrl, EDU_ERR_STATUS, 0);
|
||||
edu_readl(ctrl, EDU_ERR_STATUS);
|
||||
edu_writel(ctrl, EDU_DONE, 0);
|
||||
edu_writel(ctrl, EDU_DONE, 0);
|
||||
edu_writel(ctrl, EDU_DONE, 0);
|
||||
edu_writel(ctrl, EDU_DONE, 0);
|
||||
edu_readl(ctrl, EDU_DONE);
|
||||
}
|
||||
|
||||
/* edu irq */
|
||||
static irqreturn_t brcmnand_edu_irq(int irq, void *data)
|
||||
{
|
||||
struct brcmnand_controller *ctrl = data;
|
||||
|
||||
if (ctrl->edu_count) {
|
||||
ctrl->edu_count--;
|
||||
while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
|
||||
udelay(1);
|
||||
edu_writel(ctrl, EDU_DONE, 0);
|
||||
edu_readl(ctrl, EDU_DONE);
|
||||
}
|
||||
|
||||
if (ctrl->edu_count) {
|
||||
ctrl->edu_dram_addr += FC_BYTES;
|
||||
ctrl->edu_ext_addr += FC_BYTES;
|
||||
|
||||
edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
|
||||
edu_readl(ctrl, EDU_DRAM_ADDR);
|
||||
edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
|
||||
edu_readl(ctrl, EDU_EXT_ADDR);
|
||||
|
||||
mb(); /* flush previous writes */
|
||||
edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
|
||||
edu_readl(ctrl, EDU_CMD);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
complete(&ctrl->edu_done);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
|
||||
{
|
||||
struct brcmnand_controller *ctrl = data;
|
||||
|
@ -1307,6 +1436,16 @@ static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
|
|||
if (ctrl->dma_pending)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
/* check if you need to piggy back on the ctrlrdy irq */
|
||||
if (ctrl->edu_pending) {
|
||||
if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
|
||||
/* Discard interrupts while using dedicated edu irq */
|
||||
return IRQ_HANDLED;
|
||||
|
||||
/* no registered edu irq, call handler */
|
||||
return brcmnand_edu_irq(irq, data);
|
||||
}
|
||||
|
||||
complete(&ctrl->done);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -1644,6 +1783,81 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Kick EDU engine
|
||||
*/
|
||||
static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
|
||||
u32 len, u8 cmd)
|
||||
{
|
||||
struct brcmnand_controller *ctrl = host->ctrl;
|
||||
unsigned long timeo = msecs_to_jiffies(200);
|
||||
int ret = 0;
|
||||
int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||||
u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
|
||||
unsigned int trans = len >> FC_SHIFT;
|
||||
dma_addr_t pa;
|
||||
|
||||
pa = dma_map_single(ctrl->dev, buf, len, dir);
|
||||
if (dma_mapping_error(ctrl->dev, pa)) {
|
||||
dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ctrl->edu_pending = true;
|
||||
ctrl->edu_dram_addr = pa;
|
||||
ctrl->edu_ext_addr = addr;
|
||||
ctrl->edu_cmd = edu_cmd;
|
||||
ctrl->edu_count = trans;
|
||||
|
||||
edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
|
||||
edu_readl(ctrl, EDU_DRAM_ADDR);
|
||||
edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
|
||||
edu_readl(ctrl, EDU_EXT_ADDR);
|
||||
edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
|
||||
edu_readl(ctrl, EDU_LENGTH);
|
||||
|
||||
/* Start edu engine */
|
||||
mb(); /* flush previous writes */
|
||||
edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
|
||||
edu_readl(ctrl, EDU_CMD);
|
||||
|
||||
if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
|
||||
dev_err(ctrl->dev,
|
||||
"timeout waiting for EDU; status %#x, error status %#x\n",
|
||||
edu_readl(ctrl, EDU_STATUS),
|
||||
edu_readl(ctrl, EDU_ERR_STATUS));
|
||||
}
|
||||
|
||||
dma_unmap_single(ctrl->dev, pa, len, dir);
|
||||
|
||||
/* for program page check NAND status */
|
||||
if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
|
||||
INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
|
||||
edu_cmd == EDU_CMD_WRITE) {
|
||||
dev_info(ctrl->dev, "program failed at %llx\n",
|
||||
(unsigned long long)addr);
|
||||
ret = -EIO;
|
||||
}
|
||||
|
||||
/* Make sure the EDU status is clean */
|
||||
if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
|
||||
dev_warn(ctrl->dev, "EDU still active: %#x\n",
|
||||
edu_readl(ctrl, EDU_STATUS));
|
||||
|
||||
if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
|
||||
dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
|
||||
(unsigned long long)addr);
|
||||
ret = -EIO;
|
||||
}
|
||||
|
||||
ctrl->edu_pending = false;
|
||||
brcmnand_edu_init(ctrl);
|
||||
edu_writel(ctrl, EDU_STOP, 0); /* force stop */
|
||||
edu_readl(ctrl, EDU_STOP);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct a FLASH_DMA descriptor as part of a linked list. You must know the
|
||||
* following ahead of time:
|
||||
|
@ -1850,9 +2064,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
try_dmaread:
|
||||
brcmnand_clear_ecc_addr(ctrl);
|
||||
|
||||
if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
|
||||
err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
|
||||
CMD_PAGE_READ);
|
||||
if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
|
||||
err = ctrl->dma_trans(host, addr, buf,
|
||||
trans * FC_BYTES,
|
||||
CMD_PAGE_READ);
|
||||
|
||||
if (err) {
|
||||
if (mtd_is_bitflip_or_eccerr(err))
|
||||
err_addr = addr;
|
||||
|
@ -1988,10 +2204,12 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
for (i = 0; i < ctrl->max_oob; i += 4)
|
||||
oob_reg_write(ctrl, i, 0xffffffff);
|
||||
|
||||
if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
|
||||
if (brcmnand_dma_trans(host, addr, (u32 *)buf,
|
||||
mtd->writesize, CMD_PROGRAM_PAGE))
|
||||
if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
|
||||
if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
|
||||
CMD_PROGRAM_PAGE))
|
||||
|
||||
ret = -EIO;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -2494,6 +2712,8 @@ static int brcmnand_suspend(struct device *dev)
|
|||
|
||||
if (has_flash_dma(ctrl))
|
||||
ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
|
||||
else if (has_edu(ctrl))
|
||||
ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2508,6 +2728,14 @@ static int brcmnand_resume(struct device *dev)
|
|||
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
|
||||
}
|
||||
|
||||
if (has_edu(ctrl))
|
||||
ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
|
||||
else {
|
||||
edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
|
||||
edu_readl(ctrl, EDU_CONFIG);
|
||||
brcmnand_edu_init(ctrl);
|
||||
}
|
||||
|
||||
brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
|
||||
brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
|
||||
brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
|
||||
|
@ -2553,6 +2781,49 @@ MODULE_DEVICE_TABLE(of, brcmnand_of_match);
|
|||
/***********************************************************************
|
||||
* Platform driver setup (per controller)
|
||||
***********************************************************************/
|
||||
static int brcmnand_edu_setup(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
|
||||
if (res) {
|
||||
ctrl->edu_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(ctrl->edu_base))
|
||||
return PTR_ERR(ctrl->edu_base);
|
||||
|
||||
ctrl->edu_offsets = edu_regs;
|
||||
|
||||
edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
|
||||
EDU_CONFIG_SWAP_CFG);
|
||||
edu_readl(ctrl, EDU_CONFIG);
|
||||
|
||||
/* initialize edu */
|
||||
brcmnand_edu_init(ctrl);
|
||||
|
||||
ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
|
||||
if (ctrl->edu_irq < 0) {
|
||||
dev_warn(dev,
|
||||
"FLASH EDU enabled, using ctlrdy irq\n");
|
||||
} else {
|
||||
ret = devm_request_irq(dev, ctrl->edu_irq,
|
||||
brcmnand_edu_irq, 0,
|
||||
"brcmnand-edu", ctrl);
|
||||
if (ret < 0) {
|
||||
dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
|
||||
ctrl->edu_irq, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_info(dev, "FLASH EDU enabled using irq %u\n",
|
||||
ctrl->edu_irq);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
|
||||
{
|
||||
|
@ -2578,6 +2849,7 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
|
|||
|
||||
init_completion(&ctrl->done);
|
||||
init_completion(&ctrl->dma_done);
|
||||
init_completion(&ctrl->edu_done);
|
||||
nand_controller_init(&ctrl->controller);
|
||||
ctrl->controller.ops = &brcmnand_controller_ops;
|
||||
INIT_LIST_HEAD(&ctrl->host_list);
|
||||
|
@ -2675,6 +2947,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
|
|||
}
|
||||
|
||||
dev_info(dev, "enabling FLASH_DMA\n");
|
||||
/* set flash dma transfer function to call */
|
||||
ctrl->dma_trans = brcmnand_dma_trans;
|
||||
} else {
|
||||
ret = brcmnand_edu_setup(pdev);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
/* set edu transfer function to call */
|
||||
ctrl->dma_trans = brcmnand_edu_trans;
|
||||
}
|
||||
|
||||
/* Disable automatic device ID config, direct addressing */
|
||||
|
|
|
@ -30,7 +30,6 @@
|
|||
* Generic mode is used for executing rest of commands.
|
||||
*/
|
||||
|
||||
#define MAX_OOB_SIZE_PER_SECTOR 32
|
||||
#define MAX_ADDRESS_CYC 6
|
||||
#define MAX_ERASE_ADDRESS_CYC 3
|
||||
#define MAX_DATA_SIZE 0xFFFC
|
||||
|
@ -190,6 +189,7 @@
|
|||
|
||||
/* BCH Engine identification register 3. */
|
||||
#define BCH_CFG_3 0x844
|
||||
#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
|
||||
|
||||
/* Ready/Busy# line status. */
|
||||
#define RBN_SETINGS 0x1004
|
||||
|
@ -499,6 +499,7 @@ struct cdns_nand_ctrl {
|
|||
|
||||
unsigned long assigned_cs;
|
||||
struct list_head chips;
|
||||
u8 bch_metadata_size;
|
||||
};
|
||||
|
||||
struct cdns_nand_chip {
|
||||
|
@ -997,6 +998,7 @@ static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
|
|||
return status;
|
||||
|
||||
cadence_nand_reset_irq(cdns_ctrl);
|
||||
reinit_completion(&cdns_ctrl->complete);
|
||||
|
||||
writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
|
||||
cdns_ctrl->reg + CMD_REG2);
|
||||
|
@ -1077,6 +1079,14 @@ static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
|
|||
int max_step_size = 0, nstrengths, i;
|
||||
u32 reg;
|
||||
|
||||
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
|
||||
cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
|
||||
if (cdns_ctrl->bch_metadata_size < 4) {
|
||||
dev_err(cdns_ctrl->dev,
|
||||
"Driver needs at least 4 bytes of BCH meta data\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
|
||||
cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
|
||||
cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
|
||||
|
@ -1170,7 +1180,8 @@ static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
|
|||
writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
|
||||
|
||||
cadence_nand_get_caps(cdns_ctrl);
|
||||
cadence_nand_read_bch_caps(cdns_ctrl);
|
||||
if (cadence_nand_read_bch_caps(cdns_ctrl))
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* Set IO width access to 8.
|
||||
|
@ -2585,9 +2596,8 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
|
|||
{
|
||||
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
|
||||
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
|
||||
u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
|
||||
u32 ecc_size;
|
||||
struct mtd_info *mtd = nand_to_mtd(chip);
|
||||
u32 max_oob_data_size;
|
||||
int ret;
|
||||
|
||||
if (chip->options & NAND_BUSWIDTH_16) {
|
||||
|
@ -2603,12 +2613,9 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
|
|||
chip->options |= NAND_NO_SUBPAGE_WRITE;
|
||||
|
||||
cdns_chip->bbm_offs = chip->badblockpos;
|
||||
if (chip->options & NAND_BUSWIDTH_16) {
|
||||
cdns_chip->bbm_offs &= ~0x01;
|
||||
cdns_chip->bbm_len = 2;
|
||||
} else {
|
||||
cdns_chip->bbm_len = 1;
|
||||
}
|
||||
cdns_chip->bbm_offs &= ~0x01;
|
||||
/* this value should be even number */
|
||||
cdns_chip->bbm_len = 2;
|
||||
|
||||
ret = nand_ecc_choose_conf(chip,
|
||||
&cdns_ctrl->ecc_caps,
|
||||
|
@ -2625,13 +2632,12 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
|
|||
/* Error correction configuration. */
|
||||
cdns_chip->sector_size = chip->ecc.size;
|
||||
cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
|
||||
ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
|
||||
|
||||
cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
|
||||
|
||||
max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
|
||||
|
||||
if (cdns_chip->avail_oob_size > max_oob_data_size)
|
||||
cdns_chip->avail_oob_size = max_oob_data_size;
|
||||
if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
|
||||
cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
|
||||
|
||||
if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
|
||||
> mtd->oobsize)
|
||||
|
|
|
@ -1317,6 +1317,7 @@ int denali_init(struct denali_controller *denali)
|
|||
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
|
||||
iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
|
||||
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
|
||||
iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
|
||||
|
||||
denali_clear_irq_all(denali);
|
||||
|
||||
|
|
|
@ -328,7 +328,7 @@ struct denali_chip {
|
|||
struct nand_chip chip;
|
||||
struct list_head node;
|
||||
unsigned int nsels;
|
||||
struct denali_chip_sel sels[0];
|
||||
struct denali_chip_sel sels[];
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -1148,20 +1148,21 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
|
|||
{
|
||||
struct platform_device *pdev = this->pdev;
|
||||
struct dma_chan *dma_chan;
|
||||
int ret = 0;
|
||||
|
||||
/* request dma channel */
|
||||
dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
|
||||
if (!dma_chan) {
|
||||
dev_err(this->dev, "Failed to request DMA channel.\n");
|
||||
goto acquire_err;
|
||||
dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
|
||||
if (IS_ERR(dma_chan)) {
|
||||
ret = PTR_ERR(dma_chan);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(this->dev, "DMA channel request failed: %d\n",
|
||||
ret);
|
||||
release_dma_channels(this);
|
||||
} else {
|
||||
this->dma_chans[0] = dma_chan;
|
||||
}
|
||||
|
||||
this->dma_chans[0] = dma_chan;
|
||||
return 0;
|
||||
|
||||
acquire_err:
|
||||
release_dma_channels(this);
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int gpmi_get_clks(struct gpmi_nand_data *this)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
config MTD_NAND_JZ4780
|
||||
tristate "JZ4780 NAND controller"
|
||||
depends on MIPS || COMPILE_TEST
|
||||
depends on JZ4780_NEMC
|
||||
help
|
||||
Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
|
||||
|
|
|
@ -124,7 +124,6 @@ int ingenic_ecc_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct ingenic_ecc *ecc;
|
||||
struct resource *res;
|
||||
|
||||
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
|
||||
if (!ecc)
|
||||
|
@ -134,8 +133,7 @@ int ingenic_ecc_probe(struct platform_device *pdev)
|
|||
if (!ecc->ops)
|
||||
return -EINVAL;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
ecc->base = devm_ioremap_resource(dev, res);
|
||||
ecc->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(ecc->base))
|
||||
return PTR_ERR(ecc->base);
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#define NAND_MFR_SAMSUNG 0xec
|
||||
#define NAND_MFR_SANDISK 0x45
|
||||
#define NAND_MFR_STMICRO 0x20
|
||||
/* Kioxia is new name of Toshiba memory. */
|
||||
#define NAND_MFR_TOSHIBA 0x98
|
||||
#define NAND_MFR_WINBOND 0xef
|
||||
|
||||
|
|
|
@ -334,7 +334,7 @@ struct marvell_nand_chip {
|
|||
int addr_cyc;
|
||||
int selected_die;
|
||||
unsigned int nsels;
|
||||
struct marvell_nand_chip_sel sels[0];
|
||||
struct marvell_nand_chip_sel sels[];
|
||||
};
|
||||
|
||||
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
|
||||
|
@ -2743,16 +2743,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
|
||||
if (!nfc->dma_chan) {
|
||||
dev_err(nfc->dev,
|
||||
"Unable to request data DMA channel\n");
|
||||
return -ENODEV;
|
||||
nfc->dma_chan = dma_request_chan(nfc->dev, "data");
|
||||
if (IS_ERR(nfc->dma_chan)) {
|
||||
ret = PTR_ERR(nfc->dma_chan);
|
||||
nfc->dma_chan = NULL;
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(nfc->dev, "DMA channel request failed: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!r)
|
||||
return -ENXIO;
|
||||
if (!r) {
|
||||
ret = -ENXIO;
|
||||
goto release_channel;
|
||||
}
|
||||
|
||||
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
|
@ -2763,7 +2768,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
|
|||
ret = dmaengine_slave_config(nfc->dma_chan, &config);
|
||||
if (ret < 0) {
|
||||
dev_err(nfc->dev, "Failed to configure DMA channel\n");
|
||||
return ret;
|
||||
goto release_channel;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2773,12 +2778,20 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
|
|||
* the provided buffer.
|
||||
*/
|
||||
nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
|
||||
if (!nfc->dma_buf)
|
||||
return -ENOMEM;
|
||||
if (!nfc->dma_buf) {
|
||||
ret = -ENOMEM;
|
||||
goto release_channel;
|
||||
}
|
||||
|
||||
nfc->use_dma = true;
|
||||
|
||||
return 0;
|
||||
|
||||
release_channel:
|
||||
dma_release_channel(nfc->dma_chan);
|
||||
nfc->dma_chan = NULL;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void marvell_nfc_reset(struct marvell_nfc *nfc)
|
||||
|
@ -2920,10 +2933,13 @@ static int marvell_nfc_probe(struct platform_device *pdev)
|
|||
|
||||
ret = marvell_nand_chips_init(dev, nfc);
|
||||
if (ret)
|
||||
goto unprepare_reg_clk;
|
||||
goto release_dma;
|
||||
|
||||
return 0;
|
||||
|
||||
release_dma:
|
||||
if (nfc->use_dma)
|
||||
dma_release_channel(nfc->dma_chan);
|
||||
unprepare_reg_clk:
|
||||
clk_disable_unprepare(nfc->reg_clk);
|
||||
unprepare_core_clk:
|
||||
|
|
|
@ -118,7 +118,7 @@ struct meson_nfc_nand_chip {
|
|||
u8 *data_buf;
|
||||
__le64 *info_buf;
|
||||
u32 nsels;
|
||||
u8 sels[0];
|
||||
u8 sels[];
|
||||
};
|
||||
|
||||
struct meson_nand_ecc {
|
||||
|
|
|
@ -131,7 +131,7 @@ struct mtk_nfc_nand_chip {
|
|||
u32 spare_per_sector;
|
||||
|
||||
int nsels;
|
||||
u8 sels[0];
|
||||
u8 sels[];
|
||||
/* nothing after this field */
|
||||
};
|
||||
|
||||
|
|
|
@ -683,7 +683,12 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
|
||||
/*
|
||||
* +1 below is necessary because if we are now in the last fraction
|
||||
* of jiffy and msecs_to_jiffies is 1 then we will wait only that
|
||||
* small jiffy fraction - possibly leading to false timeout
|
||||
*/
|
||||
timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
|
||||
do {
|
||||
ret = nand_read_data_op(chip, &status, sizeof(status), true);
|
||||
if (ret)
|
||||
|
@ -4321,16 +4326,22 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
|
|||
/**
|
||||
* nand_suspend - [MTD Interface] Suspend the NAND flash
|
||||
* @mtd: MTD device structure
|
||||
*
|
||||
* Returns 0 for success or negative error code otherwise.
|
||||
*/
|
||||
static int nand_suspend(struct mtd_info *mtd)
|
||||
{
|
||||
struct nand_chip *chip = mtd_to_nand(mtd);
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&chip->lock);
|
||||
chip->suspended = 1;
|
||||
if (chip->suspend)
|
||||
ret = chip->suspend(chip);
|
||||
if (!ret)
|
||||
chip->suspended = 1;
|
||||
mutex_unlock(&chip->lock);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4342,11 +4353,14 @@ static void nand_resume(struct mtd_info *mtd)
|
|||
struct nand_chip *chip = mtd_to_nand(mtd);
|
||||
|
||||
mutex_lock(&chip->lock);
|
||||
if (chip->suspended)
|
||||
if (chip->suspended) {
|
||||
if (chip->resume)
|
||||
chip->resume(chip);
|
||||
chip->suspended = 0;
|
||||
else
|
||||
} else {
|
||||
pr_err("%s called for a chip which is not in suspended state\n",
|
||||
__func__);
|
||||
}
|
||||
mutex_unlock(&chip->lock);
|
||||
}
|
||||
|
||||
|
@ -4360,6 +4374,38 @@ static void nand_shutdown(struct mtd_info *mtd)
|
|||
nand_suspend(mtd);
|
||||
}
|
||||
|
||||
/**
|
||||
* nand_lock - [MTD Interface] Lock the NAND flash
|
||||
* @mtd: MTD device structure
|
||||
* @ofs: offset byte address
|
||||
* @len: number of bytes to lock (must be a multiple of block/page size)
|
||||
*/
|
||||
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
||||
{
|
||||
struct nand_chip *chip = mtd_to_nand(mtd);
|
||||
|
||||
if (!chip->lock_area)
|
||||
return -ENOTSUPP;
|
||||
|
||||
return chip->lock_area(chip, ofs, len);
|
||||
}
|
||||
|
||||
/**
|
||||
* nand_unlock - [MTD Interface] Unlock the NAND flash
|
||||
* @mtd: MTD device structure
|
||||
* @ofs: offset byte address
|
||||
* @len: number of bytes to unlock (must be a multiple of block/page size)
|
||||
*/
|
||||
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
|
||||
{
|
||||
struct nand_chip *chip = mtd_to_nand(mtd);
|
||||
|
||||
if (!chip->unlock_area)
|
||||
return -ENOTSUPP;
|
||||
|
||||
return chip->unlock_area(chip, ofs, len);
|
||||
}
|
||||
|
||||
/* Set default functions */
|
||||
static void nand_set_defaults(struct nand_chip *chip)
|
||||
{
|
||||
|
@ -5786,8 +5832,8 @@ static int nand_scan_tail(struct nand_chip *chip)
|
|||
mtd->_read_oob = nand_read_oob;
|
||||
mtd->_write_oob = nand_write_oob;
|
||||
mtd->_sync = nand_sync;
|
||||
mtd->_lock = NULL;
|
||||
mtd->_unlock = NULL;
|
||||
mtd->_lock = nand_lock;
|
||||
mtd->_unlock = nand_unlock;
|
||||
mtd->_suspend = nand_suspend;
|
||||
mtd->_resume = nand_resume;
|
||||
mtd->_reboot = nand_shutdown;
|
||||
|
@ -5907,6 +5953,8 @@ void nand_cleanup(struct nand_chip *chip)
|
|||
chip->ecc.algo == NAND_ECC_BCH)
|
||||
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
|
||||
|
||||
nanddev_cleanup(&chip->base);
|
||||
|
||||
/* Free bad block table memory */
|
||||
kfree(chip->bbt);
|
||||
kfree(chip->data_buf);
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
struct hynix_read_retry {
|
||||
int nregs;
|
||||
const u8 *regs;
|
||||
u8 values[0];
|
||||
u8 values[];
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -6,11 +6,31 @@
|
|||
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
|
||||
*/
|
||||
|
||||
#include "linux/delay.h"
|
||||
#include "internals.h"
|
||||
|
||||
#define MACRONIX_READ_RETRY_BIT BIT(0)
|
||||
#define MACRONIX_NUM_READ_RETRY_MODES 6
|
||||
|
||||
#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
|
||||
#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
|
||||
#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
|
||||
|
||||
#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
|
||||
#define MACRONIX_RANDOMIZER_BIT BIT(1)
|
||||
#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
|
||||
#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
|
||||
#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
|
||||
#define MACRONIX_RANDOMIZER_MODE_ENTER \
|
||||
(MACRONIX_RANDOMIZER_ENPGM | \
|
||||
MACRONIX_RANDOMIZER_RANDEN | \
|
||||
MACRONIX_RANDOMIZER_RANDOPT)
|
||||
#define MACRONIX_RANDOMIZER_MODE_EXIT \
|
||||
(MACRONIX_RANDOMIZER_RANDEN | \
|
||||
MACRONIX_RANDOMIZER_RANDOPT)
|
||||
|
||||
#define MXIC_CMD_POWER_DOWN 0xB9
|
||||
|
||||
struct nand_onfi_vendor_macronix {
|
||||
u8 reserved;
|
||||
u8 reliability_func;
|
||||
|
@ -29,15 +49,83 @@ static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
|
|||
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
|
||||
}
|
||||
|
||||
static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
|
||||
{
|
||||
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
|
||||
int ret;
|
||||
|
||||
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
|
||||
feature);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (feature[0])
|
||||
return feature[0];
|
||||
|
||||
feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
|
||||
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
|
||||
feature);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* RANDEN and RANDOPT OTP bits are programmed */
|
||||
feature[0] = 0x0;
|
||||
ret = nand_prog_page_op(chip, 0, 0, feature, 1);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
|
||||
feature);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
|
||||
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
|
||||
feature);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void macronix_nand_onfi_init(struct nand_chip *chip)
|
||||
{
|
||||
struct nand_parameters *p = &chip->parameters;
|
||||
struct nand_onfi_vendor_macronix *mxic;
|
||||
struct device_node *dn = nand_get_flash_node(chip);
|
||||
int rand_otp = 0;
|
||||
int ret;
|
||||
|
||||
if (!p->onfi)
|
||||
return;
|
||||
|
||||
if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
|
||||
rand_otp = 1;
|
||||
|
||||
mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
|
||||
/* Subpage write is prohibited in randomizer operatoin */
|
||||
if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
|
||||
mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
|
||||
if (p->supports_set_get_features) {
|
||||
bitmap_set(p->set_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
|
||||
bitmap_set(p->get_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
|
||||
ret = macronix_nand_randomizer_check_enable(chip);
|
||||
if (ret < 0) {
|
||||
bitmap_clear(p->set_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
|
||||
1);
|
||||
bitmap_clear(p->get_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
|
||||
1);
|
||||
pr_info("Macronix NAND randomizer failed\n");
|
||||
} else {
|
||||
pr_info("Macronix NAND randomizer enabled\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
|
||||
return;
|
||||
|
||||
|
@ -91,6 +179,143 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
|
|||
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Macronix NAND supports Block Protection via a Protection (PT) pin,
|
||||
* active high at power-on, which protects the entire chip even when #WP is
|
||||
* disabled. The lock/unlock protection area can be partitioned according to
|
||||
* protection bits, i.e. upper 1/2 locked, upper 1/4 locked and so on.
|
||||
*/
|
||||
static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
|
||||
{
|
||||
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
|
||||
int ret;
|
||||
|
||||
feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
|
||||
nand_select_target(chip, 0);
|
||||
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
|
||||
feature);
|
||||
nand_deselect_target(chip);
|
||||
if (ret)
|
||||
pr_err("%s all blocks failed\n", __func__);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
|
||||
{
|
||||
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
|
||||
int ret;
|
||||
|
||||
feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
|
||||
nand_select_target(chip, 0);
|
||||
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
|
||||
feature);
|
||||
nand_deselect_target(chip);
|
||||
if (ret)
|
||||
pr_err("%s all blocks failed\n", __func__);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void macronix_nand_block_protection_support(struct nand_chip *chip)
|
||||
{
|
||||
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
|
||||
int ret;
|
||||
|
||||
bitmap_set(chip->parameters.get_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
|
||||
|
||||
feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
|
||||
nand_select_target(chip, 0);
|
||||
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
|
||||
feature);
|
||||
nand_deselect_target(chip);
|
||||
if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
|
||||
if (ret)
|
||||
pr_err("Block protection check failed\n");
|
||||
|
||||
bitmap_clear(chip->parameters.get_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
bitmap_set(chip->parameters.set_feature_list,
|
||||
ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
|
||||
|
||||
chip->lock_area = mxic_nand_lock;
|
||||
chip->unlock_area = mxic_nand_unlock;
|
||||
}
|
||||
|
||||
static int nand_power_down_op(struct nand_chip *chip)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (nand_has_exec_op(chip)) {
|
||||
struct nand_op_instr instrs[] = {
|
||||
NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
|
||||
};
|
||||
|
||||
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
|
||||
|
||||
ret = nand_exec_op(chip, &op);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
} else {
|
||||
chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mxic_nand_suspend(struct nand_chip *chip)
|
||||
{
|
||||
int ret;
|
||||
|
||||
nand_select_target(chip, 0);
|
||||
ret = nand_power_down_op(chip);
|
||||
if (ret < 0)
|
||||
pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
|
||||
nand_deselect_target(chip);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mxic_nand_resume(struct nand_chip *chip)
|
||||
{
|
||||
/*
|
||||
* Toggle the #CS pin to resume the NAND device, regardless
|
||||
* of the state of the other CLE, #WE and #RE pins.
|
||||
* A NAND controller ensures it is able to assert/de-assert #CS
|
||||
* by sending any byte over the NAND bus.
|
||||
* e.g.,
|
||||
* NAND power down command or reset command w/o R/B# status checking.
|
||||
*/
|
||||
nand_select_target(chip, 0);
|
||||
nand_power_down_op(chip);
|
||||
/* The minimum of a recovery time tRDP is 35 us */
|
||||
usleep_range(35, 100);
|
||||
nand_deselect_target(chip);
|
||||
}
|
||||
|
||||
static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
|
||||
{
|
||||
int i;
|
||||
static const char * const deep_power_down_dev[] = {
|
||||
"MX30UF1G28AD",
|
||||
"MX30UF2G28AD",
|
||||
"MX30UF4G28AD",
|
||||
};
|
||||
|
||||
i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
|
||||
chip->parameters.model);
|
||||
if (i < 0)
|
||||
return;
|
||||
|
||||
chip->suspend = mxic_nand_suspend;
|
||||
chip->resume = mxic_nand_resume;
|
||||
}
|
||||
|
||||
static int macronix_nand_init(struct nand_chip *chip)
|
||||
{
|
||||
if (nand_is_slc(chip))
|
||||
|
@ -98,6 +323,8 @@ static int macronix_nand_init(struct nand_chip *chip)
|
|||
|
||||
macronix_nand_fix_broken_get_timings(chip);
|
||||
macronix_nand_onfi_init(chip);
|
||||
macronix_nand_block_protection_support(chip);
|
||||
macronix_nand_deep_power_down_support(chip);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -14,14 +14,68 @@
|
|||
/* Recommended to rewrite for BENAND */
|
||||
#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
|
||||
|
||||
/* ECC Status Read Command for BENAND */
|
||||
#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
|
||||
|
||||
/* ECC Status Mask for BENAND */
|
||||
#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
|
||||
|
||||
/* Uncorrectable Error for BENAND */
|
||||
#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
|
||||
|
||||
/* Max ECC Steps for BENAND */
|
||||
#define TOSHIBA_NAND_MAX_ECC_STEPS 8
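/*
 * Illustration (not from the patch itself): the 0x7A ECC status read
 * returns one byte per ECC step (up to TOSHIBA_NAND_MAX_ECC_STEPS), and
 * only the low nibble carries information, e.g.
 *
 *   (ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK) == 0x03 -> 3 bitflips
 *   (ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK) == 0x0F -> uncorrectable
 *
 * which is how toshiba_nand_benand_eccstatus() below accumulates
 * mtd->ecc_stats and the max_bitflips return value.
 */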
|
||||
|
||||
static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
|
||||
u8 *buf)
|
||||
{
|
||||
u8 *ecc_status = buf;
|
||||
|
||||
if (nand_has_exec_op(chip)) {
|
||||
const struct nand_sdr_timings *sdr =
|
||||
nand_get_sdr_timings(&chip->data_interface);
|
||||
struct nand_op_instr instrs[] = {
|
||||
NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
|
||||
PSEC_TO_NSEC(sdr->tADL_min)),
|
||||
NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
|
||||
};
|
||||
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
|
||||
|
||||
return nand_exec_op(chip, &op);
|
||||
}
|
||||
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
|
||||
{
|
||||
struct mtd_info *mtd = nand_to_mtd(chip);
|
||||
int ret;
|
||||
unsigned int max_bitflips = 0;
|
||||
u8 status;
|
||||
u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
|
||||
|
||||
/* Check Status */
|
||||
ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
|
||||
if (!ret) {
|
||||
unsigned int i, bitflips = 0;
|
||||
|
||||
for (i = 0; i < chip->ecc.steps; i++) {
|
||||
bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
|
||||
if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
|
||||
mtd->ecc_stats.failed++;
|
||||
} else {
|
||||
mtd->ecc_stats.corrected += bitflips;
|
||||
max_bitflips = max(max_bitflips, bitflips);
|
||||
}
|
||||
}
|
||||
|
||||
return max_bitflips;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fallback to regular status check if
|
||||
* toshiba_nand_benand_read_eccstatus_op() failed.
|
||||
*/
|
||||
ret = nand_status_op(chip, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -108,7 +162,7 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
|
|||
*/
|
||||
if (chip->id.len >= 6 && nand_is_slc(chip) &&
|
||||
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
|
||||
!(chip->id.data[4] & 0x80) /* !BENAND */) {
|
||||
!(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
|
||||
memorg->oobsize = 32 * memorg->pagesize >> 9;
|
||||
mtd->oobsize = memorg->oobsize;
|
||||
}
|
||||
|
|
|
@ -2628,6 +2628,29 @@ static const struct nand_controller_ops qcom_nandc_ops = {
|
|||
.attach_chip = qcom_nand_attach_chip,
|
||||
};
|
||||
|
||||
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
|
||||
{
|
||||
if (nandc->props->is_bam) {
|
||||
if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
|
||||
dma_unmap_single(nandc->dev, nandc->reg_read_dma,
|
||||
MAX_REG_RD *
|
||||
sizeof(*nandc->reg_read_buf),
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (nandc->tx_chan)
|
||||
dma_release_channel(nandc->tx_chan);
|
||||
|
||||
if (nandc->rx_chan)
|
||||
dma_release_channel(nandc->rx_chan);
|
||||
|
||||
if (nandc->cmd_chan)
|
||||
dma_release_channel(nandc->cmd_chan);
|
||||
} else {
|
||||
if (nandc->chan)
|
||||
dma_release_channel(nandc->chan);
|
||||
}
|
||||
}
|
||||
|
||||
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
|
||||
{
|
||||
int ret;
|
||||
|
@ -2673,22 +2696,37 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
|
||||
if (!nandc->tx_chan) {
|
||||
dev_err(nandc->dev, "failed to request tx channel\n");
|
||||
return -ENODEV;
|
||||
nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
|
||||
if (IS_ERR(nandc->tx_chan)) {
|
||||
ret = PTR_ERR(nandc->tx_chan);
|
||||
nandc->tx_chan = NULL;
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(nandc->dev,
|
||||
"tx DMA channel request failed: %d\n",
|
||||
ret);
|
||||
goto unalloc;
|
||||
}
|
||||
|
||||
nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
|
||||
if (!nandc->rx_chan) {
|
||||
dev_err(nandc->dev, "failed to request rx channel\n");
|
||||
return -ENODEV;
|
||||
nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
|
||||
if (IS_ERR(nandc->rx_chan)) {
|
||||
ret = PTR_ERR(nandc->rx_chan);
|
||||
nandc->rx_chan = NULL;
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(nandc->dev,
|
||||
"rx DMA channel request failed: %d\n",
|
||||
ret);
|
||||
goto unalloc;
|
||||
}
|
||||
|
||||
nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
|
||||
if (!nandc->cmd_chan) {
|
||||
dev_err(nandc->dev, "failed to request cmd channel\n");
|
||||
return -ENODEV;
|
||||
nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
|
||||
if (IS_ERR(nandc->cmd_chan)) {
|
||||
ret = PTR_ERR(nandc->cmd_chan);
|
||||
nandc->cmd_chan = NULL;
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(nandc->dev,
|
||||
"cmd DMA channel request failed: %d\n",
|
||||
ret);
|
||||
goto unalloc;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2702,14 +2740,19 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
|
|||
if (!nandc->bam_txn) {
|
||||
dev_err(nandc->dev,
|
||||
"failed to allocate bam transaction\n");
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto unalloc;
|
||||
}
|
||||
} else {
|
||||
nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
|
||||
if (!nandc->chan) {
|
||||
dev_err(nandc->dev,
|
||||
"failed to request slave channel\n");
|
||||
return -ENODEV;
|
||||
nandc->chan = dma_request_chan(nandc->dev, "rxtx");
|
||||
if (IS_ERR(nandc->chan)) {
|
||||
ret = PTR_ERR(nandc->chan);
|
||||
nandc->chan = NULL;
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(nandc->dev,
|
||||
"rxtx DMA channel request failed: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2720,29 +2763,9 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
|
|||
nandc->controller.ops = &qcom_nandc_ops;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
|
||||
{
|
||||
if (nandc->props->is_bam) {
|
||||
if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
|
||||
dma_unmap_single(nandc->dev, nandc->reg_read_dma,
|
||||
MAX_REG_RD *
|
||||
sizeof(*nandc->reg_read_buf),
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (nandc->tx_chan)
|
||||
dma_release_channel(nandc->tx_chan);
|
||||
|
||||
if (nandc->rx_chan)
|
||||
dma_release_channel(nandc->rx_chan);
|
||||
|
||||
if (nandc->cmd_chan)
|
||||
dma_release_channel(nandc->cmd_chan);
|
||||
} else {
|
||||
if (nandc->chan)
|
||||
dma_release_channel(nandc->chan);
|
||||
}
|
||||
unalloc:
|
||||
qcom_nandc_unalloc(nandc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* one time setup of a few nand controller registers */
|
||||
|
|
|
@ -1606,15 +1606,36 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
|
|||
/* DMA configuration */
|
||||
static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
|
||||
{
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
|
||||
fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
|
||||
fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
|
||||
fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx");
|
||||
if (IS_ERR(fmc2->dma_tx_ch)) {
|
||||
ret = PTR_ERR(fmc2->dma_tx_ch);
|
||||
if (ret != -ENODEV)
|
||||
dev_err(fmc2->dev,
|
||||
"failed to request tx DMA channel: %d\n", ret);
|
||||
fmc2->dma_tx_ch = NULL;
|
||||
goto err_dma;
|
||||
}
|
||||
|
||||
if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
|
||||
dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
|
||||
return 0;
|
||||
fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx");
|
||||
if (IS_ERR(fmc2->dma_rx_ch)) {
|
||||
ret = PTR_ERR(fmc2->dma_rx_ch);
|
||||
if (ret != -ENODEV)
|
||||
dev_err(fmc2->dev,
|
||||
"failed to request rx DMA channel: %d\n", ret);
|
||||
fmc2->dma_rx_ch = NULL;
|
||||
goto err_dma;
|
||||
}
|
||||
|
||||
fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc");
|
||||
if (IS_ERR(fmc2->dma_ecc_ch)) {
|
||||
ret = PTR_ERR(fmc2->dma_ecc_ch);
|
||||
if (ret != -ENODEV)
|
||||
dev_err(fmc2->dev,
|
||||
"failed to request ecc DMA channel: %d\n", ret);
|
||||
fmc2->dma_ecc_ch = NULL;
|
||||
goto err_dma;
|
||||
}
|
||||
|
||||
ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
|
||||
|
@ -1635,6 +1656,15 @@ static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
|
|||
init_completion(&fmc2->dma_ecc_complete);
|
||||
|
||||
return 0;
|
||||
|
||||
err_dma:
|
||||
if (ret == -ENODEV) {
|
||||
dev_warn(fmc2->dev,
|
||||
"DMAs not defined in the DT, polling mode is used\n");
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* NAND callbacks setup */
|
||||
|
|
|
@ -195,7 +195,7 @@ struct sunxi_nand_chip {
|
|||
u32 timing_cfg;
|
||||
u32 timing_ctl;
|
||||
int nsels;
|
||||
struct sunxi_nand_chip_sel sels[0];
|
||||
struct sunxi_nand_chip_sel sels[];
|
||||
};
|
||||
|
||||
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
|
||||
|
@ -2123,8 +2123,16 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
goto out_ahb_reset_reassert;
|
||||
|
||||
nfc->dmac = dma_request_slave_channel(dev, "rxtx");
|
||||
if (nfc->dmac) {
|
||||
nfc->dmac = dma_request_chan(dev, "rxtx");
|
||||
if (IS_ERR(nfc->dmac)) {
|
||||
ret = PTR_ERR(nfc->dmac);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
goto out_ahb_reset_reassert;
|
||||
|
||||
/* Ignore errors to fall back to PIO mode */
|
||||
dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
|
||||
nfc->dmac = NULL;
|
||||
} else {
|
||||
struct dma_slave_config dmac_cfg = { };
|
||||
|
||||
dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
|
||||
|
@ -2138,9 +2146,6 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
|
|||
if (nfc->caps->extra_mbus_conf)
|
||||
writel(readl(nfc->regs + NFC_REG_CTL) |
|
||||
NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
|
||||
|
||||
} else {
|
||||
dev_warn(dev, "failed to request rxtx DMA channel\n");
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, nfc);
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include <linux/mtd/spinand.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/spi/spi-mem.h>
|
||||
|
||||
|
@ -370,10 +371,11 @@ static int spinand_wait(struct spinand_device *spinand, u8 *s)
|
|||
return status & STATUS_BUSY ? -ETIMEDOUT : 0;
|
||||
}
|
||||
|
||||
static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
|
||||
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
|
||||
u8 ndummy, u8 *buf)
|
||||
{
|
||||
struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
|
||||
SPINAND_MAX_ID_LEN);
|
||||
struct spi_mem_op op = SPINAND_READID_OP(
|
||||
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
|
||||
int ret;
|
||||
|
||||
ret = spi_mem_exec_op(spinand->spimem, &op);
|
||||
|
@ -568,18 +570,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
|
|||
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
|
||||
{
|
||||
struct spinand_device *spinand = nand_to_spinand(nand);
|
||||
u8 marker[2] = { };
|
||||
struct nand_page_io_req req = {
|
||||
.pos = *pos,
|
||||
.ooblen = 2,
|
||||
.ooblen = sizeof(marker),
|
||||
.ooboffs = 0,
|
||||
.oobbuf.in = spinand->oobbuf,
|
||||
.oobbuf.in = marker,
|
||||
.mode = MTD_OPS_RAW,
|
||||
};
|
||||
|
||||
memset(spinand->oobbuf, 0, 2);
|
||||
spinand_select_target(spinand, pos->target);
|
||||
spinand_read_page(spinand, &req, false);
|
||||
if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
|
||||
if (marker[0] != 0xff || marker[1] != 0xff)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -603,15 +605,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
|
|||
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
|
||||
{
|
||||
struct spinand_device *spinand = nand_to_spinand(nand);
|
||||
u8 marker[2] = { };
|
||||
struct nand_page_io_req req = {
|
||||
.pos = *pos,
|
||||
.ooboffs = 0,
|
||||
.ooblen = 2,
|
||||
.oobbuf.out = spinand->oobbuf,
|
||||
.ooblen = sizeof(marker),
|
||||
.oobbuf.out = marker,
|
||||
.mode = MTD_OPS_RAW,
|
||||
};
|
||||
int ret;
|
||||
|
||||
/* Erase block before marking it bad. */
|
||||
ret = spinand_select_target(spinand, pos->target);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -620,9 +623,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
spinand_erase_op(spinand, pos);
|
||||
|
||||
memset(spinand->oobbuf, 0, 2);
|
||||
return spinand_write_page(spinand, &req);
|
||||
}
|
||||
|
||||
|
@ -762,24 +762,62 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
|
|||
&winbond_spinand_manufacturer,
|
||||
};
|
||||
|
||||
static int spinand_manufacturer_detect(struct spinand_device *spinand)
|
||||
static int spinand_manufacturer_match(struct spinand_device *spinand,
|
||||
enum spinand_readid_method rdid_method)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
|
||||
ret = spinand_manufacturers[i]->ops->detect(spinand);
|
||||
if (ret > 0) {
|
||||
spinand->manufacturer = spinand_manufacturers[i];
|
||||
return 0;
|
||||
} else if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
const struct spinand_manufacturer *manufacturer =
|
||||
spinand_manufacturers[i];
|
||||
|
||||
if (id[0] != manufacturer->id)
|
||||
continue;
|
||||
|
||||
ret = spinand_match_and_init(spinand,
|
||||
manufacturer->chips,
|
||||
manufacturer->nchips,
|
||||
rdid_method);
|
||||
if (ret < 0)
|
||||
continue;
|
||||
|
||||
spinand->manufacturer = manufacturer;
|
||||
return 0;
|
||||
}
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static int spinand_id_detect(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
int ret;
|
||||
|
||||
ret = spinand_read_id_op(spinand, 0, 0, id);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
ret = spinand_read_id_op(spinand, 1, 0, id);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = spinand_manufacturer_match(spinand,
|
||||
SPINAND_READID_METHOD_OPCODE_ADDR);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
ret = spinand_read_id_op(spinand, 0, 1, id);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = spinand_manufacturer_match(spinand,
|
||||
SPINAND_READID_METHOD_OPCODE_DUMMY);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int spinand_manufacturer_init(struct spinand_device *spinand)
|
||||
{
|
||||
if (spinand->manufacturer->ops->init)
|
||||
|
@ -835,9 +873,9 @@ spinand_select_op_variant(struct spinand_device *spinand,
|
|||
* @spinand: SPI NAND object
|
||||
* @table: SPI NAND device description table
|
||||
* @table_size: size of the device description table
|
||||
* @rdid_method: read id method to match
|
||||
*
|
||||
* Should be used by SPI NAND manufacturer drivers when they want to find a
|
||||
* match between a device ID retrieved through the READ_ID command and an
|
||||
* Match between a device ID retrieved through the READ_ID command and an
|
||||
* entry in the SPI NAND description table. If a match is found, the spinand
|
||||
* object will be initialized with information provided by the matching
|
||||
* spinand_info entry.
|
||||
|
@ -846,8 +884,10 @@ spinand_select_op_variant(struct spinand_device *spinand,
|
|||
*/
|
||||
int spinand_match_and_init(struct spinand_device *spinand,
|
||||
const struct spinand_info *table,
|
||||
unsigned int table_size, u16 devid)
|
||||
unsigned int table_size,
|
||||
enum spinand_readid_method rdid_method)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
struct nand_device *nand = spinand_to_nand(spinand);
|
||||
unsigned int i;
|
||||
|
||||
|
@ -855,13 +895,17 @@ int spinand_match_and_init(struct spinand_device *spinand,
|
|||
const struct spinand_info *info = &table[i];
|
||||
const struct spi_mem_op *op;
|
||||
|
||||
if (devid != info->devid)
|
||||
if (rdid_method != info->devid.method)
|
||||
continue;
|
||||
|
||||
if (memcmp(id + 1, info->devid.id, info->devid.len))
|
||||
continue;
|
||||
|
||||
nand->memorg = table[i].memorg;
|
||||
nand->eccreq = table[i].eccreq;
|
||||
spinand->eccinfo = table[i].eccinfo;
|
||||
spinand->flags = table[i].flags;
|
||||
spinand->id.len = 1 + table[i].devid.len;
|
||||
spinand->select_target = table[i].select_target;
|
||||
|
||||
op = spinand_select_op_variant(spinand,
|
||||
|
@ -898,13 +942,7 @@ static int spinand_detect(struct spinand_device *spinand)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = spinand_read_id_op(spinand, spinand->id.data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
spinand->id.len = SPINAND_MAX_ID_LEN;
|
||||
|
||||
ret = spinand_manufacturer_detect(spinand);
|
||||
ret = spinand_id_detect(spinand);
|
||||
if (ret) {
|
||||
dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
|
||||
spinand->id.data);
|
||||
|
|
|
@ -195,7 +195,8 @@ static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
|
|||
}
|
||||
|
||||
static const struct spinand_info gigadevice_spinand_table[] = {
|
||||
SPINAND_INFO("GD5F1GQ4xA", 0xF1,
|
||||
SPINAND_INFO("GD5F1GQ4xA",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -204,7 +205,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|||
0,
|
||||
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
|
||||
gd5fxgq4xa_ecc_get_status)),
|
||||
SPINAND_INFO("GD5F2GQ4xA", 0xF2,
|
||||
SPINAND_INFO("GD5F2GQ4xA",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -213,7 +215,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|||
0,
|
||||
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
|
||||
gd5fxgq4xa_ecc_get_status)),
|
||||
SPINAND_INFO("GD5F4GQ4xA", 0xF4,
|
||||
SPINAND_INFO("GD5F4GQ4xA",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -222,7 +225,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|||
0,
|
||||
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
|
||||
gd5fxgq4xa_ecc_get_status)),
|
||||
SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
|
||||
SPINAND_INFO("GD5F1GQ4UExxG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -231,7 +235,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|||
0,
|
||||
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
|
||||
gd5fxgq4uexxg_ecc_get_status)),
|
||||
SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
|
||||
SPINAND_INFO("GD5F1GQ4UFxxG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
|
||||
|
@ -242,39 +247,13 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|||
gd5fxgq4ufxxg_ecc_get_status)),
|
||||
};
|
||||
|
||||
static int gigadevice_spinand_detect(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
u16 did;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Earlier GDF5-series devices (A,E) return [0][MID][DID]
|
||||
* Later (F) devices return [MID][DID1][DID2]
|
||||
*/
|
||||
|
||||
if (id[0] == SPINAND_MFR_GIGADEVICE)
|
||||
did = (id[1] << 8) + id[2];
|
||||
else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
|
||||
did = id[2];
|
||||
else
|
||||
return 0;
|
||||
|
||||
ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
|
||||
ARRAY_SIZE(gigadevice_spinand_table),
|
||||
did);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
|
||||
.detect = gigadevice_spinand_detect,
|
||||
};
|
||||
|
||||
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
|
||||
.id = SPINAND_MFR_GIGADEVICE,
|
||||
.name = "GigaDevice",
|
||||
.chips = gigadevice_spinand_table,
|
||||
.nchips = ARRAY_SIZE(gigadevice_spinand_table),
|
||||
.ops = &gigadevice_spinand_manuf_ops,
|
||||
};
|
||||
|
|
|
@ -99,7 +99,8 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
|
|||
}
|
||||
|
||||
static const struct spinand_info macronix_spinand_table[] = {
|
||||
SPINAND_INFO("MX35LF1GE4AB", 0x12,
|
||||
SPINAND_INFO("MX35LF1GE4AB",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(4, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -108,7 +109,8 @@ static const struct spinand_info macronix_spinand_table[] = {
|
|||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
|
||||
mx35lf1ge4ab_ecc_get_status)),
|
||||
SPINAND_INFO("MX35LF2GE4AB", 0x22,
|
||||
SPINAND_INFO("MX35LF2GE4AB",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
|
||||
NAND_ECCREQ(4, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -118,33 +120,13 @@ static const struct spinand_info macronix_spinand_table[] = {
|
|||
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
|
||||
};
|
||||
|
||||
static int macronix_spinand_detect(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Macronix SPI NAND read ID needs a dummy byte, so the first byte in
|
||||
* raw_id is garbage.
|
||||
*/
|
||||
if (id[1] != SPINAND_MFR_MACRONIX)
|
||||
return 0;
|
||||
|
||||
ret = spinand_match_and_init(spinand, macronix_spinand_table,
|
||||
ARRAY_SIZE(macronix_spinand_table),
|
||||
id[2]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
|
||||
.detect = macronix_spinand_detect,
|
||||
};
|
||||
|
||||
const struct spinand_manufacturer macronix_spinand_manufacturer = {
|
||||
.id = SPINAND_MFR_MACRONIX,
|
||||
.name = "Macronix",
|
||||
.chips = macronix_spinand_table,
|
||||
.nchips = ARRAY_SIZE(macronix_spinand_table),
|
||||
.ops = &macronix_spinand_manuf_ops,
|
||||
};
|
||||
|
|
|
@ -18,6 +18,16 @@
|
|||
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
|
||||
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
|
||||
|
||||
#define MICRON_CFG_CR BIT(0)
|
||||
|
||||
/*
|
||||
* As per datasheet, die selection is done by the 6th bit of Die
|
||||
* Select Register (Address 0xD0).
|
||||
*/
|
||||
#define MICRON_DIE_SELECT_REG 0xD0
|
||||
|
||||
#define MICRON_SELECT_DIE(x) ((x) << 6)
|
||||
|
||||
static SPINAND_OP_VARIANTS(read_cache_variants,
|
||||
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
|
||||
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
|
||||
|
@ -34,38 +44,52 @@ static SPINAND_OP_VARIANTS(update_cache_variants,
|
|||
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
|
||||
SPINAND_PROG_LOAD(false, 0, NULL, 0));
|
||||
|
||||
static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
{
|
||||
if (section)
|
||||
return -ERANGE;
|
||||
|
||||
region->offset = 64;
|
||||
region->length = 64;
|
||||
region->offset = mtd->oobsize / 2;
|
||||
region->length = mtd->oobsize / 2;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
{
|
||||
if (section)
|
||||
return -ERANGE;
|
||||
|
||||
/* Reserve 2 bytes for the BBM. */
|
||||
region->offset = 2;
|
||||
region->length = 62;
|
||||
region->length = (mtd->oobsize / 2) - 2;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
|
||||
.ecc = mt29f2g01abagd_ooblayout_ecc,
|
||||
.free = mt29f2g01abagd_ooblayout_free,
|
||||
static const struct mtd_ooblayout_ops micron_8_ooblayout = {
|
||||
.ecc = micron_8_ooblayout_ecc,
|
||||
.free = micron_8_ooblayout_free,
|
||||
};
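/*
 * Illustration (not from the patch itself): with the generalized helpers
 * above, the layout is derived from the actual OOB size instead of the
 * fixed 64/64 split, e.g. for the two OOB sizes used in the table below:
 *
 *   oobsize = 128: ECC  -> { .offset = 64,  .length = 64  }
 *                  free -> { .offset = 2,   .length = 62  }
 *   oobsize = 256: ECC  -> { .offset = 128, .length = 128 }
 *                  free -> { .offset = 2,   .length = 126 }
 *
 * (the first two OOB bytes stay reserved for the bad block marker).
 */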
|
||||
|
||||
static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
|
||||
u8 status)
|
||||
static int micron_select_target(struct spinand_device *spinand,
|
||||
unsigned int target)
|
||||
{
|
||||
struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
|
||||
spinand->scratchbuf);
|
||||
|
||||
if (target > 1)
|
||||
return -EINVAL;
|
||||
|
||||
*spinand->scratchbuf = MICRON_SELECT_DIE(target);
|
||||
|
||||
return spi_mem_exec_op(spinand->spimem, &op);
|
||||
}
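/*
 * Illustration (not from the patch itself): for the two-die parts the
 * helper above writes MICRON_SELECT_DIE(target) to the die select
 * register (0xD0) through a SET_FEATURE operation, i.e.
 *
 *   target 0 -> value 0x00
 *   target 1 -> value 0x40 (bit 6 set)
 *
 * and any target above 1 is rejected with -EINVAL.
 */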
|
||||
|
||||
static int micron_8_ecc_get_status(struct spinand_device *spinand,
|
||||
u8 status)
|
||||
{
|
||||
switch (status & MICRON_STATUS_ECC_MASK) {
|
||||
case STATUS_ECC_NO_BITFLIPS:
|
||||
|
@ -91,43 +115,131 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
|
|||
}
|
||||
|
||||
static const struct spinand_info micron_spinand_table[] = {
|
||||
SPINAND_INFO("MT29F2G01ABAGD", 0x24,
|
||||
/* M79A 2Gb 3.3V */
|
||||
SPINAND_INFO("MT29F2G01ABAGD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
|
||||
mt29f2g01abagd_ecc_get_status)),
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status)),
|
||||
/* M79A 2Gb 1.8V */
|
||||
SPINAND_INFO("MT29F2G01ABBGD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status)),
|
||||
/* M78A 1Gb 3.3V */
|
||||
SPINAND_INFO("MT29F1G01ABAFD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status)),
|
||||
/* M78A 1Gb 1.8V */
|
||||
SPINAND_INFO("MT29F1G01ABAFD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status)),
|
||||
/* M79A 4Gb 3.3V */
|
||||
SPINAND_INFO("MT29F4G01ADAGD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status),
|
||||
SPINAND_SELECT_TARGET(micron_select_target)),
|
||||
/* M70A 4Gb 3.3V */
|
||||
SPINAND_INFO("MT29F4G01ABAFD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
SPINAND_HAS_CR_FEAT_BIT,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status)),
|
||||
/* M70A 4Gb 1.8V */
|
||||
SPINAND_INFO("MT29F4G01ABBFD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
SPINAND_HAS_CR_FEAT_BIT,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status)),
|
||||
/* M70A 8Gb 3.3V */
|
||||
SPINAND_INFO("MT29F8G01ADAFD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
SPINAND_HAS_CR_FEAT_BIT,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status),
|
||||
SPINAND_SELECT_TARGET(micron_select_target)),
|
||||
/* M70A 8Gb 1.8V */
|
||||
SPINAND_INFO("MT29F8G01ADBFD",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
SPINAND_HAS_CR_FEAT_BIT,
|
||||
SPINAND_ECCINFO(&micron_8_ooblayout,
|
||||
micron_8_ecc_get_status),
|
||||
SPINAND_SELECT_TARGET(micron_select_target)),
|
||||
};
|
||||
|
||||
static int micron_spinand_detect(struct spinand_device *spinand)
|
||||
static int micron_spinand_init(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Micron SPI NAND read ID need a dummy byte,
|
||||
* so the first byte in raw_id is dummy.
|
||||
* The M70A device series enables the Continuous Read feature at power-up,
|
||||
* which is not supported. Disable this bit to avoid any possible
|
||||
* failure.
|
||||
*/
|
||||
if (id[1] != SPINAND_MFR_MICRON)
|
||||
return 0;
|
||||
if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
|
||||
return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
|
||||
|
||||
ret = spinand_match_and_init(spinand, micron_spinand_table,
|
||||
ARRAY_SIZE(micron_spinand_table), id[2]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
|
||||
.detect = micron_spinand_detect,
|
||||
.init = micron_spinand_init,
|
||||
};
|
||||
|
||||
const struct spinand_manufacturer micron_spinand_manufacturer = {
|
||||
.id = SPINAND_MFR_MICRON,
|
||||
.name = "Micron",
|
||||
.chips = micron_spinand_table,
|
||||
.nchips = ARRAY_SIZE(micron_spinand_table),
|
||||
.ops = &micron_spinand_manuf_ops,
|
||||
};
|
||||
|
|
|
@ -97,7 +97,8 @@ static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
|
|||
|
||||
|
||||
static const struct spinand_info paragon_spinand_table[] = {
|
||||
SPINAND_INFO("PN26G01A", 0xe1,
|
||||
SPINAND_INFO("PN26G01A",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -106,7 +107,8 @@ static const struct spinand_info paragon_spinand_table[] = {
|
|||
0,
|
||||
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
|
||||
pn26g0xa_ecc_get_status)),
|
||||
SPINAND_INFO("PN26G02A", 0xe2,
|
||||
SPINAND_INFO("PN26G02A",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -117,31 +119,13 @@ static const struct spinand_info paragon_spinand_table[] = {
|
|||
pn26g0xa_ecc_get_status)),
|
||||
};
|
||||
|
||||
static int paragon_spinand_detect(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
int ret;
|
||||
|
||||
/* Read ID returns [0][MID][DID] */
|
||||
|
||||
if (id[1] != SPINAND_MFR_PARAGON)
|
||||
return 0;
|
||||
|
||||
ret = spinand_match_and_init(spinand, paragon_spinand_table,
|
||||
ARRAY_SIZE(paragon_spinand_table),
|
||||
id[2]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
|
||||
.detect = paragon_spinand_detect,
|
||||
};
|
||||
|
||||
const struct spinand_manufacturer paragon_spinand_manufacturer = {
|
||||
.id = SPINAND_MFR_PARAGON,
|
||||
.name = "Paragon",
|
||||
.chips = paragon_spinand_table,
|
||||
.nchips = ARRAY_SIZE(paragon_spinand_table),
|
||||
.ops = &paragon_spinand_manuf_ops,
|
||||
};
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/mtd/spinand.h>
|
||||
|
||||
/* Kioxia is new name of Toshiba memory. */
|
||||
#define SPINAND_MFR_TOSHIBA 0x98
|
||||
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
|
||||
|
||||
|
@ -19,14 +20,26 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
|
|||
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
|
||||
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
|
||||
|
||||
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
|
||||
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
|
||||
SPINAND_PROG_LOAD(true, 0, NULL, 0));
|
||||
|
||||
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
|
||||
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
|
||||
SPINAND_PROG_LOAD(false, 0, NULL, 0));
|
||||
|
||||
/**
|
||||
* Backward compatibility for 1st generation Serial NAND devices
|
||||
* which don't support Quad Program Load operation.
|
||||
*/
|
||||
static SPINAND_OP_VARIANTS(write_cache_variants,
|
||||
SPINAND_PROG_LOAD(true, 0, NULL, 0));
|
||||
|
||||
static SPINAND_OP_VARIANTS(update_cache_variants,
|
||||
SPINAND_PROG_LOAD(false, 0, NULL, 0));
|
||||
|
||||
static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
{
|
||||
if (section > 0)
|
||||
return -ERANGE;
|
||||
|
@ -37,8 +50,8 @@ static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
|
||||
struct mtd_oob_region *region)
|
||||
{
|
||||
if (section > 0)
|
||||
return -ERANGE;
|
||||
|
@ -50,13 +63,13 @@ static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
|
||||
.ecc = tc58cxgxsx_ooblayout_ecc,
|
||||
.free = tc58cxgxsx_ooblayout_free,
|
||||
static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
|
||||
.ecc = tx58cxgxsxraix_ooblayout_ecc,
|
||||
.free = tx58cxgxsxraix_ooblayout_free,
|
||||
};
|
||||
|
||||
static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
|
||||
u8 status)
|
||||
static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
|
||||
u8 status)
|
||||
{
|
||||
struct nand_device *nand = spinand_to_nand(spinand);
|
||||
u8 mbf = 0;
|
||||
|
@ -94,105 +107,174 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
|
|||
}
|
||||
|
||||
static const struct spinand_info toshiba_spinand_table[] = {
|
||||
/* 3.3V 1Gb */
|
||||
SPINAND_INFO("TC58CVG0S3", 0xC2,
|
||||
/* 3.3V 1Gb (1st generation) */
|
||||
SPINAND_INFO("TC58CVG0S3HRAIG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
/* 3.3V 2Gb */
|
||||
SPINAND_INFO("TC58CVG1S3", 0xCB,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 3.3V 2Gb (1st generation) */
|
||||
SPINAND_INFO("TC58CVG1S3HRAIG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
/* 3.3V 4Gb */
|
||||
SPINAND_INFO("TC58CVG2S0", 0xCD,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 3.3V 4Gb (1st generation) */
|
||||
SPINAND_INFO("TC58CVG2S0HRAIG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
/* 3.3V 4Gb */
|
||||
SPINAND_INFO("TC58CVG2S0", 0xED,
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
/* 1.8V 1Gb */
|
||||
SPINAND_INFO("TC58CYG0S3", 0xB2,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 1Gb (1st generation) */
|
||||
SPINAND_INFO("TC58CYG0S3HRAIG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
/* 1.8V 2Gb */
|
||||
SPINAND_INFO("TC58CYG1S3", 0xBB,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 2Gb (1st generation) */
|
||||
SPINAND_INFO("TC58CYG1S3HRAIG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
/* 1.8V 4Gb */
|
||||
SPINAND_INFO("TC58CYG2S0", 0xBD,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 4Gb (1st generation) */
|
||||
SPINAND_INFO("TC58CYG2S0HRAIG",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_variants,
|
||||
&update_cache_variants),
|
||||
0,
|
||||
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
|
||||
tc58cxgxsx_ecc_get_status)),
|
||||
};
|
||||
|
||||
static int toshiba_spinand_detect(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
int ret;
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
|
||||
/*
|
||||
* Toshiba SPI NAND read ID needs a dummy byte,
|
||||
* so the first byte in id is garbage.
|
||||
* 2nd generation serial nand has HOLD_D which is equivalent to
|
||||
* QE_BIT.
|
||||
*/
|
||||
if (id[1] != SPINAND_MFR_TOSHIBA)
|
||||
return 0;
|
||||
|
||||
ret = spinand_match_and_init(spinand, toshiba_spinand_table,
|
||||
ARRAY_SIZE(toshiba_spinand_table),
|
||||
id[2]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
}
|
||||
/* 3.3V 1Gb (2nd generation) */
|
||||
SPINAND_INFO("TC58CVG0S3HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 3.3V 2Gb (2nd generation) */
|
||||
SPINAND_INFO("TC58CVG1S3HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 3.3V 4Gb (2nd generation) */
|
||||
SPINAND_INFO("TC58CVG2S0HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 3.3V 8Gb (2nd generation) */
|
||||
SPINAND_INFO("TH58CVG3S0HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 1Gb (2nd generation) */
|
||||
SPINAND_INFO("TC58CYG0S3HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 2Gb (2nd generation) */
|
||||
SPINAND_INFO("TC58CYG1S3HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
|
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 4Gb (2nd generation) */
|
||||
SPINAND_INFO("TC58CYG2S0HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
/* 1.8V 8Gb (2nd generation) */
|
||||
SPINAND_INFO("TH58CYG3S0HRAIJ",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
|
||||
NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
|
||||
NAND_ECCREQ(8, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
&write_cache_x4_variants,
|
||||
&update_cache_x4_variants),
|
||||
SPINAND_HAS_QE_BIT,
|
||||
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
|
||||
tx58cxgxsxraix_ecc_get_status)),
|
||||
};
|
||||
|
||||
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
|
||||
.detect = toshiba_spinand_detect,
|
||||
};
|
||||
|
||||
const struct spinand_manufacturer toshiba_spinand_manufacturer = {
|
||||
.id = SPINAND_MFR_TOSHIBA,
|
||||
.name = "Toshiba",
|
||||
.chips = toshiba_spinand_table,
|
||||
.nchips = ARRAY_SIZE(toshiba_spinand_table),
|
||||
.ops = &toshiba_spinand_manuf_ops,
|
||||
};
|
||||
|
|
|
@ -75,7 +75,8 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
|
|||
}
|
||||
|
||||
static const struct spinand_info winbond_spinand_table[] = {
|
||||
SPINAND_INFO("W25M02GV", 0xAB,
|
||||
SPINAND_INFO("W25M02GV",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
|
||||
NAND_ECCREQ(1, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -84,7 +85,8 @@ static const struct spinand_info winbond_spinand_table[] = {
|
|||
0,
|
||||
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
|
||||
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
|
||||
SPINAND_INFO("W25N01GV", 0xAA,
|
||||
SPINAND_INFO("W25N01GV",
|
||||
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa),
|
||||
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
|
||||
NAND_ECCREQ(1, 512),
|
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
||||
|
@ -94,31 +96,6 @@ static const struct spinand_info winbond_spinand_table[] = {
|
|||
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
|
||||
};
|
||||
|
||||
/**
|
||||
* winbond_spinand_detect - initialize device related part in spinand_device
|
||||
* struct if it is a Winbond device.
|
||||
* @spinand: SPI NAND device structure
|
||||
*/
|
||||
static int winbond_spinand_detect(struct spinand_device *spinand)
|
||||
{
|
||||
u8 *id = spinand->id.data;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Winbond SPI NAND read ID need a dummy byte,
|
||||
* so the first byte in raw_id is dummy.
|
||||
*/
|
||||
if (id[1] != SPINAND_MFR_WINBOND)
|
||||
return 0;
|
||||
|
||||
ret = spinand_match_and_init(spinand, winbond_spinand_table,
|
||||
ARRAY_SIZE(winbond_spinand_table), id[2]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int winbond_spinand_init(struct spinand_device *spinand)
|
||||
{
|
||||
struct nand_device *nand = spinand_to_nand(spinand);
|
||||
|
@ -138,12 +115,13 @@ static int winbond_spinand_init(struct spinand_device *spinand)
|
|||
}
|
||||
|
||||
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
|
||||
.detect = winbond_spinand_detect,
|
||||
.init = winbond_spinand_init,
|
||||
};
|
||||
|
||||
const struct spinand_manufacturer winbond_spinand_manufacturer = {
|
||||
.id = SPINAND_MFR_WINBOND,
|
||||
.name = "Winbond",
|
||||
.chips = winbond_spinand_table,
|
||||
.nchips = ARRAY_SIZE(winbond_spinand_table),
|
||||
.ops = &winbond_spinand_manuf_ops,
|
||||
};
|
||||
|
|
|
@ -1064,6 +1064,8 @@ struct nand_legacy {
|
|||
* @lock: lock protecting the suspended field. Also used to
|
||||
* serialize accesses to the NAND device.
|
||||
* @suspended: set to 1 when the device is suspended, 0 when it's not.
|
||||
* @suspend: [REPLACEABLE] specific NAND device suspend operation
|
||||
* @resume: [REPLACEABLE] specific NAND device resume operation
|
||||
* @bbt: [INTERN] bad block table pointer
|
||||
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
|
||||
* lookup.
|
||||
|
@ -1077,6 +1079,8 @@ struct nand_legacy {
|
|||
* @manufacturer: [INTERN] Contains manufacturer information
|
||||
* @manufacturer.desc: [INTERN] Contains manufacturer's description
|
||||
* @manufacturer.priv: [INTERN] Contains manufacturer private information
|
||||
* @lock_area: [REPLACEABLE] specific NAND chip lock operation
|
||||
* @unlock_area: [REPLACEABLE] specific NAND chip unlock operation
|
||||
*/
|
||||
|
||||
struct nand_chip {
|
||||
|
@ -1117,6 +1121,8 @@ struct nand_chip {
|
|||
|
||||
struct mutex lock;
|
||||
unsigned int suspended : 1;
|
||||
int (*suspend)(struct nand_chip *chip);
|
||||
void (*resume)(struct nand_chip *chip);
|
||||
|
||||
uint8_t *oob_poi;
|
||||
struct nand_controller *controller;
|
||||
|
@ -1136,6 +1142,9 @@ struct nand_chip {
|
|||
const struct nand_manufacturer *desc;
|
||||
void *priv;
|
||||
} manufacturer;
|
||||
|
||||
int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
|
||||
int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
|
||||
};
|
||||
|
||||
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
|
||||
|
@ -1215,7 +1224,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
|
|||
* struct nand_flash_dev - NAND Flash Device ID Structure
|
||||
* @name: a human-readable name of the NAND chip
|
||||
* @dev_id: the device ID (the second byte of the full chip ID array)
|
||||
* @mfr_id: manufecturer ID part of the full chip ID array (refers the same
|
||||
* @mfr_id: manufacturer ID part of the full chip ID array (refers the same
|
||||
* memory address as ``id[0]``)
|
||||
* @dev_id: device ID part of the full chip ID array (refers the same memory
|
||||
* address as ``id[1]``)
|
||||
|
|
|
@ -32,9 +32,9 @@
|
|||
SPI_MEM_OP_NO_DUMMY, \
|
||||
SPI_MEM_OP_NO_DATA)
|
||||
|
||||
#define SPINAND_READID_OP(ndummy, buf, len) \
|
||||
#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
|
||||
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
|
||||
SPI_MEM_OP_NO_ADDR, \
|
||||
SPI_MEM_OP_ADDR(naddr, 0, 1), \
|
||||
SPI_MEM_OP_DUMMY(ndummy, 1), \
|
||||
SPI_MEM_OP_DATA_IN(len, buf, 1))
|
||||
|
||||
|
@ -176,37 +176,46 @@ struct spinand_device;
|
|||
* @data: buffer containing the id bytes. Currently 4 bytes large, but can
|
||||
* be extended if required
|
||||
* @len: ID length
|
||||
*
|
||||
* struct_spinand_id->data contains all bytes returned after a READ_ID command,
|
||||
* including dummy bytes if the chip does not emit ID bytes right after the
|
||||
* READ_ID command. The responsibility to extract real ID bytes is left to
|
||||
* struct_manufacturer_ops->detect().
|
||||
*/
|
||||
struct spinand_id {
|
||||
u8 data[SPINAND_MAX_ID_LEN];
|
||||
int len;
|
||||
};
|
||||
|
||||
enum spinand_readid_method {
|
||||
SPINAND_READID_METHOD_OPCODE,
|
||||
SPINAND_READID_METHOD_OPCODE_ADDR,
|
||||
SPINAND_READID_METHOD_OPCODE_DUMMY,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct spinand_devid - SPI NAND device id structure
|
||||
* @id: device id of current chip
|
||||
* @len: number of bytes in device id
|
||||
* @method: method to read chip id
|
||||
* There are 3 possible variants:
|
||||
* SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
|
||||
* after read_id opcode.
|
||||
* SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
|
||||
* read_id opcode + 1-byte address.
|
||||
* SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
|
||||
* read_id opcode + 1 dummy byte.
|
||||
*/
|
||||
struct spinand_devid {
|
||||
const u8 *id;
|
||||
const u8 len;
|
||||
const enum spinand_readid_method method;
|
||||
};
|
||||
|
||||
/**
 * struct manufacurer_ops - SPI NAND manufacturer specific operations
 * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
 *	    the core calls the struct_manufacurer_ops->detect() hook of each
 *	    registered manufacturer until one of them return 1. Note that
 *	    the first thing to check in this hook is that the manufacturer ID
 *	    in struct_spinand_device->id matches the manufacturer whose
 *	    ->detect() hook has been called. Should return 1 if there's a
 *	    match, 0 if the manufacturer ID does not match and a negative
 *	    error code otherwise. When true is returned, the core assumes
 *	    that properties of the NAND chip (spinand->base.memorg and
 *	    spinand->base.eccreq) have been filled
 * @init: initialize a SPI NAND device
 * @cleanup: cleanup a SPI NAND device
 *
 * Each SPI NAND manufacturer driver should implement this interface so that
- * NAND chips coming from this vendor can be detected and initialized properly.
+ * NAND chips coming from this vendor can be initialized properly.
 */
struct spinand_manufacturer_ops {
	int (*detect)(struct spinand_device *spinand);
	int (*init)(struct spinand_device *spinand);
	void (*cleanup)(struct spinand_device *spinand);
};
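A manufacturer driver exposes these hooks through a static ops structure. A hedged sketch, with every function name invented for the example:

/* Sketch of a vendor ops table; example_spinand_init()/cleanup() are
 * hypothetical and not part of this series. */
static int example_spinand_init(struct spinand_device *spinand)
{
	/* allocate per-chip private data, tweak op variants, etc. */
	return 0;
}

static void example_spinand_cleanup(struct spinand_device *spinand)
{
	/* free whatever init() allocated */
}

static const struct spinand_manufacturer_ops example_spinand_manuf_ops = {
	.init = example_spinand_init,
	.cleanup = example_spinand_cleanup,
};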
@@ -215,11 +224,16 @@ struct spinand_manufacturer_ops {
 * struct spinand_manufacturer - SPI NAND manufacturer instance
 * @id: manufacturer ID
 * @name: manufacturer name
 * @devid_len: number of bytes in device ID
 * @chips: supported SPI NANDs under current manufacturer
 * @nchips: number of SPI NANDs available in chips array
 * @ops: manufacturer operations
 */
struct spinand_manufacturer {
	u8 id;
	char *name;
	const struct spinand_info *chips;
	const size_t nchips;
	const struct spinand_manufacturer_ops *ops;
};
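With the chips/nchips fields, each vendor hands its device table to the core instead of matching IDs in private code. A sketch of what a manufacturer descriptor could look like, reusing the ops table sketched above; SPINAND_MFR_EXAMPLE, its 0x2c value and example_spinand_table are placeholders:

/* Hypothetical manufacturer descriptor. */
#define SPINAND_MFR_EXAMPLE	0x2c

static const struct spinand_info example_spinand_table[] = {
	/* one SPINAND_INFO() entry per supported chip */
};

static const struct spinand_manufacturer example_spinand_manufacturer = {
	.id = SPINAND_MFR_EXAMPLE,
	.name = "Example",
	.chips = example_spinand_table,
	.nchips = ARRAY_SIZE(example_spinand_table),
	.ops = &example_spinand_manuf_ops,
};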
@@ -270,6 +284,7 @@ struct spinand_ecc_info {
};

#define SPINAND_HAS_QE_BIT		BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT		BIT(1)

/**
 * struct spinand_info - Structure used to describe SPI NAND chips
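SPINAND_HAS_CR_FEAT_BIT is a new per-chip flag, added for the Micron continuous-read support listed in the changelog. A tiny hedged sketch of how such a flag could be tested on a probed device, assuming the chip's flags are copied onto the spinand device as for SPINAND_HAS_QE_BIT; the helper name is invented:

/* Sketch: flags declared in a chip's SPINAND_INFO() entry can be tested
 * at runtime on the probed device. */
static bool example_has_continuous_read(struct spinand_device *spinand)
{
	return spinand->flags & SPINAND_HAS_CR_FEAT_BIT;
}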
@@ -291,7 +306,7 @@ struct spinand_ecc_info {
 */
struct spinand_info {
	const char *model;
-	u16 devid;
+	struct spinand_devid devid;
	u32 flags;
	struct nand_memory_organization memorg;
	struct nand_ecc_req eccreq;
@@ -305,6 +320,13 @@ struct spinand_info {
			    unsigned int target);
};

#define SPINAND_ID(__method, ...)					\
	{								\
		.id = (const u8[]){ __VA_ARGS__ },			\
		.len = sizeof((u8[]){ __VA_ARGS__ }),			\
		.method = __method,					\
	}
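SPINAND_ID() wraps the ID bytes and the READ_ID method into a struct spinand_devid initializer, which is what the reworked devid field of struct spinand_info now holds. A hedged usage sketch; the variable name and the 0x12 byte are placeholders, and the surrounding SPINAND_INFO() arguments are deliberately not shown here:

/* Sketch: describing a chip whose ID follows the opcode plus one
 * dummy byte. */
static const struct spinand_devid example_id =
	SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12);

In the vendor device tables this initializer is what takes the place of the old bare u16 device ID.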
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update)		\
	{								\
		.read_cache = __read,					\
@@ -451,9 +473,10 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
	nanddev_set_of_node(&spinand->base, np);
}

-int spinand_match_and_init(struct spinand_device *dev,
+int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
-			   unsigned int table_size, u16 devid);
+			   unsigned int table_size,
+			   enum spinand_readid_method rdid_method);

int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
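spinand_match_and_init() now receives the READ_ID method that produced the ID bytes instead of a pre-extracted u16 device ID, so it can compare against each table entry's devid.method as well as its ID bytes. A hedged sketch of how a caller could match a probed device against a vendor table for one READ_ID variant; the function name is invented and example_spinand_table is the hypothetical table from the earlier sketch:

/* Sketch only: try to match and initialize the device using the
 * "opcode + dummy byte" READ_ID variant. */
static int example_spinand_detect(struct spinand_device *spinand)
{
	return spinand_match_and_init(spinand, example_spinand_table,
				      ARRAY_SIZE(example_spinand_table),
				      SPINAND_READID_METHOD_OPCODE_DUMMY);
}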