Merge tag 'mtd/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux
Pull MTD updates from Miquel Raynal:
 "This contains the following changes for MTD:

  MTD core changes:
   - New Hyperbus framework
   - New _is_locked (concat) implementation
   - Various cleanups

  NAND core changes:
   - use longest matching pattern in ->exec_op() default parser
   - export NAND operation tracer
   - add flag to indicate panic_write in MTD
   - use kzalloc() instead of kmalloc() and memset()

  Raw NAND controller drivers changes:
   - brcmnand:
      - fix BCH ECC layout for large page NAND parts
      - fallback to detected ecc-strength, ecc-step-size
      - when oops in progress use pio and interrupt polling
      - code refactor to introduce helper functions
      - add support for v7.3 controller
   - FSMC:
      - use nand_op_trace for operation tracing
   - GPMI:
      - move all driver code into single file
      - various cleanups (including dmaengine changes)
      - use runtime PM to manage clocks
      - implement exec_op
   - MTK:
      - correct low level time calculation of r/w cycle
      - improve data sampling timing for read cycle
      - add validity check for CE# pin setting
      - fix wrongly assigned OOB buffer pointer issue
      - re-license MTK NAND driver as Dual MIT/GPL
   - STM32:
      - manage the get_irq error case
      - increase DMA completion timeouts

  Raw NAND chips drivers changes:
   - Macronix: add read-retry support

  Onenand driver changes:
   - add support for 8Gb datasize chips
   - avoid fall-through warnings

  SPI-NAND changes:
   - define macros for page-read ops with three-byte addresses
   - add support for two-byte device IDs and then for GigaDevice
     GD5F1GQ4UFxxG
   - add initial support for Paragon PN26G0xA
   - handle the case where the last page read has bitflips

  SPI-NOR core changes:
   - add support for the mt25ql02g and w25q16jv flashes
   - print error in case of jedec read id fails
   - is25lp256: add post BFPT fix to correct the addr_width

  SPI NOR controller drivers changes:
   - intel-spi: Add support for Intel Elkhart Lake SPI serial flash
   - stm32: remove the driver as it was replaced by spi-stm32-qspi.c
   - cadence-quadspi: add reset control"

* tag 'mtd/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (60 commits)
  mtd: concat: implement _is_locked mtd operation
  mtd: concat: refactor concat_lock/concat_unlock
  mtd: abi: do not use C++ style comments in uapi header
  mtd: afs: remove unneeded NULL check
  mtd: rawnand: stm32_fmc2: increase DMA completion timeouts
  mtd: rawnand: Use kzalloc() instead of kmalloc() and memset()
  mtd: hyperbus: Add driver for TI's HyperBus memory controller
  mtd: spinand: read returns badly if the last page has bitflips
  mtd: spinand: Add initial support for Paragon PN26G0xA
  mtd: rawnand: mtk: Re-license MTK NAND driver as Dual MIT/GPL
  mtd: rawnand: gpmi: remove double assignment to block_size
  dt-bindings: mtd: brcmnand: Add brcmnand, brcmnand-v7.3 support
  mtd: rawnand: brcmnand: Add support for v7.3 controller
  mtd: rawnand: brcmnand: Refactored code to introduce helper functions
  mtd: rawnand: brcmnand: When oops in progress use pio and interrupt polling
  mtd: Add flag to indicate panic_write
  mtd: rawnand: Add Macronix NAND read retry support
  mtd: onenand: Avoid fall-through warnings
  mtd: spinand: Add support for GigaDevice GD5F1GQ4UFxxG
  mtd: spinand: Add support for two-byte device IDs
  ...
@@ -3257,6 +3257,8 @@ static void onenand_check_features(struct mtd_info *mtd)

/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
@@ -3277,12 +3279,15 @@ static void onenand_check_features(struct mtd_info *mtd)
if ((this->version_id & 0xf) == 0xe)
this->options |= ONENAND_HAS_NOP_1;
}
this->options |= ONENAND_HAS_UNLOCK_ALL;
break;

case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
if (!ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
this->options |= ONENAND_HAS_UNLOCK_ALL;
break;

case ONENAND_DEVICE_DENSITY_1Gb:
/* A-Die has all block unlock */
@@ -84,6 +84,12 @@ struct brcm_nand_dma_desc {
#define FLASH_DMA_ECC_ERROR (1 << 8)
#define FLASH_DMA_CORR_ERROR (1 << 9)

/* Bitfields for DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop in Uncorr ECC error */
#define FLASH_DMA_MODE_MODE BIT(0) /* link list */
#define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \
FLASH_DMA_MODE_MODE)

/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT 9U
#define FC_BYTES 512U
@@ -96,6 +102,51 @@ struct brcm_nand_dma_desc {
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100

/* flash_dma registers */
enum flash_dma_reg {
FLASH_DMA_REVISION = 0,
FLASH_DMA_FIRST_DESC,
FLASH_DMA_FIRST_DESC_EXT,
FLASH_DMA_CTRL,
FLASH_DMA_MODE,
FLASH_DMA_STATUS,
FLASH_DMA_INTERRUPT_DESC,
FLASH_DMA_INTERRUPT_DESC_EXT,
FLASH_DMA_ERROR_STATUS,
FLASH_DMA_CURRENT_DESC,
FLASH_DMA_CURRENT_DESC_EXT,
};

/* flash_dma registers v1*/
static const u16 flash_dma_regs_v1[] = {
[FLASH_DMA_REVISION] = 0x00,
[FLASH_DMA_FIRST_DESC] = 0x04,
[FLASH_DMA_FIRST_DESC_EXT] = 0x08,
[FLASH_DMA_CTRL] = 0x0c,
[FLASH_DMA_MODE] = 0x10,
[FLASH_DMA_STATUS] = 0x14,
[FLASH_DMA_INTERRUPT_DESC] = 0x18,
[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c,
[FLASH_DMA_ERROR_STATUS] = 0x20,
[FLASH_DMA_CURRENT_DESC] = 0x24,
[FLASH_DMA_CURRENT_DESC_EXT] = 0x28,
};

/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_REVISION] = 0x00,
[FLASH_DMA_FIRST_DESC] = 0x08,
[FLASH_DMA_FIRST_DESC_EXT] = 0x0c,
[FLASH_DMA_CTRL] = 0x10,
[FLASH_DMA_MODE] = 0x14,
[FLASH_DMA_STATUS] = 0x18,
[FLASH_DMA_INTERRUPT_DESC] = 0x20,
[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24,
[FLASH_DMA_ERROR_STATUS] = 0x28,
[FLASH_DMA_CURRENT_DESC] = 0x30,
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};

/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -128,6 +179,8 @@ struct brcmnand_controller {
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;

/* flash_dma reg */
const u16 *flash_dma_offsets;
struct brcm_nand_dma_desc *dma_desc;
dma_addr_t dma_pa;

@@ -151,6 +204,7 @@ struct brcmnand_controller {
u32 nand_cs_nand_xor;
u32 corr_stat_threshold;
u32 flash_dma_mode;
bool pio_poll_mode;
};

struct brcmnand_cfg {
@@ -462,7 +516,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
/* Register offsets */
if (ctrl->nand_version >= 0x0702)
ctrl->reg_offsets = brcmnand_regs_v72;
else if (ctrl->nand_version >= 0x0701)
else if (ctrl->nand_version == 0x0701)
ctrl->reg_offsets = brcmnand_regs_v71;
else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
@@ -507,7 +561,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}

/* Maximum spare area sector size (per 512B) */
if (ctrl->nand_version >= 0x0702)
if (ctrl->nand_version == 0x0702)
ctrl->max_oob = 128;
else if (ctrl->nand_version >= 0x0600)
ctrl->max_oob = 64;
@@ -538,6 +592,15 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
return 0;
}

static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
/* flash_dma register offsets */
if (ctrl->nand_version >= 0x0703)
ctrl->flash_dma_offsets = flash_dma_regs_v4;
else
ctrl->flash_dma_offsets = flash_dma_regs_v1;
}

static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
enum brcmnand_reg reg)
{
@@ -580,6 +643,54 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
__raw_writel(val, ctrl->nand_fc + word * 4);
}

static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{

/* Clear error addresses */
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}

static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
u64 err_addr;

err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
err_addr |= ((u64)(brcmnand_read_reg(ctrl,
BRCMNAND_UNCORR_EXT_ADDR)
& 0xffff) << 32);

return err_addr;
}

static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
u64 err_addr;

err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
err_addr |= ((u64)(brcmnand_read_reg(ctrl,
BRCMNAND_CORR_EXT_ADDR)
& 0xffff) << 32);

return err_addr;
}
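For reference, the two helpers above only recombine a 48-bit flash offset that the controller splits across a 32-bit address register and a 16-bit extension register. A minimal standalone sketch of that recombination, with the register reads stubbed out (the values and function names below are illustrative placeholders, not real driver reads):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two 32-bit register reads; values are made up. */
static uint32_t read_uncorr_addr(void)     { return 0x00042a00u; }
static uint32_t read_uncorr_ext_addr(void) { return 0x00000003u; }

/* Combine the low 32 bits with the low 16 bits of the extension
 * register into one 48-bit error offset, mirroring the pattern above. */
static uint64_t get_uncorr_err_addr(void)
{
	uint64_t err_addr = read_uncorr_addr();

	err_addr |= ((uint64_t)(read_uncorr_ext_addr() & 0xffff)) << 32;
	return err_addr;
}

int main(void)
{
	printf("uncorrectable error at 0x%llx\n",
	       (unsigned long long)get_uncorr_err_addr());
	return 0;
}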

static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;

brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
(host->cs << 16) | ((addr >> 32) & 0xffff));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
lower_32_bits(addr));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}

static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
enum brcmnand_cs_reg reg)
{
@@ -612,7 +723,7 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
int cs = host->cs;

if (ctrl->nand_version >= 0x0702)
if (ctrl->nand_version == 0x0702)
bits = 7;
else if (ctrl->nand_version >= 0x0600)
bits = 6;
@@ -666,7 +777,7 @@ enum {

static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version >= 0x0702)
if (ctrl->nand_version == 0x0702)
return GENMASK(7, 0);
else if (ctrl->nand_version >= 0x0600)
return GENMASK(6, 0);
@@ -796,39 +907,44 @@ static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
* Flash DMA
***********************************************************************/

enum flash_dma_reg {
FLASH_DMA_REVISION = 0x00,
FLASH_DMA_FIRST_DESC = 0x04,
FLASH_DMA_FIRST_DESC_EXT = 0x08,
FLASH_DMA_CTRL = 0x0c,
FLASH_DMA_MODE = 0x10,
FLASH_DMA_STATUS = 0x14,
FLASH_DMA_INTERRUPT_DESC = 0x18,
FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c,
FLASH_DMA_ERROR_STATUS = 0x20,
FLASH_DMA_CURRENT_DESC = 0x24,
FLASH_DMA_CURRENT_DESC_EXT = 0x28,
};

static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
return ctrl->flash_dma_base;
}

static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
if (ctrl->pio_poll_mode)
return;

if (has_flash_dma(ctrl)) {
ctrl->flash_dma_base = 0;
disable_irq(ctrl->dma_irq);
}

disable_irq(ctrl->irq);
ctrl->pio_poll_mode = true;
}

static inline bool flash_dma_buf_ok(const void *buf)
{
return buf && !is_vmalloc_addr(buf) &&
likely(IS_ALIGNED((uintptr_t)buf, 4));
}

static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
u32 val)
static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
enum flash_dma_reg dma_reg, u32 val)
{
u16 offs = ctrl->flash_dma_offsets[dma_reg];

brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
enum flash_dma_reg dma_reg)
{
u16 offs = ctrl->flash_dma_offsets[dma_reg];

return brcmnand_readl(ctrl->flash_dma_base + offs);
}
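The v1/v4 tables earlier in this file are consulted through these accessors, so callers always name a logical register and the detected revision picks the offset. A minimal standalone sketch of the same enum-indexed offset-table pattern (the register names, offsets and struct below are illustrative, not the controller's real map):

#include <stdint.h>
#include <stdio.h>

enum dma_reg { DMA_REVISION = 0, DMA_FIRST_DESC, DMA_CTRL, DMA_REG_MAX };

/* Two layouts for the same logical registers, one per controller revision. */
static const uint16_t dma_regs_v1[DMA_REG_MAX] = {
	[DMA_REVISION]   = 0x00,
	[DMA_FIRST_DESC] = 0x04,
	[DMA_CTRL]       = 0x0c,
};

static const uint16_t dma_regs_v4[DMA_REG_MAX] = {
	[DMA_REVISION]   = 0x00,
	[DMA_FIRST_DESC] = 0x08,
	[DMA_CTRL]       = 0x10,
};

struct ctrl {
	const uint16_t *offsets; /* chosen once at revision-detection time */
	uint8_t mmio[0x40];      /* fake register space for the example    */
};

/* All accesses go through the logical enum, never through raw offsets. */
static uint8_t dma_readb(struct ctrl *c, enum dma_reg r)
{
	return c->mmio[c->offsets[r]];
}

int main(void)
{
	struct ctrl c = { .offsets = dma_regs_v4 };

	c.mmio[dma_regs_v4[DMA_CTRL]] = 0x5a;
	printf("DMA_CTRL = 0x%x\n", dma_readb(&c, DMA_CTRL));
	return 0;
}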

@@ -931,7 +1047,7 @@ static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
if (section >= sectors)
return -ERANGE;

oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
oobregion->length = chip->ecc.bytes;

return 0;
@@ -1205,9 +1321,12 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
int ret;
u64 cmd_addr;

cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;

@@ -1229,15 +1348,42 @@ static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
/* intentionally left blank */
}

static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
struct mtd_info *mtd = nand_to_mtd(chip);
bool err = false;
int sts;

if (mtd->oops_panic_write) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
err = (sts < 0) ? true : false;
} else {
unsigned long timeo = msecs_to_jiffies(
NAND_POLL_STATUS_TIMEOUT_MS);
/* wait for completion interrupt */
sts = wait_for_completion_timeout(&ctrl->done, timeo);
err = (sts <= 0) ? true : false;
}

return err;
}
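The oops_panic_write branch above avoids sleeping because a panic-time write cannot rely on interrupts or the scheduler, so it disables the controller IRQs and busy-polls the status instead. A minimal sketch of that decision with the wait primitives stubbed out (the function names below are placeholders, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Placeholders standing in for the real wait primitives. */
static int poll_status_busywait(void)      { return 0; } /* 0 = ready  */
static int wait_for_irq_with_timeout(void) { return 1; } /* >0 = woken */

static bool wait_for_completion_example(bool panic_write)
{
	bool err;

	if (panic_write) {
		/* Panic path: no sleeping, no interrupts - busy-poll the
		 * controller status until it reports ready. */
		err = poll_status_busywait() < 0;
	} else {
		/* Normal path: sleep until the completion IRQ fires or
		 * the timeout expires. */
		err = wait_for_irq_with_timeout() <= 0;
	}
	return err;
}

int main(void)
{
	printf("panic path error: %d\n", wait_for_completion_example(true));
	printf("normal path error: %d\n", wait_for_completion_example(false));
	return 0;
}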

static int brcmnand_waitfunc(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
unsigned long timeo = msecs_to_jiffies(100);
bool err = false;

dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
if (ctrl->cmd_pending &&
wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
if (ctrl->cmd_pending)
err = brcmstb_nand_wait_for_completion(chip);

if (err) {
u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
>> brcmnand_cmd_shift(ctrl);

@@ -1366,12 +1512,7 @@ static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
if (!native_cmd)
return;

brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
(host->cs << 16) | ((addr >> 32) & 0xffff));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

brcmnand_set_cmd_addr(mtd, addr);
brcmnand_send_cmd(host, native_cmd);
brcmnand_waitfunc(chip);

@@ -1589,20 +1730,10 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_controller *ctrl = host->ctrl;
int i, j, ret = 0;

/* Clear error addresses */
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);

brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
(host->cs << 16) | ((addr >> 32) & 0xffff));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
brcmnand_clear_ecc_addr(ctrl);

for (i = 0; i < trans; i++, addr += FC_BYTES) {
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
lower_32_bits(addr));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
brcmnand_set_cmd_addr(mtd, addr);
/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
brcmnand_send_cmd(host, CMD_PAGE_READ);
brcmnand_waitfunc(chip);
@@ -1622,21 +1753,15 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
host->hwcfg.sector_size_1k);

if (!ret) {
*err_addr = brcmnand_read_reg(ctrl,
BRCMNAND_UNCORR_ADDR) |
((u64)(brcmnand_read_reg(ctrl,
BRCMNAND_UNCORR_EXT_ADDR)
& 0xffff) << 32);
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);

if (*err_addr)
ret = -EBADMSG;
}

if (!ret) {
*err_addr = brcmnand_read_reg(ctrl,
BRCMNAND_CORR_ADDR) |
((u64)(brcmnand_read_reg(ctrl,
BRCMNAND_CORR_EXT_ADDR)
& 0xffff) << 32);
*err_addr = brcmnand_get_correcc_addr(ctrl);

if (*err_addr)
ret = -EUCLEAN;
}
@@ -1703,7 +1828,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);

try_dmaread:
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
brcmnand_clear_ecc_addr(ctrl);

if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
@@ -1850,15 +1975,9 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
goto out;
}

brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
(host->cs << 16) | ((addr >> 32) & 0xffff));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);

for (i = 0; i < trans; i++, addr += FC_BYTES) {
/* full address MUST be set before populating FC */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
lower_32_bits(addr));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
brcmnand_set_cmd_addr(mtd, addr);

if (buf) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
@@ -2136,6 +2255,17 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
return -EINVAL;
}

if (chip->ecc.mode != NAND_ECC_NONE &&
(!chip->ecc.size || !chip->ecc.strength)) {
if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
/* use detected ECC parameters */
chip->ecc.size = chip->base.eccreq.step_size;
chip->ecc.strength = chip->base.eccreq.strength;
dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
chip->ecc.size, chip->ecc.strength);
}
}

switch (chip->ecc.size) {
case 512:
if (chip->ecc.algo == NAND_ECC_HAMMING)
@@ -2395,6 +2525,7 @@ static const struct of_device_id brcmnand_of_match[] = {
{ .compatible = "brcm,brcmnand-v7.0" },
{ .compatible = "brcm,brcmnand-v7.1" },
{ .compatible = "brcm,brcmnand-v7.2" },
{ .compatible = "brcm,brcmnand-v7.3" },
{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);

@@ -2481,7 +2612,11 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
goto err;
}

flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
/* initialize the dma version */
brcmnand_flash_dma_revision_init(ctrl);

/* linked-list and stop on error */
flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);

/* Allocate descriptor(s) */
@@ -613,28 +613,20 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];

nand_op_trace(" ", instr);

switch (instr->type) {
case NAND_OP_CMD_INSTR:
pr_debug(" ->CMD [0x%02x]\n",
instr->ctx.cmd.opcode);

writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
break;

case NAND_OP_ADDR_INSTR:
pr_debug(" ->ADDR [%d cyc]",
instr->ctx.addr.naddrs);

for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb_relaxed(instr->ctx.addr.addrs[i],
host->addr_va);
break;

case NAND_OP_DATA_IN_INSTR:
pr_debug(" ->DATA_IN [%d B%s]\n", instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");

if (host->mode == USE_DMA_ACCESS)
fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
@@ -644,10 +636,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
break;

case NAND_OP_DATA_OUT_INSTR:
pr_debug(" ->DATA_OUT [%d B%s]\n", instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");

if (host->mode == USE_DMA_ACCESS)
fsmc_write_buf_dma(host,
instr->ctx.data.buf.out,
@@ -658,9 +646,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
break;

case NAND_OP_WAITRDY_INSTR:
pr_debug(" ->WAITRDY [max %d ms]\n",
instr->ctx.waitrdy.timeout_ms);

ret = nand_soft_waitrdy(chip,
instr->ctx.waitrdy.timeout_ms);
break;
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
gpmi_nand-objs += gpmi-nand.o
gpmi_nand-objs += gpmi-lib.o
@@ -1,934 +0,0 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
*/
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/slab.h>

#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"

/* Converts time to clock cycles */
#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)

#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8
/*
* Clear the bit and poll it cleared. This is usually called with
* a reset address and mask being either SFTRST(bit 31) or CLKGATE
* (bit 30).
*/
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
int timeout = 0x400;

/* clear the bit */
writel(mask, addr + MXS_CLR_ADDR);

/*
* SFTRST needs 3 GPMI clocks to settle, the reference manual
* recommends to wait 1us.
*/
udelay(1);

/* poll the bit becoming clear */
while ((readl(addr) & mask) && --timeout)
/* nothing */;

return !timeout;
}

#define MODULE_CLKGATE (1 << 30)
#define MODULE_SFTRST (1 << 31)
/*
* The current mxs_reset_block() will do two things:
* [1] enable the module.
* [2] reset the module.
*
* In most of the cases, it's ok.
* But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
* If you try to soft reset the BCH block, it becomes unusable until
* the next hard reset. This case occurs in the NAND boot mode. When the board
* boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
* So If the driver tries to reset the BCH again, the BCH will not work anymore.
* You will see a DMA timeout in this case. The bug has been fixed
* in the following chips, such as MX28.
*
* To avoid this bug, just add a new parameter `just_enable` for
* the mxs_reset_block(), and rewrite it here.
*/
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
int ret;
int timeout = 0x400;

/* clear and poll SFTRST */
ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
if (unlikely(ret))
goto error;

/* clear CLKGATE */
writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

if (!just_enable) {
/* set SFTRST to reset the block */
writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
udelay(1);

/* poll CLKGATE becoming set */
while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
/* nothing */;
if (unlikely(!timeout))
goto error;
}

/* clear and poll SFTRST */
ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
if (unlikely(ret))
goto error;

/* clear and poll CLKGATE */
ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
if (unlikely(ret))
goto error;

return 0;

error:
pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
return -ETIMEDOUT;
}

static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
struct clk *clk;
int ret;
int i;

for (i = 0; i < GPMI_CLK_MAX; i++) {
clk = this->resources.clock[i];
if (!clk)
break;

if (v) {
ret = clk_prepare_enable(clk);
if (ret)
goto err_clk;
} else {
clk_disable_unprepare(clk);
}
}
return 0;

err_clk:
for (; i > 0; i--)
clk_disable_unprepare(this->resources.clock[i - 1]);
return ret;
}

int gpmi_enable_clk(struct gpmi_nand_data *this)
{
return __gpmi_enable_clk(this, true);
}

int gpmi_disable_clk(struct gpmi_nand_data *this)
{
return __gpmi_enable_clk(this, false);
}

int gpmi_init(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
int ret;

ret = gpmi_enable_clk(this);
if (ret)
return ret;
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;

/*
* Reset BCH here, too. We got failures otherwise :(
* See later BCH reset for explanation of MX23 and MX28 handling
*/
ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;

/* Choose NAND mode. */
writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

/* Set the IRQ polarity. */
writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
r->gpmi_regs + HW_GPMI_CTRL1_SET);

/* Disable Write-Protection. */
writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

/*
* Decouple the chip select from dma channel. We use dma0 for all
* the chips.
*/
writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

gpmi_disable_clk(this);
return 0;
err_out:
gpmi_disable_clk(this);
return ret;
}

/* This function is very useful. It is called only when the bug occur. */
void gpmi_dump_info(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
struct bch_geometry *geo = &this->bch_geometry;
u32 reg;
int i;

dev_err(this->dev, "Show GPMI registers :\n");
for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
reg = readl(r->gpmi_regs + i * 0x10);
dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}

/* start to print out the BCH info */
dev_err(this->dev, "Show BCH registers :\n");
for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
reg = readl(r->bch_regs + i * 0x10);
dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
dev_err(this->dev, "BCH Geometry :\n"
"GF length : %u\n"
"ECC Strength : %u\n"
"Page Size in Bytes : %u\n"
"Metadata Size in Bytes : %u\n"
"ECC Chunk Size in Bytes: %u\n"
"ECC Chunk Count : %u\n"
"Payload Size in Bytes : %u\n"
"Auxiliary Size in Bytes: %u\n"
"Auxiliary Status Offset: %u\n"
"Block Mark Byte Offset : %u\n"
"Block Mark Bit Offset : %u\n",
geo->gf_len,
geo->ecc_strength,
geo->page_size,
geo->metadata_size,
geo->ecc_chunk_size,
geo->ecc_chunk_count,
geo->payload_size,
geo->auxiliary_size,
geo->auxiliary_status_offset,
geo->block_mark_byte_offset,
geo->block_mark_bit_offset);
}

/* Configures the geometry for BCH. */
int bch_set_geometry(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
struct bch_geometry *bch_geo = &this->bch_geometry;
unsigned int block_count;
unsigned int block_size;
unsigned int metadata_size;
unsigned int ecc_strength;
unsigned int page_size;
unsigned int gf_len;
int ret;

ret = common_nfc_set_geometry(this);
if (ret)
return ret;

block_count = bch_geo->ecc_chunk_count - 1;
block_size = bch_geo->ecc_chunk_size;
metadata_size = bch_geo->metadata_size;
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
gf_len = bch_geo->gf_len;

ret = gpmi_enable_clk(this);
if (ret)
return ret;

/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
* and MX28.
*/
ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;

/* Configure layout 0. */
writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
| BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT0);

writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
| BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT1);

/* Set *all* chip selects to use layout 0. */
writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);

/* Enable interrupts. */
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
r->bch_regs + HW_BCH_CTRL_SET);

gpmi_disable_clk(this);
return 0;
err_out:
gpmi_disable_clk(this);
return ret;
}

/*
* <1> Firstly, we should know what's the GPMI-clock means.
* The GPMI-clock is the internal clock in the gpmi nand controller.
* If you set 100MHz to gpmi nand controller, the GPMI-clock's period
* is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
*
* <2> Secondly, we should know what's the frequency on the nand chip pins.
* The frequency on the nand chip pins is derived from the GPMI-clock.
* We can get it from the following equation:
*
* F = G / (DS + DH)
*
* F : the frequency on the nand chip pins.
* G : the GPMI clock, such as 100MHz.
* DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
* DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
*
* <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
* the nand EDO(extended Data Out) timing could be applied.
* The GPMI implements a feedback read strobe to sample the read data.
* The feedback read strobe can be delayed to support the nand EDO timing
* where the read strobe may deasserts before the read data is valid, and
* read data is valid for some time after read strobe.
*
* The following figure illustrates some aspects of a NAND Flash read:
*
* |<---tREA---->|
* | |
* | | |
* |<--tRP-->| |
* | | |
* __ ___|__________________________________
* RDN \________/ |
* |
* /---------\
* Read Data --------------< >---------
* \---------/
* | |
* |<-D->|
* FeedbackRDN ________ ____________
* \___________/
*
* D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
*
*
* <4> Now, we begin to describe how to compute the right RDN_DELAY.
*
* 4.1) From the aspect of the nand chip pins:
* Delay = (tREA + C - tRP) {1}
*
* tREA : the maximum read access time.
* C : a constant to adjust the delay. default is 4000ps.
* tRP : the read pulse width, which is exactly:
* tRP = (GPMI-clock-period) * DATA_SETUP
*
* 4.2) From the aspect of the GPMI nand controller:
* Delay = RDN_DELAY * 0.125 * RP {2}
*
* RP : the DLL reference period.
* if (GPMI-clock-period > DLL_THRETHOLD)
* RP = GPMI-clock-period / 2;
* else
* RP = GPMI-clock-period;
*
* Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
* is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD
* is 16000ps, but in mx6q, we use 12000ps.
*
* 4.3) since {1} equals {2}, we get:
*
* (tREA + 4000 - tRP) * 8
* RDN_DELAY = ----------------------- {3}
* RP
*/
static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
const struct nand_sdr_timings *sdr)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
unsigned int period_ps, reference_period_ps;
unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
u16 busy_timeout_cycles;
u8 wrn_dly_sel;

if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
hw->clk_rate = 22000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
} else if (sdr->tRC_min >= 25000) {
/* ONFI EDO mode 4 */
hw->clk_rate = 80000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
} else {
/* ONFI EDO mode 5 */
hw->clk_rate = 100000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
}

/* SDR core timings are given in picoseconds */
period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);

addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);

hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);

/*
* Derive NFC ideal delay from {3}:
*
* (tREA + 4000 - tRP) * 8
* RDN_DELAY = -----------------------
* RP
*/
if (period_ps > dll_threshold_ps) {
use_half_period = true;
reference_period_ps = period_ps / 2;
} else {
use_half_period = false;
reference_period_ps = period_ps;
}

tRP_ps = data_setup_cycles * period_ps;
sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
if (sample_delay_ps > 0)
sample_delay_factor = sample_delay_ps / reference_period_ps;
else
sample_delay_factor = 0;

hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
if (sample_delay_factor)
hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
BM_GPMI_CTRL1_DLL_ENABLE |
(use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}
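As a worked instance of formula {3} above, the short program below plugs in illustrative numbers only (a 100 MHz GPMI clock, tREA = 16 ns, DATA_SETUP = 1 cycle, and the 12 ns i.MX6 DLL threshold mentioned in the comment); it is not part of the driver:

#include <stdio.h>

int main(void)
{
	unsigned int period_ps = 10000;        /* 100 MHz -> 10 ns period */
	unsigned int dll_threshold_ps = 12000; /* mx6q value from above   */
	unsigned int data_setup_cycles = 1;
	unsigned int tREA_ps = 16000;

	/* RP is the full period here because 10 ns <= 12 ns threshold. */
	unsigned int rp_ps = (period_ps > dll_threshold_ps) ?
			     period_ps / 2 : period_ps;
	unsigned int tRP_ps = data_setup_cycles * period_ps;
	int delay_ps = (tREA_ps + 4000 - tRP_ps) * 8;
	int rdn_delay = (delay_ps > 0) ? delay_ps / (int)rp_ps : 0;

	/* Expected output: RDN_DELAY = 8, HALF_PERIOD = 0 */
	printf("RDN_DELAY = %d, HALF_PERIOD = %d\n",
	       rdn_delay, period_ps > dll_threshold_ps);
	return 0;
}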

void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
struct resources *r = &this->resources;
void __iomem *gpmi_regs = r->gpmi_regs;
unsigned int dll_wait_time_us;

clk_set_rate(r->clock[0], hw->clk_rate);

writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

/*
* Clear several CTRL1 fields, DLL must be disabled when setting
* RDN_DELAY or HALF_PERIOD.
*/
writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
if (!dll_wait_time_us)
dll_wait_time_us = 1;

/* Wait for the DLL to settle. */
udelay(dll_wait_time_us);
}

int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
const struct nand_sdr_timings *sdr;

/* Retrieve required NAND timings */
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);

/* Only MX6 GPMI controller can reach EDO timings */
if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
return -ENOTSUPP;

/* Stop here if this call was just a check */
if (chipnr < 0)
return 0;

/* Do the actual derivation of the controller timings */
gpmi_nfc_compute_timings(this, sdr);

this->hw.must_apply_timings = true;

return 0;
}

/* Clears a BCH interrupt. */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}

/* Returns the Ready/Busy status of the given chip. */
int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
{
struct resources *r = &this->resources;
uint32_t mask = 0;
uint32_t reg = 0;

if (GPMI_IS_MX23(this)) {
mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
/*
* In the imx6, all the ready/busy pins are bound
* together. So we only need to check chip 0.
*/
if (GPMI_IS_MX6(this))
chip = 0;

/* MX28 shares the same R/B register as MX6Q. */
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
} else
dev_err(this->dev, "unknown arch.\n");
return reg & mask;
}

int gpmi_send_command(struct gpmi_nand_data *this)
{
struct dma_chan *channel = get_dma_chan(this);
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl;
int chip = this->current_chip;
int ret;
u32 pio[3];

/* [1] send out the PIO words */
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
| BM_GPMI_CTRL0_ADDRESS_INCREMENT
| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
pio[1] = pio[2] = 0;
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc)
return -EINVAL;

/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
sgl = &this->cmd_sgl;

sg_init_one(sgl, this->cmd_buffer, this->command_length);
dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(channel,
sgl, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -EINVAL;

/* [3] submit the DMA */
ret = start_dma_without_bch_irq(this, desc);

dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);

return ret;
}

int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
{
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
int chip = this->current_chip;
int ret;
uint32_t command_mode;
uint32_t address;
u32 pio[2];

/* [1] PIO */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(len);
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc)
return -EINVAL;

/* [2] send DMA request */
prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -EINVAL;

/* [3] submit the DMA */
ret = start_dma_without_bch_irq(this, desc);

dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);

return ret;
}

int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
{
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
int chip = this->current_chip;
int ret;
u32 pio[2];
bool direct;

/* [1] : send PIO */
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
| BF_GPMI_CTRL0_XFER_COUNT(len);
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc)
return -EINVAL;

/* [2] : send DMA request */
direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -EINVAL;

/* [3] : submit the DMA */

ret = start_dma_without_bch_irq(this, desc);

dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
if (!direct)
memcpy(buf, this->data_buffer_dma, len);

return ret;
}

int gpmi_send_page(struct gpmi_nand_data *this,
dma_addr_t payload, dma_addr_t auxiliary)
{
struct bch_geometry *geo = &this->bch_geometry;
uint32_t command_mode;
uint32_t address;
uint32_t ecc_command;
uint32_t buffer_mask;
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
int chip = this->current_chip;
u32 pio[6];

/* A DMA descriptor that does an ECC page read. */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
pio[3] = geo->page_size;
pio[4] = payload;
pio[5] = auxiliary;

desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE,
DMA_CTRL_ACK);
if (!desc)
return -EINVAL;

return start_dma_with_bch_irq(this, desc);
}

int gpmi_read_page(struct gpmi_nand_data *this,
dma_addr_t payload, dma_addr_t auxiliary)
{
struct bch_geometry *geo = &this->bch_geometry;
uint32_t command_mode;
uint32_t address;
uint32_t ecc_command;
uint32_t buffer_mask;
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
int chip = this->current_chip;
u32 pio[6];

/* [1] Wait for the chip to report ready. */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio, 2,
DMA_TRANS_NONE, 0);
if (!desc)
return -EINVAL;

/* [2] Enable the BCH block and read. */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

pio[1] = 0;
pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
pio[3] = geo->page_size;
pio[4] = payload;
pio[5] = auxiliary;
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -EINVAL;

/* [3] Disable the BCH block */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(address)
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
pio[1] = 0;
pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio, 3,
DMA_TRANS_NONE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -EINVAL;

/* [4] submit the DMA */
return start_dma_with_bch_irq(this, desc);
}

/**
* gpmi_copy_bits - copy bits from one memory region to another
* @dst: destination buffer
* @dst_bit_off: bit offset we're starting to write at
* @src: source buffer
* @src_bit_off: bit offset we're starting to read from
* @nbits: number of bits to copy
*
* This functions copies bits from one memory region to another, and is used by
* the GPMI driver to copy ECC sections which are not guaranteed to be byte
* aligned.
*
* src and dst should not overlap.
*
*/
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
size_t nbits)
{
size_t i;
size_t nbytes;
u32 src_buffer = 0;
size_t bits_in_src_buffer = 0;

if (!nbits)
return;

/*
* Move src and dst pointers to the closest byte pointer and store bit
* offsets within a byte.
*/
src += src_bit_off / 8;
src_bit_off %= 8;

dst += dst_bit_off / 8;
dst_bit_off %= 8;

/*
* Initialize the src_buffer value with bits available in the first
* byte of data so that we end up with a byte aligned src pointer.
*/
if (src_bit_off) {
src_buffer = src[0] >> src_bit_off;
if (nbits >= (8 - src_bit_off)) {
bits_in_src_buffer += 8 - src_bit_off;
} else {
src_buffer &= GENMASK(nbits - 1, 0);
bits_in_src_buffer += nbits;
}
nbits -= bits_in_src_buffer;
src++;
}

/* Calculate the number of bytes that can be copied from src to dst. */
nbytes = nbits / 8;

/* Try to align dst to a byte boundary. */
if (dst_bit_off) {
if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
src_buffer |= src[0] << bits_in_src_buffer;
bits_in_src_buffer += 8;
src++;
nbytes--;
}

if (bits_in_src_buffer >= (8 - dst_bit_off)) {
dst[0] &= GENMASK(dst_bit_off - 1, 0);
dst[0] |= src_buffer << dst_bit_off;
src_buffer >>= (8 - dst_bit_off);
bits_in_src_buffer -= (8 - dst_bit_off);
dst_bit_off = 0;
dst++;
if (bits_in_src_buffer > 7) {
bits_in_src_buffer -= 8;
dst[0] = src_buffer;
dst++;
src_buffer >>= 8;
}
}
}

if (!bits_in_src_buffer && !dst_bit_off) {
/*
* Both src and dst pointers are byte aligned, thus we can
* just use the optimized memcpy function.
*/
if (nbytes)
memcpy(dst, src, nbytes);
} else {
/*
* src buffer is not byte aligned, hence we have to copy each
* src byte to the src_buffer variable before extracting a byte
* to store in dst.
*/
for (i = 0; i < nbytes; i++) {
src_buffer |= src[i] << bits_in_src_buffer;
dst[i] = src_buffer;
src_buffer >>= 8;
}
}
/* Update dst and src pointers */
dst += nbytes;
src += nbytes;

/*
* nbits is the number of remaining bits. It should not exceed 8 as
* we've already copied as much bytes as possible.
*/
nbits %= 8;

/*
* If there's no more bits to copy to the destination and src buffer
* was already byte aligned, then we're done.
*/
if (!nbits && !bits_in_src_buffer)
return;

/* Copy the remaining bits to src_buffer */
if (nbits)
src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
bits_in_src_buffer;
bits_in_src_buffer += nbits;

/*
* In case there were not enough bits to get a byte aligned dst buffer
* prepare the src_buffer variable to match the dst organization (shift
* src_buffer by dst_bit_off and retrieve the least significant bits
* from dst).
*/
if (dst_bit_off)
src_buffer = (src_buffer << dst_bit_off) |
(*dst & GENMASK(dst_bit_off - 1, 0));
bits_in_src_buffer += dst_bit_off;

/*
* Keep most significant bits from dst if we end up with an unaligned
* number of bits.
*/
nbytes = bits_in_src_buffer / 8;
if (bits_in_src_buffer % 8) {
src_buffer |= (dst[nbytes] &
GENMASK(7, bits_in_src_buffer % 8)) <<
(nbytes * 8);
nbytes++;
}

/* Copy the remaining bytes to dst */
for (i = 0; i < nbytes; i++) {
dst[i] = src_buffer;
src_buffer >>= 8;
}
}
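To make the copy semantics documented above concrete, here is a naive, standalone bit-for-bit reference of the same operation (LSB-first within each byte, as the shifts above imply). It is only a readability aid with made-up buffers and offsets, not the optimized helper itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy nbits from src starting at src_bit_off into dst at dst_bit_off,
 * one bit at a time, using LSB-first bit numbering within each byte. */
static void copy_bits_ref(uint8_t *dst, size_t dst_bit_off,
			  const uint8_t *src, size_t src_bit_off,
			  size_t nbits)
{
	for (size_t i = 0; i < nbits; i++) {
		size_t s = src_bit_off + i, d = dst_bit_off + i;
		uint8_t bit = (src[s / 8] >> (s % 8)) & 1;

		dst[d / 8] &= (uint8_t)~(1u << (d % 8));
		dst[d / 8] |= (uint8_t)(bit << (d % 8));
	}
}

int main(void)
{
	uint8_t src[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t dst[4];

	memset(dst, 0, sizeof(dst));
	/* Copy 13 bits starting at bit 3 of src into dst at bit 5. */
	copy_bits_ref(dst, 5, src, 3, 13);
	printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
	return 0;
}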

File diff suppressed because it is too large
@@ -103,6 +103,14 @@ struct gpmi_nfc_hardware_timing {
u32 ctrl1n;
};

#define GPMI_MAX_TRANSFERS 8

struct gpmi_transfer {
u8 cmdbuf[8];
struct scatterlist sgl;
enum dma_data_direction direction;
};

struct gpmi_nand_data {
/* Devdata */
const struct gpmi_devdata *devdata;
@@ -126,25 +134,18 @@ struct gpmi_nand_data {
struct boot_rom_geometry rom_geometry;

/* MTD / NAND */
struct nand_controller base;
struct nand_chip nand;

/* General-use Variables */
int current_chip;
unsigned int command_length;
struct gpmi_transfer transfers[GPMI_MAX_TRANSFERS];
int ntransfers;

struct scatterlist cmd_sgl;
char *cmd_buffer;
bool bch;
uint32_t bch_flashlayout0;
uint32_t bch_flashlayout1;

struct scatterlist data_sgl;
char *data_buffer_dma;

void *page_buffer_virt;
dma_addr_t page_buffer_phys;
unsigned int page_buffer_size;

void *payload_virt;
dma_addr_t payload_phys;

void *auxiliary_virt;
dma_addr_t auxiliary_phys;

@@ -154,45 +155,8 @@ struct gpmi_nand_data {
#define DMA_CHANS 8
struct dma_chan *dma_chans[DMA_CHANS];
struct completion dma_done;

/* private */
void *private;
};

/* Common Services */
int common_nfc_set_geometry(struct gpmi_nand_data *);
struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
bool prepare_data_dma(struct gpmi_nand_data *, const void *buf, int len,
enum dma_data_direction dr);
int start_dma_without_bch_irq(struct gpmi_nand_data *,
struct dma_async_tx_descriptor *);
int start_dma_with_bch_irq(struct gpmi_nand_data *,
struct dma_async_tx_descriptor *);

/* GPMI-NAND helper function library */
int gpmi_init(struct gpmi_nand_data *);
void gpmi_clear_bch(struct gpmi_nand_data *);
void gpmi_dump_info(struct gpmi_nand_data *);
int bch_set_geometry(struct gpmi_nand_data *);
int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
int gpmi_send_command(struct gpmi_nand_data *);
int gpmi_enable_clk(struct gpmi_nand_data *this);
int gpmi_disable_clk(struct gpmi_nand_data *this);
int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf);
void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len);
int gpmi_send_data(struct gpmi_nand_data *, const void *buf, int len);

int gpmi_send_page(struct gpmi_nand_data *,
dma_addr_t payload, dma_addr_t auxiliary);
int gpmi_read_page(struct gpmi_nand_data *,
dma_addr_t payload, dma_addr_t auxiliary);

void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
size_t nbits);

/* BCH : Status Block Completion Codes */
#define STATUS_GOOD 0x00
#define STATUS_ERASED 0xff
@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * MTK ECC controller driver.
 * Copyright (C) 2016 MediaTek Inc.
@@ -596,4 +596,4 @@ module_platform_driver(mtk_ecc_driver);

MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand ECC Driver");
MODULE_LICENSE("GPL");
MODULE_LICENSE("Dual MIT/GPL");

@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * MTK SDG1 ECC controller
 *

@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * MTK NAND Flash controller driver.
 * Copyright (C) 2016 MediaTek Inc.
@@ -79,6 +79,10 @@
#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE (8)
#define NFI_FDM_MIN_SIZE (1)
#define NFI_DEBUG_CON1 (0x220)
#define STROBE_MASK GENMASK(4, 3)
#define STROBE_SHIFT (3)
#define MAX_STROBE_DLY (3)
#define NFI_MASTER_STA (0x224)
#define MASTER_STA_MASK (0x0FFF)
#define NFI_EMPTY_THRESH (0x23C)
@@ -150,6 +154,8 @@ struct mtk_nfc {
struct list_head chips;

u8 *buffer;

unsigned long assigned_cs;
};

/*
@@ -500,7 +506,8 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *timings;
u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
u32 temp, tsel = 0;

timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
@@ -536,14 +543,53 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
twh &= 0xf;

twst = timings->tWP_min / 1000;
/* Calculate real WE#/RE# hold time in nanosecond */
temp = (twh + 1) * 1000000 / rate;
/* nanosecond to picosecond */
temp *= 1000;

/*
 * WE# low level time should be expaned to meet WE# pulse time
 * and WE# cycle time at the same time.
 */
if (temp < timings->tWC_min)
twst = timings->tWC_min - temp;
twst = max(timings->tWP_min, twst) / 1000;
twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
twst &= 0xf;

trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
/*
 * RE# low level time should be expaned to meet RE# pulse time
 * and RE# cycle time at the same time.
 */
if (temp < timings->tRC_min)
trlt = timings->tRC_min - temp;
trlt = max(trlt, timings->tRP_min) / 1000;
trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
trlt &= 0xf;

/* Calculate RE# pulse time in nanosecond. */
temp = (trlt + 1) * 1000000 / rate;
/* nanosecond to picosecond */
temp *= 1000;
/*
 * If RE# access time is bigger than RE# pulse time,
 * delay sampling data timing.
 */
if (temp < timings->tREA_max) {
tsel = timings->tREA_max / 1000;
tsel = DIV_ROUND_UP(tsel * rate, 1000000);
tsel -= (trlt + 1);
if (tsel > MAX_STROBE_DLY) {
trlt += tsel - MAX_STROBE_DLY;
tsel = MAX_STROBE_DLY;
}
}
temp = nfi_readl(nfc, NFI_DEBUG_CON1);
temp &= ~STROBE_MASK;
temp |= tsel << STROBE_SHIFT;
nfi_writel(nfc, temp, NFI_DEBUG_CON1);

/*
 * ACCON: access timing control register
 * -------------------------------------
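
The hunk above first computes the real WE#/RE# high time for the programmed cycle count, widens the corresponding low time so that both the pulse-width minimum and the cycle-time minimum are honoured, and only then programs any remaining RE# access-time gap into the strobe-delay field (clamped to MAX_STROBE_DLY). A stand-alone model of the WE# arithmetic, assuming as in the driver that rate is the NFI clock in kHz and that the nand_sdr_timings values are in picoseconds (helper macros redefined locally, function name illustrative), might read:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

/*
 * Given the already-programmed WE# high time in clock cycles, return the
 * WE# low-time register field so that both tWP_min and tWC_min are met.
 * Timing inputs are in picoseconds, rate in kHz.
 */
static unsigned long we_low_cycles(unsigned long twh_cycles, unsigned long rate_khz,
				   unsigned long twp_min_ps, unsigned long twc_min_ps)
{
	unsigned long high_ps, twst = 0;

	/* Real WE# high time for the programmed cycle count. */
	high_ps = (twh_cycles + 1) * 1000000 / rate_khz;	/* ns */
	high_ps *= 1000;					/* ps */

	/* Stretch the low phase if the high phase alone cannot cover tWC. */
	if (high_ps < twc_min_ps)
		twst = twc_min_ps - high_ps;

	twst = MAX(twp_min_ps, twst) / 1000;			/* ps -> ns */
	return (DIV_ROUND_UP(twst * rate_khz, 1000000) - 1) & 0xf;
}

int main(void)
{
	/* e.g. 124 MHz NFI clock, tWP 15 ns, tWC 30 ns, WE# high = 1 cycle */
	printf("WE# low field = %lu\n", we_low_cycles(1, 124000, 15000, 30000));
	return 0;
}

With a 124 MHz clock the one-cycle WE# high phase (16 ns) falls short of the 30 ns tWC, so the low phase is stretched and the model returns a field value of 1, i.e. two clock cycles low.
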
@@ -835,19 +881,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
return mtk_nfc_write_page_raw(chip, NULL, 1, page);
}

static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
u32 sectors)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_ecc_stats stats;
u32 reg_size = mtk_nand->fdm.reg_size;
int rc, i;

rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
if (rc) {
memset(buf, 0xff, sectors * chip->ecc.size);
for (i = 0; i < sectors; i++)
memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
memset(oob_ptr(chip, start + i), 0xff, reg_size);
return 0;
}

@@ -867,7 +915,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
u32 spare = mtk_nand->spare_per_sector;
u32 column, sectors, start, end, reg;
dma_addr_t addr;
int bitflips;
int bitflips = 0;
size_t len;
u8 *buf;
int rc;
@@ -934,14 +982,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
if (rc < 0) {
dev_err(nfc->dev, "subpage done timeout\n");
bitflips = -EIO;
} else {
bitflips = 0;
if (!raw) {
rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
bitflips = rc < 0 ? -ETIMEDOUT :
mtk_nfc_update_ecc_stats(mtd, buf, sectors);
mtk_nfc_read_fdm(chip, start, sectors);
}
} else if (!raw) {
rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
bitflips = rc < 0 ? -ETIMEDOUT :
mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
mtk_nfc_read_fdm(chip, start, sectors);
}

dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
@@ -1315,6 +1360,17 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
dev_err(dev, "reg property failure : %d\n", ret);
return ret;
}

if (tmp >= MTK_NAND_MAX_NSELS) {
dev_err(dev, "invalid CS: %u\n", tmp);
return -EINVAL;
}

if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
dev_err(dev, "CS %u already assigned\n", tmp);
return -EINVAL;
}

chip->sels[i] = tmp;
}

@@ -1589,6 +1645,6 @@ static struct platform_driver mtk_nfc_driver = {

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");

@@ -2111,35 +2111,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
if (instr == &ctx->subop.instrs[0])
prefix = " ->";

switch (instr->type) {
case NAND_OP_CMD_INSTR:
pr_debug("%sCMD [0x%02x]\n", prefix,
instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
instr->ctx.addr.naddrs,
instr->ctx.addr.naddrs < 64 ?
instr->ctx.addr.naddrs : 64,
instr->ctx.addr.addrs);
break;
case NAND_OP_DATA_IN_INSTR:
pr_debug("%sDATA_IN [%d B%s]\n", prefix,
instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");
break;
case NAND_OP_DATA_OUT_INSTR:
pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");
break;
case NAND_OP_WAITRDY_INSTR:
pr_debug("%sWAITRDY [max %d ms]\n", prefix,
instr->ctx.waitrdy.timeout_ms);
break;
}
nand_op_trace(prefix, instr);

if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
prefix = " ";
@@ -2152,6 +2124,22 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
}
#endif

static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
const struct nand_op_parser_ctx *b)
{
if (a->subop.ninstrs < b->subop.ninstrs)
return -1;
else if (a->subop.ninstrs > b->subop.ninstrs)
return 1;

if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
return -1;
else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
return 1;

return 0;
}

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
@@ -2186,30 +2174,38 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
unsigned int i;

while (ctx.subop.instrs < op->instrs + op->ninstrs) {
int ret;
const struct nand_op_parser_pattern *pattern;
struct nand_op_parser_ctx best_ctx;
int ret, best_pattern = -1;

for (i = 0; i < parser->npatterns; i++) {
const struct nand_op_parser_pattern *pattern;
struct nand_op_parser_ctx test_ctx = ctx;

pattern = &parser->patterns[i];
if (!nand_op_parser_match_pat(pattern, &ctx))
if (!nand_op_parser_match_pat(pattern, &test_ctx))
continue;

nand_op_parser_trace(&ctx);
if (best_pattern >= 0 &&
nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
continue;

if (check_only)
break;
best_pattern = i;
best_ctx = test_ctx;
}

if (best_pattern < 0) {
pr_debug("->exec_op() parser: pattern not found!\n");
return -ENOTSUPP;
}

ctx = best_ctx;
nand_op_parser_trace(&ctx);

if (!check_only) {
pattern = &parser->patterns[best_pattern];
ret = pattern->exec(chip, &ctx.subop);
if (ret)
return ret;

break;
}

if (i == parser->npatterns) {
pr_debug("->exec_op() parser: pattern not found!\n");
return -ENOTSUPP;
}

/*
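
The parser change above no longer executes the first pattern that happens to match; every pattern is tried against a scratch copy of the context and the candidate that covers the most of the requested operation wins, comparing the instruction count first and the end offset inside the last instruction as the tie-breaker. A reduced stand-alone model of that selection rule (plain C, with a simplified context struct instead of the kernel's nand_op_parser_ctx) is:

#include <stdio.h>

/* Reduced stand-in for the matched sub-operation's extent. */
struct subop_ctx {
	unsigned int ninstrs;			/* instructions covered by the match */
	unsigned int last_instr_end_off;	/* how far into the last one it reaches */
};

/* Mirror of the comparison: more instructions wins, then a longer tail. */
static int cmp_ctx(const struct subop_ctx *a, const struct subop_ctx *b)
{
	if (a->ninstrs != b->ninstrs)
		return a->ninstrs < b->ninstrs ? -1 : 1;
	if (a->last_instr_end_off != b->last_instr_end_off)
		return a->last_instr_end_off < b->last_instr_end_off ? -1 : 1;
	return 0;
}

int main(void)
{
	struct subop_ctx matches[] = { { 2, 0 }, { 3, 128 }, { 3, 256 } };
	int i, best = -1;

	for (i = 0; i < 3; i++)
		if (best < 0 || cmp_ctx(&matches[i], &matches[best]) > 0)
			best = i;

	printf("best matching pattern: %d\n", best);
	return 0;
}

Here the third candidate wins: it matches as many instructions as the second but consumes more of the final data instruction, which is exactly the case the new tie-breaker is meant to settle.
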
@@ -170,7 +170,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
goto fail;
}

nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL);
nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
if (!nbc->eccmask || !nbc->errloc)
goto fail;
@@ -182,7 +182,6 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
goto fail;

memset(erased_page, 0xff, eccsize);
memset(nbc->eccmask, 0, eccbytes);
encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
kfree(erased_page);

@@ -8,6 +8,50 @@

#include "internals.h"

#define MACRONIX_READ_RETRY_BIT BIT(0)
#define MACRONIX_NUM_READ_RETRY_MODES 6

struct nand_onfi_vendor_macronix {
u8 reserved;
u8 reliability_func;
} __packed;

static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];

if (!chip->parameters.supports_set_get_features ||
!test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
chip->parameters.set_feature_list))
return -ENOTSUPP;

feature[0] = mode;
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
}

static void macronix_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
struct nand_onfi_vendor_macronix *mxic;

if (!p->onfi)
return;

mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
return;

chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
chip->setup_read_retry = macronix_nand_setup_read_retry;

if (p->supports_set_get_features) {
bitmap_set(p->set_feature_list,
ONFI_FEATURE_ADDR_READ_RETRY, 1);
bitmap_set(p->get_feature_list,
ONFI_FEATURE_ADDR_READ_RETRY, 1);
}
}

/*
 * Macronix AC series does not support using SET/GET_FEATURES to change
 * the timings unlike what is declared in the parameter page. Unflag
@@ -56,6 +100,7 @@ static int macronix_nand_init(struct nand_chip *chip)
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

macronix_nand_fix_broken_get_timings(chip);
macronix_nand_onfi_init(chip);

return 0;
}

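
The Macronix hunk above only advertises the capability: it fills in chip->read_retries and a ->setup_read_retry() hook that selects a retry mode through SET_FEATURES at ONFI_FEATURE_ADDR_READ_RETRY. Cycling through the modes when a page is uncorrectable is left to the NAND core; the loop below is a rough stand-alone model of that behaviour, with hypothetical read_page()/set_mode() callbacks standing in for the real driver interfaces, not the kernel's API:

#include <errno.h>
#include <stdio.h>

/* Hypothetical per-chip hooks, standing in for the driver callbacks. */
typedef int (*read_page_fn)(int page);
typedef int (*set_retry_mode_fn)(int mode);

static int read_with_retries(int page, int nmodes,
			     read_page_fn read_page, set_retry_mode_fn set_mode)
{
	int mode, ret = -EBADMSG;

	for (mode = 0; mode < nmodes; mode++) {
		set_mode(mode);		/* e.g. SET_FEATURES(READ_RETRY, mode) */
		ret = read_page(page);
		if (ret != -EBADMSG)	/* corrected (>= 0) or a hard error */
			break;
	}

	set_mode(0);			/* always drop back to the default mode */
	return ret;
}

/* Toy callbacks: the page only becomes readable from retry mode 2 onwards. */
static int current_mode;
static int toy_set_mode(int mode) { current_mode = mode; return 0; }
static int toy_read_page(int page) { (void)page; return current_mode >= 2 ? 1 : -EBADMSG; }

int main(void)
{
	printf("bitflips = %d\n", read_with_retries(0, 6, toy_read_page, toy_set_mode));
	return 0;
}

The toy page decodes only from mode 2 on, so the loop reports one corrected bitflip after two failed attempts and then returns the chip to its default mode, mirroring the six MACRONIX_NUM_READ_RETRY_MODES exposed above.
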
@@ -37,6 +37,8 @@
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)

#define FMC2_TIMEOUT_MS 1000

/* Timings */
#define FMC2_THIZ 1
#define FMC2_TIO 8000
@@ -530,7 +532,8 @@ static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
int ret;

ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
sr, sr & FMC2_SR_NWRF, 10, 1000);
sr, sr & FMC2_SR_NWRF, 10,
FMC2_TIMEOUT_MS);
if (ret) {
dev_err(fmc2->dev, "ham timeout\n");
return ret;
@@ -611,7 +614,7 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,

/* Wait until the BCH code is ready */
if (!wait_for_completion_timeout(&fmc2->complete,
msecs_to_jiffies(1000))) {
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(fmc2->dev, "bch timeout\n");
stm32_fmc2_disable_bch_irq(fmc2);
return -ETIMEDOUT;
@@ -696,7 +699,7 @@ static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,

/* Wait until the decoding error is ready */
if (!wait_for_completion_timeout(&fmc2->complete,
msecs_to_jiffies(1000))) {
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(fmc2->dev, "bch timeout\n");
stm32_fmc2_disable_bch_irq(fmc2);
return -ETIMEDOUT;
@@ -969,7 +972,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,

/* Wait end of sequencer transfer */
if (!wait_for_completion_timeout(&fmc2->complete,
msecs_to_jiffies(1000))) {
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(fmc2->dev, "seq timeout\n");
stm32_fmc2_disable_seq_irq(fmc2);
dmaengine_terminate_all(dma_ch);
@@ -981,7 +984,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,

/* Wait DMA data transfer completion */
if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
msecs_to_jiffies(100))) {
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(fmc2->dev, "data DMA timeout\n");
dmaengine_terminate_all(dma_ch);
ret = -ETIMEDOUT;
@@ -990,7 +993,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
/* Wait DMA ECC transfer completion */
if (!write_data && !raw) {
if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
msecs_to_jiffies(100))) {
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(fmc2->dev, "ECC DMA timeout\n");
dmaengine_terminate_all(fmc2->dma_ecc_ch);
ret = -ETIMEDOUT;
@@ -1909,6 +1912,12 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
}

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
if (irq != -EPROBE_DEFER)
dev_err(dev, "IRQ error missing or invalid\n");
return irq;
}

ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
dev_name(dev), fmc2);
if (ret) {
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o
spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o

@@ -511,12 +511,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret == -EBADMSG) {
ecc_failed = true;
mtd->ecc_stats.failed++;
ret = 0;
} else {
mtd->ecc_stats.corrected += ret;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
}

ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
}
@@ -757,6 +757,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
};
@@ -845,7 +846,7 @@ spinand_select_op_variant(struct spinand_device *spinand,
 */
int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
unsigned int table_size, u8 devid)
unsigned int table_size, u16 devid)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;

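
In the core.c hunk above, the `ret = 0` reset moves out of the -EBADMSG branch so that a positive bitflip count left over from the final page can no longer mask an earlier uncorrectable error in the function's return value; the loop is only meant to fold per-page results into the ECC statistics and max_bitflips. A condensed stand-alone model of that accumulation (plain C, with an invented page_results[] input) is:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* Per-page results: >= 0 is a corrected bitflip count, -EBADMSG is uncorrectable. */
	int page_results[] = { 0, 3, -EBADMSG, 5 };
	unsigned int corrected = 0, failed = 0, max_bitflips = 0;
	int i, ret = 0, ecc_failed = 0;

	for (i = 0; i < 4; i++) {
		ret = page_results[i];
		if (ret == -EBADMSG) {
			ecc_failed = 1;
			failed++;
		} else {
			corrected += ret;
			if ((unsigned int)ret > max_bitflips)
				max_bitflips = ret;
		}

		ret = 0;	/* reset for *every* page, not only the -EBADMSG case */
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	printf("corrected=%u failed=%u ret=%d\n",
	       corrected, failed, ret ? ret : (int)max_bitflips);
	return 0;
}

With the reset in place the run reports -EBADMSG from the third page; without it, the five bitflips on the last page would have leaked into ret and the uncorrectable error would have gone unreported.
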
@@ -9,11 +9,17 @@
#include <linux/mtd/spinand.h>

#define SPINAND_MFR_GIGADEVICE 0xC8

#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)

#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0

#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)

static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -22,6 +28,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(read_cache_variants_f,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
@@ -59,6 +73,11 @@ static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}

static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
.ecc = gd5fxgq4xa_ooblayout_ecc,
.free = gd5fxgq4xa_ooblayout_free,
};

static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
@@ -83,7 +102,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}

static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
@@ -95,7 +114,7 @@ static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}

static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
@@ -108,6 +127,11 @@ static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}

static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
.ecc = gd5fxgq4_variant2_ooblayout_ecc,
.free = gd5fxgq4_variant2_ooblayout_free,
};

static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
@@ -150,15 +174,25 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}

static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
.ecc = gd5fxgq4xa_ooblayout_ecc,
.free = gd5fxgq4xa_ooblayout_free,
};
static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
return 0;

static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
.ecc = gd5fxgq4uexxg_ooblayout_ecc,
.free = gd5fxgq4uexxg_ooblayout_free,
};
case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
return 3;

case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;

default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
}

return -EINVAL;
}

static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4xA", 0xF1,
@@ -195,25 +229,40 @@ static const struct spinand_info gigadevice_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4ufxxg_ecc_get_status)),
};

static int gigadevice_spinand_detect(struct spinand_device *spinand)
{
u8 *id = spinand->id.data;
u16 did;
int ret;

/*
 * For GD NANDs, There is an address byte needed to shift in before IDs
 * are read out, so the first byte in raw_id is dummy.
 * Earlier GDF5-series devices (A,E) return [0][MID][DID]
 * Later (F) devices return [MID][DID1][DID2]
 */
if (id[1] != SPINAND_MFR_GIGADEVICE)

if (id[0] == SPINAND_MFR_GIGADEVICE)
did = (id[1] << 8) + id[2];
else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
did = id[2];
else
return 0;

ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
ARRAY_SIZE(gigadevice_spinand_table),
id[2]);
did);
if (ret)
return ret;

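
The detect hunk above has to cope with two ID layouts: earlier GD5F parts answer READ_ID with [0][MID][DID] because a dummy address byte shifts in first, while the newer F parts answer [MID][DID1][DID2] and therefore need a two-byte device ID. A stand-alone model of that parse (plain C; gd_parse_id() is an illustrative name, 0xC8 and the device IDs are the values from the table above) is:

#include <stdint.h>
#include <stdio.h>

#define MFR_GIGADEVICE 0xC8

/* Returns the (possibly two-byte) device ID, or 0 when the ID is not GigaDevice's. */
static uint16_t gd_parse_id(const uint8_t id[3])
{
	if (id[0] == MFR_GIGADEVICE)			/* [MID][DID1][DID2] */
		return (uint16_t)((id[1] << 8) | id[2]);
	if (id[0] == 0 && id[1] == MFR_GIGADEVICE)	/* [0][MID][DID] */
		return id[2];
	return 0;
}

int main(void)
{
	const uint8_t old_style[3] = { 0x00, 0xC8, 0xF1 };	/* GD5F1GQ4xA    -> 0x00f1 */
	const uint8_t new_style[3] = { 0xC8, 0xB1, 0x48 };	/* GD5F1GQ4UFxxG -> 0xb148 */

	printf("%04x %04x\n", (unsigned int)gd_parse_id(old_style),
	       (unsigned int)gd_parse_id(new_style));
	return 0;
}

Both results feed the same table lookup, which is why the devid parameter of spinand_match_and_init() widens from u8 to u16 in the core.c hunk above.
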
new file: drivers/mtd/nand/spi/paragon.c (147 lines)
@@ -0,0 +1,147 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Jeff Kletsky
 *
 * Author: Jeff Kletsky <git-commits@allycomm.com>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>


#define SPINAND_MFR_PARAGON 0xa1


#define PN26G0XA_STATUS_ECC_BITMASK (3 << 4)

#define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4)
#define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4)
#define PN26G0XA_STATUS_ECC_ERRORED (2 << 4)
#define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)


static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));


static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;

region->offset = 6 + (15 * section); /* 4 BBM + 2 user bytes */
region->length = 13;

return 0;
}

static int pn26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 4)
return -ERANGE;

if (section == 4) {
region->offset = 64;
region->length = 64;
} else {
region->offset = 4 + (15 * section);
region->length = 2;
}

return 0;
}

static int pn26g0xa_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & PN26G0XA_STATUS_ECC_BITMASK) {
case PN26G0XA_STATUS_ECC_NONE_DETECTED:
return 0;

case PN26G0XA_STATUS_ECC_1_7_CORRECTED:
return 7; /* Return upper limit by convention */

case PN26G0XA_STATUS_ECC_8_CORRECTED:
return 8;

case PN26G0XA_STATUS_ECC_ERRORED:
return -EBADMSG;

default:
break;
}

return -EINVAL;
}

static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
.ecc = pn26g0xa_ooblayout_ecc,
.free = pn26g0xa_ooblayout_free,
};


static const struct spinand_info paragon_spinand_table[] = {
SPINAND_INFO("PN26G01A", 0xe1,
NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
SPINAND_INFO("PN26G02A", 0xe2,
NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
};

static int paragon_spinand_detect(struct spinand_device *spinand)
{
u8 *id = spinand->id.data;
int ret;

/* Read ID returns [0][MID][DID] */

if (id[1] != SPINAND_MFR_PARAGON)
return 0;

ret = spinand_match_and_init(spinand, paragon_spinand_table,
ARRAY_SIZE(paragon_spinand_table),
id[2]);
if (ret)
return ret;

return 1;
}

static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
.detect = paragon_spinand_detect,
};

const struct spinand_manufacturer paragon_spinand_manufacturer = {
.id = SPINAND_MFR_PARAGON,
.name = "Paragon",
.ops = &paragon_spinand_manuf_ops,
};