nand: Import, fix and clean up IPQ8064 NAND support from U-boot

This patch copies U-Boot's code and headers used to implement NAND support
on IPQ 806x. It then fixes and cleans up the IPQ8064 NAND support. It deletes
a significant amount of code from the original version and adds a
depthcharge-style interface.

BUG=chromium:403432
TEST=With the following patches, NAND initializes properly and reads can
correctly take place from erased NAND.
BRANCH=none

Signed-off-by: Dan Ehrenberg <dehrenberg@chromium.org>
Change-Id: I52d01d945700aedaadabdc3f923e06cd77811e59
Reviewed-on: https://chromium-review.googlesource.com/222311
Reviewed-by: Julius Werner <jwerner@chromium.org>
diff --git a/src/drivers/storage/mtd/nand/Kconfig b/src/drivers/storage/mtd/nand/Kconfig
index 422932d..ae4083b 100644
--- a/src/drivers/storage/mtd/nand/Kconfig
+++ b/src/drivers/storage/mtd/nand/Kconfig
@@ -4,3 +4,7 @@
 	  This enables support for accessing all type of NAND flash
 	  devices. For further information see
 	  <http://www.linux-mtd.infradead.org/doc/nand.html>.
+
+config DRIVER_STORAGE_MTD_IPQ_NAND
+	bool "IPQ NAND support"
+	select DRIVER_STORAGE_MTD_NAND
diff --git a/src/drivers/storage/mtd/nand/Makefile.inc b/src/drivers/storage/mtd/nand/Makefile.inc
index f48615b..4475222 100644
--- a/src/drivers/storage/mtd/nand/Makefile.inc
+++ b/src/drivers/storage/mtd/nand/Makefile.inc
@@ -16,3 +16,4 @@
 ##
 
 depthcharge-$(CONFIG_DRIVER_STORAGE_MTD_NAND) += nand_ids.c
+depthcharge-$(CONFIG_DRIVER_STORAGE_MTD_IPQ_NAND) += ipq_nand.c
diff --git a/src/drivers/storage/mtd/nand/ipq_nand.c b/src/drivers/storage/mtd/nand/ipq_nand.c
new file mode 100644
index 0000000..b4e9128
--- /dev/null
+++ b/src/drivers/storage/mtd/nand/ipq_nand.c
@@ -0,0 +1,1662 @@
+/*
+ * Copyright (c) 2012 - 2013 The Linux Foundation. All rights reserved.
+ */
+
+#include "nand.h"
+#include "ipq_nand.h"
+#include "ipq_nand_private.h"
+#include "base/container_of.h"
+
+#include <strings.h>
+
+/*
+ * MTD, NAND and the IPQ806x NAND controller uses various terms to
+ * refer to the various different types of locations in a
+ * codeword/page. The nomenclature used for variables, is explained
+ * below.
+ *
+ * cw/codeword - used to refer to a chunk of bytes that will be read
+ * or written at a time by the controller. The exact size depends on
+ * the ECC mode. For 4-bit, it is 528 bytes. For 8-bit, it is 532
+ * bytes.
+ *
+ * main - used to refer to locations that are covered by the ECC
+ * engine, for ECC calculation. Appears before the ECC bytes in a
+ * codeword.
+ *
+ * spare - used to refer to locations that are not covered by the ECC
+ * engine, for ECC calculation. Appears after the ECC bytes in a
+ * codeword.
+ *
+ * oob - used to refer to locations where filesystem metainfo will be
+ * stored; this is in line with the MTD convention.
+ *
+ * data - used to refer to locations where file's contents will be
+ * stored; this is in line with the MTD convention.
+ */
+
+/* ECC strength required by the device, in correctable bits per 512-byte step. */
+enum ecc_mode {
+	ECC_REQ_4BIT = 4,
+	ECC_REQ_8BIT = 8
+};
+
+/*
+ * Describes how one codeword of the controller's flash buffer maps onto
+ * the caller's buffers: data_size bytes at data_offs go to the data
+ * buffer, oob_size bytes at oob_offs go to the OOB buffer. All offsets
+ * are byte offsets within the codeword.
+ */
+struct ipq_cw_layout {
+	unsigned int data_offs;
+	unsigned int data_size;
+	unsigned int oob_offs;
+	unsigned int oob_size;
+};
+
+/**
+ * struct ipq_config - IPQ806x specific device config. info
+ * @page_size:		page size, used for matching
+ * @ecc_mode:		ECC mode, used for matching
+ * @main_per_cw:        no. of bytes in the codeword that will be ECCed
+ * @spare_per_cw:	no. of bytes in the codeword that will NOT be ECCed
+ * @ecc_per_cw:	        no. of ECC bytes that will be generated
+ * @bb_byte:            offset of the bad block marker within the codeword
+ * @bb_in_spare:	is the bad block marker in spare area?
+ * @cw_per_page:        the no. of codewords in a page
+ * @ecc_page_layout:    the mapping of data and oob buf in AUTO mode
+ * @raw_page_layout:    the mapping of data and oob buf in RAW mode
+ */
+struct ipq_config {
+	unsigned int page_size;
+	enum ecc_mode ecc_mode;
+
+	unsigned int main_per_cw;
+	unsigned int spare_per_cw;
+	unsigned int ecc_per_cw;
+	unsigned int bb_byte;
+	unsigned int bb_in_spare;
+
+	unsigned int cw_per_page;
+	struct ipq_cw_layout *ecc_page_layout;
+	struct ipq_cw_layout *raw_page_layout;
+};
+
+/**
+ * struct ipq_nand_dev - driver state information
+ * @regs:               the EBI2 NAND controller register block
+ * @main_per_cw:        no. of bytes in the codeword that will be ECCed
+ * @spare_per_cw:	no. of bytes in the codeword that will NOT be ECCed
+ * @cw_per_page:        the no. of codewords in a page
+ * @ecc_page_layout:    the mapping of data and oob buf in AUTO mode
+ * @raw_page_layout:    the mapping of data and oob buf in RAW mode
+ * @curr_page_layout:   currently selected page layout ECC or raw
+ * @dev_cfg0:           the value for DEVn_CFG0 register
+ * @dev_cfg1:           the value for DEVn_CFG1 register
+ * @dev_ecc_cfg:        the value for DEVn_ECC_CFG register
+ * @dev_cfg0_raw:       the value for DEVn_CFG0 register, in raw mode
+ * @dev_cfg1_raw:       the value for DEVn_CFG1 register, in raw mode
+ * @buffers:            pointer to dynamically allocated buffers
+ * @pad_dat:            the pad buffer for in-band data
+ * @pad_oob:            the pad buffer for out-of-band data
+ * @zero_page:          the zero page written for marking bad blocks
+ * @zero_oob:           the zero OOB written for marking bad blocks
+ * @read_cmd:           the controller cmd to do a read
+ * @write_cmd:          the controller cmd to do a write
+ * @oob_per_page:       the no. of OOB bytes per page, depends on OOB mode
+ * @page_shift		the log base 2 of the page size
+ * @phys_erase_shift	the log base 2 of the erase block size
+ * @nand_onfi_params	ONFI parameters read from the flash chip
+ */
+struct ipq_nand_dev {
+	struct ebi2nd_regs *regs;
+
+	unsigned int main_per_cw;
+	unsigned int spare_per_cw;
+
+	unsigned int cw_per_page;
+	struct ipq_cw_layout *ecc_page_layout;
+	struct ipq_cw_layout *raw_page_layout;
+	struct ipq_cw_layout *curr_page_layout;
+
+	uint32_t dev_cfg0;
+	uint32_t dev_cfg1;
+	uint32_t dev_ecc_cfg;
+
+	uint32_t dev_cfg0_raw;
+	uint32_t dev_cfg1_raw;
+
+	unsigned char *buffers;
+	unsigned char *pad_dat;
+	unsigned char *pad_oob;
+	unsigned char *zero_page;
+	unsigned char *zero_oob;
+
+	uint32_t read_cmd;
+	uint32_t write_cmd;
+	unsigned int oob_per_page;
+
+	/* Fields from nand_chip */
+	unsigned page_shift;
+	unsigned phys_erase_shift;
+	struct nand_onfi_params onfi_params;
+};
+
+/* Accessors: the driver state hangs off MtdDev's priv pointer. */
+#define MTD_IPQ_NAND_DEV(mtd) ((struct ipq_nand_dev *)((mtd)->priv))
+#define MTD_ONFI_PARAMS(mtd) (&(MTD_IPQ_NAND_DEV(mtd)->onfi_params))
+
+/*
+ * Per-geometry codeword layout tables and matching config entries.
+ * The "linux" (ECC/AUTO mode) layouts expose only ECC-protected data
+ * plus the usable OOB in the last codeword; the "raw" layouts expose
+ * the full codeword including ECC and spare bytes.
+ */
+static struct ipq_cw_layout ipq_linux_page_layout_4ecc_2k[] = {
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 500, 500, 16 },
+};
+
+static struct ipq_cw_layout ipq_raw_page_layout_4ecc_2k[] = {
+	{ 0, 528,   0,  0 },
+	{ 0, 528,   0,  0 },
+	{ 0, 528,   0,  0 },
+	{ 0, 464, 464, 64 },
+};
+
+static struct ipq_config ipq_linux_config_4ecc_2k = {
+	.page_size = 2048,
+	.ecc_mode = ECC_REQ_4BIT,
+
+	.main_per_cw = 516,
+	.ecc_per_cw = 10,
+	.spare_per_cw = 1,
+	.bb_in_spare = 0,
+	.bb_byte = 465,
+
+	.cw_per_page = 4,
+	.ecc_page_layout = ipq_linux_page_layout_4ecc_2k,
+	.raw_page_layout = ipq_raw_page_layout_4ecc_2k
+};
+
+static struct ipq_cw_layout ipq_linux_page_layout_8ecc_2k[] = {
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 500, 500, 16 },
+};
+
+static struct ipq_cw_layout ipq_raw_page_layout_8ecc_2k[] = {
+	{ 0, 532,   0,  0 },
+	{ 0, 532,   0,  0 },
+	{ 0, 532,   0,  0 },
+	{ 0, 452, 452, 80 },
+};
+
+static struct ipq_config ipq_linux_config_8ecc_2k = {
+	.page_size = 2048,
+	.ecc_mode = ECC_REQ_8BIT,
+
+	.main_per_cw = 516,
+	.ecc_per_cw = 13,
+	.spare_per_cw = 2,
+	.bb_in_spare = 0,
+	.bb_byte = 453,
+
+	.cw_per_page = 4,
+	.ecc_page_layout = ipq_linux_page_layout_8ecc_2k,
+	.raw_page_layout = ipq_raw_page_layout_8ecc_2k
+};
+
+static struct ipq_cw_layout ipq_linux_page_layout_4ecc_4k[] = {
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 484, 484, 32 },
+};
+
+static struct ipq_cw_layout ipq_raw_page_layout_4ecc_4k[] = {
+	{ 0, 528,   0,   0 },
+	{ 0, 528,   0,   0 },
+	{ 0, 528,   0,   0 },
+	{ 0, 528,   0,   0 },
+	{ 0, 528,   0,   0 },
+	{ 0, 528,   0,   0 },
+	{ 0, 528,   0,   0 },
+	{ 0, 400, 400, 128 },
+};
+
+static struct ipq_config ipq_linux_config_4ecc_4k = {
+	.page_size = 4096,
+	.ecc_mode = ECC_REQ_4BIT,
+
+	.main_per_cw = 516,
+	.ecc_per_cw = 10,
+	.spare_per_cw = 1,
+	.bb_in_spare = 0,
+	.bb_byte = 401,
+
+	.cw_per_page = 8,
+	.ecc_page_layout = ipq_linux_page_layout_4ecc_4k,
+	.raw_page_layout = ipq_raw_page_layout_4ecc_4k
+};
+
+static struct ipq_cw_layout ipq_linux_page_layout_8ecc_4k[] = {
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 516,   0,  0 },
+	{ 0, 484, 484, 32 },
+};
+
+static struct ipq_cw_layout ipq_raw_page_layout_8ecc_4k[] = {
+	{ 0, 532,   0,   0 },
+	{ 0, 532,   0,   0 },
+	{ 0, 532,   0,   0 },
+	{ 0, 532,   0,   0 },
+	{ 0, 532,   0,   0 },
+	{ 0, 532,   0,   0 },
+	{ 0, 532,   0,   0 },
+	{ 0, 372, 372, 160 },
+};
+
+static struct ipq_config ipq_linux_config_8ecc_4k = {
+	.page_size = 4096,
+	.ecc_mode = ECC_REQ_8BIT,
+
+	.main_per_cw = 516,
+	.ecc_per_cw = 13,
+	.spare_per_cw = 2,
+	.bb_in_spare = 0,
+	.bb_byte = 373,
+
+	.cw_per_page = 8,
+	.ecc_page_layout = ipq_linux_page_layout_8ecc_4k,
+	.raw_page_layout = ipq_raw_page_layout_8ecc_4k
+};
+
+#define IPQ_CONFIGS_MAX 4
+
+/*
+ * List of supported configs. The code expects this list to be sorted
+ * on ECC requirement size. So 4-bit first, 8-bit next and so on.
+ * This is for the Linux layout; SBL layout is not supported.
+ */
+static struct ipq_config *ipq_configs[IPQ_CONFIGS_MAX] = {
+	&ipq_linux_config_4ecc_2k,
+	&ipq_linux_config_8ecc_2k,
+	&ipq_linux_config_4ecc_4k,
+	&ipq_linux_config_8ecc_4k,
+};
+
+/*
+ * Convenient macros for the flash_cmd register, commands.
+ */
+/* Common flags for page-oriented commands: page access, last page. */
+#define PAGE_CMD                (LAST_PAGE(1) | PAGE_ACC(1))
+
+#define IPQ_CMD_ABORT           (OP_CMD_ABORT_TRANSACTION)
+#define IPQ_CMD_PAGE_READ       (OP_CMD_PAGE_READ | PAGE_CMD)
+#define IPQ_CMD_PAGE_READ_ECC   (OP_CMD_PAGE_READ_WITH_ECC | PAGE_CMD)
+#define IPQ_CMD_PAGE_READ_ALL   (OP_CMD_PAGE_READ_WITH_ECC_SPARE | PAGE_CMD)
+#define IPQ_CMD_PAGE_PROG       (OP_CMD_PROGRAM_PAGE | PAGE_CMD)
+#define IPQ_CMD_PAGE_PROG_ECC   (OP_CMD_PROGRAM_PAGE_WITH_ECC | PAGE_CMD)
+#define IPQ_CMD_PAGE_PROG_ALL   (OP_CMD_PROGRAM_PAGE_WITH_SPARE | PAGE_CMD)
+#define IPQ_CMD_BLOCK_ERASE     (OP_CMD_BLOCK_ERASE | PAGE_CMD)
+#define IPQ_CMD_FETCH_ID        (OP_CMD_FETCH_ID)
+#define IPQ_CMD_CHECK_STATUS    (OP_CMD_CHECK_STATUS)
+#define IPQ_CMD_RESET_DEVICE    (OP_CMD_RESET_NAND_FLASH_DEVICE)
+
+/*
+ * Extract row bytes from a page no.
+ */
+#define PAGENO_ROW0(pgno)       ((pgno) & 0xFF)
+#define PAGENO_ROW1(pgno)       (((pgno) >> 8) & 0xFF)
+#define PAGENO_ROW2(pgno)       (((pgno) >> 16) & 0xFF)
+
+/*
+ * ADDR0 and ADDR1 register field macros, for generating address
+ * cycles during page read and write accesses.
+ * ROW0/ROW1 live in ADDR0 (bits 16-31); ROW2 lives in ADDR1 (bits 0-7).
+ */
+#define ADDR_CYC_ROW0(row0)     ((row0) << 16)
+#define ADDR_CYC_ROW1(row1)     ((row1) << 24)
+#define ADDR_CYC_ROW2(row2)     ((row2) << 0)
+
+/* 100000 polls of 10us each in ipq_wait_ready() ~= 1 second. */
+#define NAND_READY_TIMEOUT      100000 /* 1 SEC */
+
+/* Compile-time-gated debug tracing; no output unless IPQ_DEBUG is 1. */
+#define IPQ_DEBUG 0
+#define ipq_debug(...)	do { if (IPQ_DEBUG) printf(__VA_ARGS__); } while (0)
+
+/*
+ * The flash buffer does not like byte accesses. A plain memcpy might
+ * perform byte access, which can clobber the data to the
+ * controller. Hence we implement our custom versions to write to and
+ * read from the flash buffer.
+ */
+
+/*
+ * Copy from memory to flash buffer, one 32-bit word at a time.
+ */
+static void mem2hwcpy(void *dest, const void *src, size_t n)
+{
+	const uint32_t *from = (const uint32_t *)src;
+	uint32_t *to = (uint32_t *)dest;
+	size_t nwords = n / sizeof(uint32_t);
+	size_t i;
+
+	/*
+	 * n must be a multiple of the word size, otherwise the trailing
+	 * bytes would silently be dropped. The driver only ever passes
+	 * word-multiple sizes.
+	 */
+	assert(nwords * sizeof(uint32_t) == n);
+
+	for (i = 0; i < nwords; i++)
+		writel(from[i], &to[i]);
+}
+
+/*
+ * Copy from flash buffer to memory, one 32-bit word at a time.
+ */
+static void hw2memcpy(void *dest, const void *src, size_t n)
+{
+	const uint32_t *from = (const uint32_t *)src;
+	uint32_t *to = (uint32_t *)dest;
+	size_t nwords = n / sizeof(uint32_t);
+	size_t i;
+
+	/*
+	 * n must be a multiple of the word size, otherwise the trailing
+	 * bytes would silently be dropped. The driver only ever passes
+	 * word-multiple sizes.
+	 */
+	assert(nwords * sizeof(uint32_t) == n);
+
+	for (i = 0; i < nwords; i++)
+		to[i] = readl(&from[i]);
+}
+
+/*
+ * Set the no. of codewords to read/write in the codeword counter.
+ * Callers pass (codewords - 1) for a full-page transaction, see
+ * ipq_read_page()/ipq_write_page().
+ */
+static void ipq_init_cw_count(MtdDev *mtd, unsigned int count)
+{
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	clrsetbits_le32(&regs->dev0_cfg0, CW_PER_PAGE_MASK, CW_PER_PAGE(count));
+}
+
+/*
+ * Set the row values for the address cycles, generated during the
+ * read and write transactions. The 24-bit page number is split into
+ * three row bytes: ROW0/ROW1 go into ADDR0, ROW2 into ADDR1.
+ */
+static void ipq_init_rw_pageno(MtdDev *mtd, int pageno)
+{
+	unsigned char row0;
+	unsigned char row1;
+	unsigned char row2;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	row0 = PAGENO_ROW0(pageno);
+	row1 = PAGENO_ROW1(pageno);
+	row2 = PAGENO_ROW2(pageno);
+
+	writel(ADDR_CYC_ROW0(row0) | ADDR_CYC_ROW1(row1), &regs->addr0);
+	writel(ADDR_CYC_ROW2(row2), &regs->addr1);
+}
+
+/*
+ * Initialize the erased page detector function, in the
+ * controller. This is done to prevent ECC error detection and
+ * correction for erased pages, where the ECC bytes does not match
+ * with the page data.
+ */
+static void ipq_init_erased_page_detector(MtdDev *mtd)
+{
+	uint32_t reset;
+	uint32_t enable;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	reset = ERASED_CW_ECC_MASK(1) | AUTO_DETECT_RES(1);
+	enable = ERASED_CW_ECC_MASK(1) | AUTO_DETECT_RES(0);
+
+	/* Pulse AUTO_DETECT_RES: first reset the detector, then arm it. */
+	writel(reset, &regs->erased_cw_detect_cfg);
+	writel(enable, &regs->erased_cw_detect_cfg);
+}
+
+/*
+ * Configure the controller, and internal driver state for non-ECC
+ * mode operation. In raw mode a full page (including ECC and spare
+ * bytes) is transferred, so the per-page OOB budget is mtd->oobsize.
+ */
+static void ipq_enter_raw_mode(MtdDev *mtd)
+{
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	writel(dev->dev_cfg0_raw, &regs->dev0_cfg0);
+	writel(dev->dev_cfg1_raw, &regs->dev0_cfg1);
+
+	dev->read_cmd = IPQ_CMD_PAGE_READ;
+	dev->write_cmd = IPQ_CMD_PAGE_PROG;
+	dev->curr_page_layout = dev->raw_page_layout;
+	dev->oob_per_page = mtd->oobsize;
+}
+
+/*
+ * Configure the controller, and internal driver state for ECC mode
+ * operation (the default mode; every read/write path exits through
+ * this). Only the ECC-protected data plus mtd->oobavail OOB bytes are
+ * visible per page.
+ */
+static void ipq_exit_raw_mode(MtdDev *mtd)
+{
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	writel(dev->dev_cfg0, &regs->dev0_cfg0);
+	writel(dev->dev_cfg1, &regs->dev0_cfg1);
+	writel(dev->dev_ecc_cfg, &regs->dev0_ecc_cfg);
+
+	dev->read_cmd = IPQ_CMD_PAGE_READ_ALL;
+	dev->write_cmd = IPQ_CMD_PAGE_PROG_ALL;
+	dev->curr_page_layout = dev->ecc_page_layout;
+	dev->oob_per_page = mtd->oobavail;
+}
+
+/*
+ * Wait for the controller/flash to complete operation.
+ *
+ * Polls flash_status every 10us for up to NAND_READY_TIMEOUT
+ * iterations (~1s). The last status value read is returned through
+ * *status so the caller can inspect the error bits.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the controller never went idle.
+ */
+static int ipq_wait_ready(MtdDev *mtd, uint32_t *status)
+{
+	int count = 0;
+	uint32_t op_status;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	while (count < NAND_READY_TIMEOUT) {
+		*status = readl(&regs->flash_status);
+		op_status = *status & OPER_STATUS_MASK;
+		if (op_status == OPER_STATUS_IDLE_STATE)
+			break;
+
+		udelay(10);
+		count++;
+	}
+
+	/* NOTE(review): presumably acks/clears the ready flag for the
+	 * next transaction - confirm against the EBI2 register spec. */
+	writel(READY_BSY_N_EXTERNAL_FLASH_IS_READY, &regs->flash_status);
+
+	if (count >= NAND_READY_TIMEOUT)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/*
+ * Execute and wait for a command to complete. The final controller
+ * status is returned through *status; returns 0 on completion or a
+ * negative error (e.g. -ETIMEDOUT) if the controller never went idle.
+ */
+static int ipq_exec_cmd(MtdDev *mtd, uint32_t cmd, uint32_t *status)
+{
+	int ret;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	writel(cmd, &regs->flash_cmd);
+	writel(EXEC_CMD(1), &regs->exec_cmd);
+
+	ret = ipq_wait_ready(mtd, status);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Check if error flags related to read operation have been set in the
+ * status register.
+ *
+ * Returns 0 on success, -EPERM on an MPU (memory protection)
+ * violation, or -EBADMSG on an uncorrectable ECC error. An operation
+ * error on a codeword the hardware detected as erased is NOT a
+ * failure (erased pages have no valid ECC bytes).
+ */
+static int ipq_check_read_status(MtdDev *mtd, uint32_t status)
+{
+	uint32_t cw_erased;
+	uint32_t num_errors;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	ipq_debug("Read Status: %08x\n", status);
+
+	cw_erased = readl(&regs->erased_cw_detect_status);
+	cw_erased &= CODEWORD_ERASED_MASK;
+
+	num_errors = readl(&regs->buffer_status);
+	num_errors &= NUM_ERRORS_MASK;
+
+	if (status & MPU_ERROR_MASK)
+		return -EPERM;
+
+	if ((status & OP_ERR_MASK) && !cw_erased) {
+		mtd->ecc_stats.failed++;
+		return -EBADMSG;
+	}
+
+	/* Counts one per codeword that needed correction, not the
+	 * number of corrected bits. */
+	if (num_errors)
+		mtd->ecc_stats.corrected++;
+
+	return 0;
+}
+
+/*
+ * Read a codeword into the data and oob buffers, at offsets specified
+ * by the codeword layout. Advances ops->datbuf/oobbuf and the retlen
+ * counters by the amounts copied; either buffer may be NULL to skip
+ * that portion.
+ */
+static int ipq_read_cw(MtdDev *mtd, unsigned int cwno,
+		       struct mtd_oob_ops *ops)
+{
+	int ret;
+	uint32_t status;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+	struct ipq_cw_layout *cwl = &dev->curr_page_layout[cwno];
+
+	ret = ipq_exec_cmd(mtd, dev->read_cmd, &status);
+	if (ret < 0)
+		return ret;
+
+	ret = ipq_check_read_status(mtd, status);
+	if (ret < 0)
+		return ret;
+
+	/* buffn_acc is a word array, hence the byte offset >> 2. */
+	if (ops->datbuf != NULL) {
+		hw2memcpy(ops->datbuf, &regs->buffn_acc[cwl->data_offs >> 2],
+			  cwl->data_size);
+
+		ops->retlen += cwl->data_size;
+		ops->datbuf += cwl->data_size;
+	}
+
+	if (ops->oobbuf != NULL) {
+		hw2memcpy(ops->oobbuf, &regs->buffn_acc[cwl->oob_offs >> 2],
+			  cwl->oob_size);
+
+		ops->oobretlen += cwl->oob_size;
+		ops->oobbuf += cwl->oob_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Read and discard codewords, to bring the codeword counter back to
+ * zero. Used after a mid-page failure so the controller is left in a
+ * consistent state; errors from the dummy reads are deliberately
+ * ignored.
+ */
+static void ipq_reset_cw_counter(MtdDev *mtd, unsigned int start_cw)
+{
+	unsigned int i;
+	uint32_t status;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	for (i = start_cw; i < dev->cw_per_page; i++)
+		ipq_exec_cmd(mtd, dev->read_cmd, &status);
+}
+
+/*
+ * Read a page worth of data and oob, codeword by codeword. On a
+ * codeword error the remaining codewords are drained so the
+ * controller's codeword counter returns to zero, then the error is
+ * propagated.
+ */
+static int ipq_read_page(MtdDev *mtd, int pageno,
+			 struct mtd_oob_ops *ops)
+{
+	unsigned int i;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	int ret = 0;
+
+	ipq_init_cw_count(mtd, dev->cw_per_page - 1);
+	ipq_init_rw_pageno(mtd, pageno);
+	ipq_init_erased_page_detector(mtd);
+
+	for (i = 0; i < dev->cw_per_page; i++) {
+		ret = ipq_read_cw(mtd, i, ops);
+		if (ret < 0) {
+			ipq_reset_cw_counter(mtd, i + 1);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Estimate the no. of pages to read, based on the data length and oob
+ * length. Data length takes precedence; an OOB-only request is sized
+ * by the per-page OOB budget of the current mode.
+ */
+static int ipq_get_read_page_count(MtdDev *mtd,
+				   struct mtd_oob_ops *ops)
+{
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	if (ops->datbuf != NULL) {
+		/* Round up to whole pages. */
+		return (ops->len + mtd->writesize - 1) >> dev->page_shift;
+	} else {
+		/* Guard against division by zero when no OOB is available. */
+		if (dev->oob_per_page == 0)
+			return 0;
+
+		return (ops->ooblen + dev->oob_per_page - 1)
+			/ dev->oob_per_page;
+	}
+}
+
+/*
+ * Return the buffer where the next OOB data should be stored. If the
+ * user buffer is insufficient to hold one page worth of OOB data,
+ * return an internal buffer, to hold the data temporarily (the tail
+ * is copied out later by ipq_nand_read_oobcopy()).
+ */
+static uint8_t *ipq_nand_read_oobbuf(MtdDev *mtd,
+				     struct mtd_oob_ops *ops)
+{
+	size_t read_ooblen;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	if (ops->oobbuf == NULL)
+		return NULL;
+
+	read_ooblen = ops->ooblen - ops->oobretlen;
+	if (read_ooblen < dev->oob_per_page)
+		return dev->pad_oob;
+
+	return ops->oobbuf + ops->oobretlen;
+}
+
+/*
+ * Return the buffer where the next in-band data should be stored. If
+ * the user buffer is insufficient to hold one page worth of in-band
+ * data, return an internal buffer, to hold the data temporarily (the
+ * tail is copied out later by ipq_nand_read_datcopy()).
+ */
+static uint8_t *ipq_nand_read_datbuf(MtdDev *mtd,
+				     struct mtd_oob_ops *ops)
+{
+	size_t read_datlen;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	if (ops->datbuf == NULL)
+		return NULL;
+
+	read_datlen = ops->len - ops->retlen;
+	if (read_datlen < mtd->writesize)
+		return dev->pad_dat;
+
+	return ops->datbuf + ops->retlen;
+}
+
+/*
+ * Copy the OOB data from the internal buffer, to the user buffer, if
+ * the internal buffer was used for the read. Always advances
+ * ops->oobretlen by the number of bytes delivered this page.
+ */
+static void ipq_nand_read_oobcopy(MtdDev *mtd, struct mtd_oob_ops *ops)
+{
+	size_t ooblen;
+	size_t read_ooblen;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	if (ops->oobbuf == NULL)
+		return;
+
+	read_ooblen = ops->ooblen - ops->oobretlen;
+	ooblen = MIN(read_ooblen, dev->oob_per_page);
+
+	/* Partial page: the data landed in pad_oob, copy the tail out. */
+	if (read_ooblen < dev->oob_per_page)
+		memcpy(ops->oobbuf + ops->oobretlen, dev->pad_oob, ooblen);
+
+	ops->oobretlen += ooblen;
+}
+
+/*
+ * Copy the in-band data from the internal buffer, to the user buffer,
+ * if the internal buffer was used for the read. Always advances
+ * ops->retlen by the number of bytes delivered this page.
+ */
+static void ipq_nand_read_datcopy(MtdDev *mtd, struct mtd_oob_ops *ops)
+{
+	size_t datlen;
+	size_t read_datlen;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	if (ops->datbuf == NULL)
+		return;
+
+	read_datlen = ops->len - ops->retlen;
+	datlen = MIN(read_datlen, mtd->writesize);
+
+	/* Partial page: the data landed in pad_dat, copy the tail out. */
+	if (read_datlen < mtd->writesize)
+		memcpy(ops->datbuf + ops->retlen, dev->pad_dat, datlen);
+
+	ops->retlen += datlen;
+}
+
+/*
+ * MTD read-with-OOB entry point. Reads whole pages starting at the
+ * page-aligned offset 'from', delivering data and/or OOB according to
+ * ops. MTD_OOB_RAW selects raw (non-ECC) mode for the duration;
+ * MTD_OOB_PLACE is not supported. Returns 0, -EUCLEAN if any
+ * correctable ECC errors were fixed, or a negative error.
+ */
+static int ipq_nand_read_oob(MtdDev *mtd, uint64_t from,
+			     struct mtd_oob_ops *ops)
+{
+	int start;
+	int pages;
+	int i;
+	uint32_t corrected;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	int ret = 0;
+
+	/* We don't support MTD_OOB_PLACE as of yet. */
+	if (ops->mode == MTD_OOB_PLACE)
+		return -ENOSYS;
+
+	/* Check for reads past end of device */
+	if (ops->datbuf && (from + ops->len) > mtd->size)
+		return -EINVAL;
+
+	/* Reads must start on a page boundary. */
+	if (from & (mtd->writesize - 1))
+		return -EINVAL;
+
+	if (ops->ooboffs != 0)
+		return -EINVAL;
+
+	if (ops->mode == MTD_OOB_RAW)
+		ipq_enter_raw_mode(mtd);
+
+	start = from >> dev->page_shift;
+	pages = ipq_get_read_page_count(mtd, ops);
+
+	ipq_debug("Start of page: %d\n", start);
+	ipq_debug("No of pages to read: %d\n", pages);
+
+	/* Snapshot so we can report -EUCLEAN if corrections happen. */
+	corrected = mtd->ecc_stats.corrected;
+
+	for (i = start; i < (start + pages); i++) {
+		struct mtd_oob_ops page_ops;
+
+		page_ops.mode = ops->mode;
+		page_ops.len = mtd->writesize;
+		page_ops.ooblen = dev->oob_per_page;
+		page_ops.datbuf = ipq_nand_read_datbuf(mtd, ops);
+		page_ops.oobbuf = ipq_nand_read_oobbuf(mtd, ops);
+		page_ops.retlen = 0;
+		page_ops.oobretlen = 0;
+
+		ret = ipq_read_page(mtd, i, &page_ops);
+		if (ret < 0)
+			goto done;
+
+		ipq_nand_read_datcopy(mtd, ops);
+		ipq_nand_read_oobcopy(mtd, ops);
+	}
+
+	if (mtd->ecc_stats.corrected != corrected)
+		ret = -EUCLEAN;
+
+done:
+	/* Always restore ECC mode, even after a raw-mode read. */
+	ipq_exit_raw_mode(mtd);
+	return ret;
+}
+
+/*
+ * MTD data-only read entry point: wraps ipq_nand_read_oob() with an
+ * AUTO-mode, no-OOB request. *retlen receives the number of bytes
+ * actually read (in whole pages).
+ */
+static int ipq_nand_read(MtdDev *mtd, uint64_t from, size_t len,
+			 size_t *retlen, unsigned char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OOB_AUTO;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.oobretlen = 0;
+	ops.ooboffs = 0;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+
+	ret = ipq_nand_read_oob(mtd, from, &ops);
+	*retlen = ops.retlen;
+
+	return ret;
+}
+
+/*
+ * Check if error flags related to write/erase operation have been set
+ * in the status register. Returns 0 on success, -EPERM on a memory
+ * protection violation, -EIO on an operation error or when the
+ * program/erase result flag indicates failure.
+ */
+static int ipq_check_write_erase_status(uint32_t status)
+{
+	ipq_debug("Write Status: %08x\n", status);
+
+	if (status & MPU_ERROR_MASK)
+		return -EPERM;
+
+	if (status & OP_ERR_MASK)
+		return -EIO;
+
+	/* Result flag clear means the program/erase did not succeed. */
+	if (!(status & PROG_ERASE_OP_RESULT_MASK))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Write a codeword with the specified data and oob. The buffers are
+ * staged into the controller's flash buffer first, then the write
+ * command is issued. Advances ops->datbuf/oobbuf and the retlen
+ * counters on success.
+ */
+static int ipq_write_cw(MtdDev *mtd, unsigned int cwno,
+			struct mtd_oob_ops *ops)
+{
+	int ret;
+	uint32_t status;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+	struct ipq_cw_layout *cwl = &(dev->curr_page_layout[cwno]);
+
+	/* buffn_acc is a word array, hence the byte offset >> 2. */
+	mem2hwcpy(&regs->buffn_acc[cwl->data_offs >> 2],
+		  ops->datbuf, cwl->data_size);
+
+	mem2hwcpy(&regs->buffn_acc[cwl->oob_offs >> 2],
+		  ops->oobbuf, cwl->oob_size);
+
+	ret = ipq_exec_cmd(mtd, dev->write_cmd, &status);
+	if (ret < 0)
+		return ret;
+
+	ret = ipq_check_write_erase_status(status);
+	if (ret < 0)
+		return ret;
+
+	ops->retlen += cwl->data_size;
+	ops->datbuf += cwl->data_size;
+
+	if (ops->oobbuf != NULL) {
+		ops->oobretlen += cwl->oob_size;
+		ops->oobbuf += cwl->oob_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Write a page worth of data and oob, codeword by codeword. On a
+ * codeword error the controller's codeword counter is drained back to
+ * zero before the error is propagated.
+ */
+static int ipq_write_page(MtdDev *mtd, int pageno,
+			  struct mtd_oob_ops *ops)
+{
+	unsigned int i;
+	int ret;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	ipq_init_cw_count(mtd, dev->cw_per_page - 1);
+	ipq_init_rw_pageno(mtd, pageno);
+
+	for (i = 0; i < dev->cw_per_page; i++) {
+		ret = ipq_write_cw(mtd, i, ops);
+		if (ret < 0) {
+			ipq_reset_cw_counter(mtd, i + 1);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Return the buffer containing the in-band data to be written.
+ * Callers guarantee ops->datbuf is non-NULL and holds whole pages
+ * (checked in ipq_nand_write_oob()), so no pad buffer is needed here.
+ */
+static uint8_t *ipq_nand_write_datbuf(MtdDev *mtd,
+				      struct mtd_oob_ops *ops)
+{
+	return ops->datbuf + ops->retlen;
+}
+
+/*
+ * Return the buffer containing the OOB data to be written. If the user
+ * buffer does not provide one page worth of OOB data, return a padded
+ * internal buffer with the OOB data copied in.
+ */
+static uint8_t *ipq_nand_write_oobbuf(MtdDev *mtd,
+				      struct mtd_oob_ops *ops)
+{
+	size_t write_ooblen;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	/* No user OOB: write the pad buffer (presumably kept 0xFF-filled
+	 * by init - NOTE(review): confirm pad_oob setup). */
+	if (ops->oobbuf == NULL)
+		return dev->pad_oob;
+
+	write_ooblen = ops->ooblen - ops->oobretlen;
+
+	if (write_ooblen < dev->oob_per_page) {
+		/*
+		 * Fix: memset() takes the fill byte before the length.
+		 * The original memset(pad, oob_per_page, 0xFF) filled
+		 * 0xFF bytes with the value oob_per_page instead of
+		 * padding the whole buffer with 0xFF (the NAND erased
+		 * state), corrupting the spare area on partial writes.
+		 */
+		memset(dev->pad_oob, 0xFF, dev->oob_per_page);
+		memcpy(dev->pad_oob, ops->oobbuf + ops->oobretlen,
+		       write_ooblen);
+		return dev->pad_oob;
+	}
+
+	return ops->oobbuf + ops->oobretlen;
+}
+
+/*
+ * Increment the counters to indicate the no. of in-band bytes
+ * written. Writes are always whole pages, so this is one page size.
+ */
+static void ipq_nand_write_datinc(MtdDev *mtd, struct mtd_oob_ops *ops)
+{
+	ops->retlen += mtd->writesize;
+}
+
+/*
+ * Increment the counters to indicate the no. of OOB bytes written.
+ * Capped at the remaining user OOB when less than a page's worth was
+ * supplied.
+ */
+static void ipq_nand_write_oobinc(MtdDev *mtd, struct mtd_oob_ops *ops)
+{
+	size_t write_ooblen;
+	size_t ooblen;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	if (ops->oobbuf == NULL)
+		return;
+
+	write_ooblen = ops->ooblen - ops->oobretlen;
+	ooblen = MIN(write_ooblen, dev->oob_per_page);
+
+	ops->oobretlen += ooblen;
+}
+
+/*
+ * MTD write-with-OOB entry point. Writes whole pages starting at the
+ * page-aligned offset 'to'; ops->len must be a multiple of the page
+ * size and ops->datbuf must be non-NULL. MTD_OOB_RAW selects raw
+ * (non-ECC) mode for the duration; MTD_OOB_PLACE is not supported.
+ */
+static int ipq_nand_write_oob(MtdDev *mtd, uint64_t to,
+			      struct mtd_oob_ops *ops)
+{
+	int i;
+	int start;
+	int pages;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	int ret = 0;
+
+	/* We don't support MTD_OOB_PLACE as of yet. */
+	if (ops->mode == MTD_OOB_PLACE)
+		return -ENOSYS;
+
+	/* Check for writes past end of device. */
+	if ((to + ops->len) > mtd->size)
+		return -EINVAL;
+
+	if (to & (mtd->writesize - 1))
+		return -EINVAL;
+
+	if (ops->len & (mtd->writesize - 1))
+		return -EINVAL;
+
+	if (ops->ooboffs != 0)
+		return -EINVAL;
+
+	if (ops->datbuf == NULL)
+		return -EINVAL;
+
+	if (ops->mode == MTD_OOB_RAW)
+		ipq_enter_raw_mode(mtd);
+
+	start = to >> dev->page_shift;
+	pages = ops->len >> dev->page_shift;
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	for (i = start; i < (start + pages); i++) {
+		struct mtd_oob_ops page_ops;
+
+		page_ops.mode = ops->mode;
+		page_ops.len = mtd->writesize;
+		page_ops.ooblen = dev->oob_per_page;
+		page_ops.datbuf = ipq_nand_write_datbuf(mtd, ops);
+		page_ops.oobbuf = ipq_nand_write_oobbuf(mtd, ops);
+		page_ops.retlen = 0;
+		page_ops.oobretlen = 0;
+
+		ret = ipq_write_page(mtd, i, &page_ops);
+		if (ret < 0)
+			goto done;
+
+		ipq_nand_write_datinc(mtd, ops);
+		ipq_nand_write_oobinc(mtd, ops);
+	}
+
+done:
+	/* Always restore ECC mode, even after a raw-mode write. */
+	ipq_exit_raw_mode(mtd);
+	return ret;
+}
+
+/*
+ * MTD data-only write entry point: wraps ipq_nand_write_oob() with an
+ * AUTO-mode, no-OOB request. *retlen receives the number of bytes
+ * actually written.
+ */
+static int ipq_nand_write(MtdDev *mtd, uint64_t to, size_t len,
+			  size_t *retlen, const unsigned char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OOB_AUTO;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.oobretlen = 0;
+	ops.ooboffs = 0;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+
+	ret = ipq_nand_write_oob(mtd, to, &ops);
+	*retlen = ops.retlen;
+
+	return ret;
+}
+
+/*
+ * Check whether the eraseblock at 'offs' is marked bad: read the
+ * first OOB byte of the block's first page in raw mode and compare it
+ * to the erased value 0xFF. Returns 1 if bad, 0 if good, or a
+ * negative error.
+ */
+static int ipq_nand_block_isbad(MtdDev *mtd, uint64_t offs)
+{
+	int ret;
+	uint8_t oobbuf;
+	struct mtd_oob_ops ops;
+
+	/* Check for invalid offset */
+	/* NOTE(review): '>' permits offs == mtd->size (one past the
+	 * end); '>=' may be intended - confirm against callers. */
+	if (offs > mtd->size)
+		return -EINVAL;
+
+	if (offs & (mtd->erasesize - 1))
+		return -EINVAL;
+
+	ops.mode = MTD_OOB_RAW;
+	ops.len = 0;
+	ops.retlen = 0;
+	ops.ooblen = 1;
+	ops.oobretlen = 0;
+	ops.ooboffs = 0;
+	ops.datbuf = NULL;
+	ops.oobbuf = &oobbuf;
+
+	ret = ipq_nand_read_oob(mtd, offs, &ops);
+	if (ret < 0)
+		return ret;
+
+	return oobbuf != 0xFF;
+}
+
+/*
+ * Mark the eraseblock at 'offs' bad by writing an all-zero first page
+ * (data and OOB) in raw mode, clearing the bad-block marker byte.
+ */
+static int ipq_nand_block_markbad(MtdDev *mtd, uint64_t offs)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	/* Check for invalid offset */
+	if (offs > mtd->size)
+		return -EINVAL;
+
+	if (offs & (mtd->erasesize - 1))
+		return -EINVAL;
+
+	ops.mode = MTD_OOB_RAW;
+	ops.len = mtd->writesize;
+	ops.retlen = 0;
+	ops.ooblen = mtd->oobsize;
+	ops.oobretlen = 0;
+	ops.ooboffs = 0;
+	ops.datbuf = dev->zero_page;
+	ops.oobbuf = dev->zero_oob;
+
+	ret = ipq_nand_write_oob(mtd, offs, &ops);
+
+	if (!ret)
+		mtd->ecc_stats.badblocks++;
+
+	return ret;
+}
+
+/*
+ * Erase the specified block. The erase command takes the page number
+ * of the block's first page in ADDR0, with the codeword counter set
+ * to zero.
+ */
+static int ipq_nand_erase_block(MtdDev *mtd, int blockno)
+{
+	uint64_t offs;
+	int pageno;
+	uint32_t status;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+	int ret = 0;
+
+	ipq_init_cw_count(mtd, 0);
+
+	/* Widen before shifting: 'blockno << shift' would be evaluated
+	 * in int and overflow for offsets beyond 2 GiB. */
+	offs = (uint64_t)blockno << dev->phys_erase_shift;
+	pageno = offs >> dev->page_shift;
+	writel(pageno, &regs->addr0);
+
+	ret = ipq_exec_cmd(mtd, IPQ_CMD_BLOCK_ERASE, &status);
+	if (ret < 0)
+		return ret;
+
+	return ipq_check_write_erase_status(status);
+}
+
+/*
+ * MTD erase entry point. Erases instr->len bytes of eraseblocks
+ * starting at the block-aligned instr->addr, skipping the operation
+ * with -EIO if a block is bad (unless instr->scrub is set). On
+ * failure, instr->fail_addr records the offending block's offset.
+ */
+static int ipq_nand_erase(MtdDev *mtd, struct erase_info *instr)
+{
+	int i;
+	int blocks;
+	int start;
+	uint64_t offs;
+	int ret = 0;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+
+	/* Check for erase past end of device. */
+	if ((instr->addr + instr->len) > mtd->size)
+		return -EINVAL;
+
+	if (instr->addr & (mtd->erasesize - 1))
+		return -EINVAL;
+
+	if (instr->len & (mtd->erasesize - 1))
+		return -EINVAL;
+
+	start = instr->addr >> dev->phys_erase_shift;
+	blocks = instr->len >> dev->phys_erase_shift;
+	ipq_debug("number of blks to erase: %d\n", blocks);
+
+	for (i = start; i < (start + blocks); i++) {
+		/* Widen before shifting: 'i << shift' would be evaluated
+		 * in int and overflow for offsets beyond 2 GiB. */
+		offs = (uint64_t)i << dev->phys_erase_shift;
+
+		/* A negative (error) return from isbad is treated as bad,
+		 * which is the conservative choice here. */
+		if (!instr->scrub && ipq_nand_block_isbad(mtd, offs)) {
+			printf("ipq_nand: attempt to erase a bad block\n");
+			return -EIO;
+		}
+
+		ret = ipq_nand_erase_block(mtd, i);
+		if (ret < 0) {
+			instr->fail_addr = offs;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Decode the 4-byte READ ID response: manufacturer, device, config byte. */
+#define NAND_ID_MAN(id) ((id) & 0xFF)
+#define NAND_ID_DEV(id) (((id) >> 8) & 0xFF)
+#define NAND_ID_CFG(id) (((id) >> 24) & 0xFF)
+
+/* Geometry fields packed into the ID's configuration byte. */
+#define NAND_CFG_PAGE_SIZE(id)   (((id) >> 0) & 0x3)
+#define NAND_CFG_SPARE_SIZE(id)  (((id) >> 2) & 0x3)
+#define NAND_CFG_BLOCK_SIZE(id)  (((id) >> 4) & 0x3)
+
+#define CHUNK_SIZE        512
+
+/* ONFI Signature ("ONFI" in little-endian ASCII) */
+#define ONFI_SIG          0x49464E4F
+#define ONFI_READ_ID_ADDR 0x20
+
+/*
+ * Issue READ ID at the ONFI address (0x20) and return the raw 4-byte
+ * response through *onfi_id; an ONFI-compliant part answers with the
+ * "ONFI" signature.
+ */
+static int ipq_nand_onfi_probe(MtdDev *mtd, uint32_t *onfi_id)
+{
+	int ret;
+	uint32_t status;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	writel(ONFI_READ_ID_ADDR, &regs->addr0);
+	ret = ipq_exec_cmd(mtd, IPQ_CMD_FETCH_ID, &status);
+	if (ret < 0)
+		return ret;
+
+	*onfi_id = readl(&regs->flash_read_id);
+
+	return 0;
+}
+
+/*
+ * Read the ONFI parameter page and populate mtd geometry (writesize,
+ * erasesize, oobsize, size) from it. Temporarily repurposes the
+ * controller's read command as READ PARAMETER PAGE with a single
+ * address cycle, then restores the normal page-read command.
+ *
+ * Returns 0 on success or a negative error from the transfer.
+ */
+int ipq_nand_get_info_onfi(MtdDev *mtd)
+{
+	uint32_t status;
+	int ret;
+	uint32_t dev_cmd_vld_orig;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+	struct nand_onfi_params *p = MTD_ONFI_PARAMS(mtd);
+
+	ipq_enter_raw_mode(mtd);
+
+	writel(0, &regs->addr0);
+	writel(0, &regs->addr1);
+
+	dev_cmd_vld_orig = readl(&regs->dev_cmd_vld);
+	clrsetbits_le32(&regs->dev_cmd_vld, READ_START_VLD_MASK,
+			READ_START_VLD(0));
+
+	clrsetbits_le32(&regs->dev_cmd1, READ_ADDR_MASK,
+			READ_ADDR(NAND_CMD_PARAM));
+
+	clrsetbits_le32(&regs->dev0_cfg0, NUM_ADDR_CYCLES_MASK,
+			NUM_ADDR_CYCLES(1));
+
+	ipq_init_cw_count(mtd, 0);
+
+	ret = ipq_exec_cmd(mtd, IPQ_CMD_PAGE_READ_ALL, &status);
+	if (ret < 0)
+		goto err_exit;
+
+	ret = ipq_check_read_status(mtd, status);
+	if (ret < 0)
+		goto err_exit;
+
+	hw2memcpy(p, &regs->buffn_acc[0], sizeof(struct nand_onfi_params));
+
+	/* Should use le*_to_cpu functions here, but we are running on a
+	 * little-endian ARM so they can be omitted */
+	mtd->writesize = p->byte_per_page;
+	mtd->erasesize = p->pages_per_block * mtd->writesize;
+	mtd->oobsize = p->spare_bytes_per_page;
+	mtd->size = (uint64_t)p->blocks_per_lun * (mtd->erasesize);
+
+err_exit:
+	/* Restoring the page read command in read command register */
+	clrsetbits_le32(&regs->dev_cmd1, READ_ADDR_MASK,
+			READ_ADDR(NAND_CMD_READ0));
+
+	writel(dev_cmd_vld_orig, &regs->dev_cmd_vld);
+
+	/*
+	 * Fix: propagate the error. The original returned 0
+	 * unconditionally, so a failed parameter-page read left the mtd
+	 * geometry unset while reporting success to the caller.
+	 */
+	return ret;
+}
+
+/*
+ * Read the ID from the flash device.
+ */
+/*
+ * Read the raw 32-bit device ID (FETCH_ID at address 0) into *id.
+ * Returns 0 on success or a negative error code.
+ */
+static int ipq_nand_probe(MtdDev *mtd, uint32_t *id)
+{
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+	uint32_t status;
+	int ret;
+
+	writel(0, &regs->addr0);
+
+	ret = ipq_exec_cmd(mtd, IPQ_CMD_FETCH_ID, &status);
+	if (ret < 0)
+		return ret;
+
+	*id = readl(&regs->flash_read_id);
+	return 0;
+}
+
+/*
+ * Retreive the flash info entry using the device ID.
+ */
+/*
+ * Look up the flash info table entry for the given device ID.
+ * Returns NULL when the ID is not in nand_flash_ids[].
+ */
+static const struct nand_flash_dev *flash_get_dev(uint8_t dev_id)
+{
+	const struct nand_flash_dev *entry;
+
+	/* The table is terminated by an entry with a zero ID. */
+	for (entry = nand_flash_ids; entry->id; entry++) {
+		if (entry->id == dev_id)
+			return entry;
+	}
+
+	return NULL;
+}
+
+
+/*
+ * Populate flash parameters from the configuration byte.
+ */
+/*
+ * Decode page, erase-block and spare geometry from the 4th ID byte
+ * (cfg_id) into the MtdDev fields.
+ */
+static void nand_get_info_cfg(MtdDev *mtd, uint8_t cfg_id)
+{
+	unsigned int chunks;
+	unsigned int spare_per_chunk;
+
+	/* Page size: 1KB * (2 ^ field). */
+	mtd->writesize = 1024 << NAND_CFG_PAGE_SIZE(cfg_id);
+
+	/* Erase block size: 64KB * (2 ^ field). */
+	mtd->erasesize = (64 * 1024) << NAND_CFG_BLOCK_SIZE(cfg_id);
+
+	/* Spare bytes per 512B chunk: 8 * (2 ^ field). */
+	spare_per_chunk = 8 << NAND_CFG_SPARE_SIZE(cfg_id);
+	chunks = mtd->writesize / CHUNK_SIZE;
+	mtd->oobsize = spare_per_chunk * chunks;
+
+	if ((mtd->oobsize > 64) && (mtd->writesize == 2048)) {
+		printf(
+		       "ipq_nand: Found a 2K page device with %d oobsize - changing oobsize to 64 bytes.\n",
+		       mtd->oobsize);
+		mtd->oobsize = 64;
+	}
+}
+
+/*
+ * Populate flash parameters for non-ONFI devices.
+ */
+/*
+ * Populate flash parameters for a non-ONFI device from its raw ID.
+ * Returns 0 on success, -ENOENT when the device ID is unknown.
+ */
+static int nand_get_info(MtdDev *mtd, uint32_t flash_id)
+{
+	const struct nand_flash_dev *flash_dev;
+	uint8_t man_id = NAND_ID_MAN(flash_id);
+	uint8_t dev_id = NAND_ID_DEV(flash_id);
+	uint8_t cfg_id = NAND_ID_CFG(flash_id);
+
+	printf("Manufacturer ID: %x\n", man_id);
+	printf("Device ID: %x\n", dev_id);
+	printf("Config. Byte: %x\n", cfg_id);
+
+	flash_dev = flash_get_dev(dev_id);
+	if (flash_dev == NULL) {
+		printf(
+		       "ipq_nand: unknown NAND device: %x device: %x\n",
+		       man_id, dev_id);
+		return -ENOENT;
+	}
+
+	mtd->size = (uint64_t)flash_dev->chipsize * MiB;
+	/*
+	 * Older flash IDs have been removed from nand_flash_ids[],
+	 * so we can always get the information we need from cfg_id.
+	 */
+	nand_get_info_cfg(mtd, cfg_id);
+
+	return 0;
+}
+
+/*
+ * Read the device ID, and populate the MTD callbacks and the device
+ * parameters.
+ */
+/*
+ * Identify the attached flash device — preferring ONFI, falling back
+ * to the ID table — then populate the MTD callbacks and derived shift
+ * values.  Returns 0 on success or a negative error code.
+ */
+int ipq_nand_scan(MtdDev *mtd)
+{
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	uint32_t onfi_sig = 0;
+	int ret;
+
+	ret = ipq_nand_onfi_probe(mtd, &onfi_sig);
+	if (ret < 0)
+		return ret;
+
+	if (onfi_sig == ONFI_SIG) {
+		ret = ipq_nand_get_info_onfi(mtd);
+		if (ret < 0)
+			return ret;
+	} else {
+		uint32_t id_first = 0;
+		uint32_t id_second = 0;
+
+		ret = ipq_nand_probe(mtd, &id_first);
+		if (ret < 0)
+			return ret;
+
+		ret = ipq_nand_probe(mtd, &id_second);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * Bus-hold or other interface concerns can cause random
+		 * data to appear.  Read the ID twice; if the two reads
+		 * disagree, we are looking at garbage.
+		 */
+		if (id_first != id_second)
+			return -ENODEV;
+
+		ret = nand_get_info(mtd, id_first);
+		if (ret < 0)
+			return ret;
+	}
+
+	mtd->erase = ipq_nand_erase;
+	mtd->read = ipq_nand_read;
+	mtd->write = ipq_nand_write;
+	mtd->read_oob = ipq_nand_read_oob;
+	mtd->write_oob = ipq_nand_write_oob;
+	mtd->block_isbad = ipq_nand_block_isbad;
+	mtd->block_markbad = ipq_nand_block_markbad;
+
+	dev->page_shift = ffs(mtd->writesize) - 1;
+	dev->phys_erase_shift = ffs(mtd->erasesize) - 1;
+
+	return 0;
+}
+
+/*
+ * Configure the hardware for the selected NAND device configuration.
+ */
+/*
+ * Configure the hardware for the selected NAND device configuration.
+ *
+ * Copies the chosen ipq_config into driver state, derives both the
+ * ECC-enabled and raw (ECC-disabled) CFG0/CFG1 register values,
+ * computes mtd->oobavail from the ECC page layout, and programs the
+ * controller registers.
+ */
+static void ipq_nand_hw_config(MtdDev *mtd, struct ipq_config *cfg)
+{
+	unsigned int i;
+	unsigned int enable_bch;
+	unsigned int raw_cw_size;
+	uint32_t dev_cmd_vld;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct ebi2nd_regs *regs = dev->regs;
+
+	dev->main_per_cw = cfg->main_per_cw;
+	dev->spare_per_cw = cfg->spare_per_cw;
+	dev->cw_per_page = cfg->cw_per_page;
+	dev->ecc_page_layout = cfg->ecc_page_layout;
+	dev->raw_page_layout = cfg->raw_page_layout;
+	/* Raw codeword covers data + spare + ECC bytes, plus one more
+	 * byte (presumably the bad-block marker byte — TODO confirm
+	 * against the controller documentation). */
+	raw_cw_size =
+		cfg->main_per_cw + cfg->spare_per_cw + cfg->ecc_per_cw + 1;
+
+	/* Usable OOB is the sum of the per-codeword OOB sizes. */
+	mtd->oobavail = 0;
+	for (i = 0; i < dev->cw_per_page; i++) {
+		struct ipq_cw_layout *cw_layout = &dev->ecc_page_layout[i];
+		mtd->oobavail += cw_layout->oob_size;
+	}
+
+	/*
+	 * We use Reed-Solom ECC engine for 4-bit ECC. And BCH ECC
+	 * engine for 8-bit ECC. Thats the way SBL and Linux kernel
+	 * does it.
+	 */
+	enable_bch = 0;
+	if (cfg->ecc_mode == ECC_REQ_8BIT)
+		enable_bch = 1;
+
+	/* CFG0 for normal (ECC-protected) accesses. */
+	dev->dev_cfg0 = (BUSY_TIMEOUT_ERROR_SELECT_2_SEC
+			 | DISABLE_STATUS_AFTER_WRITE(0)
+			 | MSB_CW_PER_PAGE(0)
+			 | CW_PER_PAGE(cfg->cw_per_page - 1)
+			 | UD_SIZE_BYTES(cfg->main_per_cw)
+			 | RS_ECC_PARITY_SIZE_BYTES(cfg->ecc_per_cw)
+			 | SPARE_SIZE_BYTES(cfg->spare_per_cw)
+			 | NUM_ADDR_CYCLES(5));
+
+	/* CFG0 for raw accesses: whole codeword as user data, no ECC. */
+	dev->dev_cfg0_raw = (BUSY_TIMEOUT_ERROR_SELECT_2_SEC
+			     | DISABLE_STATUS_AFTER_WRITE(0)
+			     | MSB_CW_PER_PAGE(0)
+			     | CW_PER_PAGE(cfg->cw_per_page - 1)
+			     | UD_SIZE_BYTES(raw_cw_size)
+			     | RS_ECC_PARITY_SIZE_BYTES(0)
+			     | SPARE_SIZE_BYTES(0)
+			     | NUM_ADDR_CYCLES(5));
+
+	/* CFG1 with ECC enabled; bad-block byte comes from the config. */
+	dev->dev_cfg1 = (ECC_DISABLE(0)
+			 | WIDE_FLASH_8_BIT_DATA_BUS
+			 | NAND_RECOVERY_CYCLES(3)
+			 | CS_ACTIVE_BSY_ASSERT_CS_DURING_BUSY
+			 | BAD_BLOCK_BYTE_NUM(cfg->bb_byte)
+			 | BAD_BLOCK_IN_SPARE_AREA(cfg->bb_in_spare)
+			 | WR_RD_BSY_GAP_6_CLOCK_CYCLES_GAP
+			 | ECC_ENCODER_CGC_EN(0)
+			 | ECC_DECODER_CGC_EN(0)
+			 | DISABLE_ECC_RESET_AFTER_OPDONE(0)
+			 | ENABLE_BCH_ECC(enable_bch)
+			 | RS_ECC_MODE(0));
+
+	/* CFG1 with ECC disabled, for raw page accesses. */
+	dev->dev_cfg1_raw = (ECC_DISABLE(1)
+			     | WIDE_FLASH_8_BIT_DATA_BUS
+			     | NAND_RECOVERY_CYCLES(3)
+			     | CS_ACTIVE_BSY_ASSERT_CS_DURING_BUSY
+			     | BAD_BLOCK_BYTE_NUM(0x11)
+			     | BAD_BLOCK_IN_SPARE_AREA(1)
+			     | WR_RD_BSY_GAP_6_CLOCK_CYCLES_GAP
+			     | ECC_ENCODER_CGC_EN(0)
+			     | ECC_DECODER_CGC_EN(0)
+			     | DISABLE_ECC_RESET_AFTER_OPDONE(0)
+			     | ENABLE_BCH_ECC(0)
+			     | RS_ECC_MODE(0));
+
+	/* BCH engine configuration (used when enable_bch is set). */
+	dev->dev_ecc_cfg = (BCH_ECC_DISABLE(0)
+			    | ECC_SW_RESET(0)
+			    | BCH_ECC_MODE_8_BIT_ECC_ERROR_DETECTION_CORRECTION
+			    | BCH_ECC_PARITY_SIZE_BYTES(cfg->ecc_per_cw)
+			    | ECC_NUM_DATA_BYTES(cfg->main_per_cw)
+			    | ECC_FORCE_CLK_OPEN(1));
+
+	dev->read_cmd = IPQ_CMD_PAGE_READ_ALL;
+	dev->write_cmd = IPQ_CMD_PAGE_PROG_ALL;
+
+	/* Start out in ECC mode; raw mode is entered on demand. */
+	dev->curr_page_layout = dev->ecc_page_layout;
+	dev->oob_per_page = mtd->oobavail;
+
+	/* Program the computed values into the controller. */
+	writel(dev->dev_cfg0, &regs->dev0_cfg0);
+	writel(dev->dev_cfg1, &regs->dev0_cfg1);
+	writel(dev->dev_ecc_cfg, &regs->dev0_ecc_cfg);
+	writel(dev->main_per_cw - 1, &regs->ebi2_ecc_buf_cfg);
+
+	dev_cmd_vld = (SEQ_READ_START_VLD(1) | ERASE_START_VLD(1)
+		       | WRITE_START_VLD(1) | READ_START_VLD(1));
+	writel(dev_cmd_vld, &regs->dev_cmd_vld);
+}
+
+/*
+ * Setup the hardware and the driver state. Called after the scan and
+ * is passed in the results of the scan.
+ */
+/*
+ * Setup the hardware and the driver state.  Called after the scan;
+ * uses the discovered geometry to allocate the driver's scratch
+ * buffers and pick a matching controller configuration.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or -ENOENT when
+ * no supported configuration matches the device.
+ */
+int ipq_nand_post_scan_init(MtdDev *mtd)
+{
+	unsigned int i;
+	size_t alloc_size;
+	struct ipq_nand_dev *dev = MTD_IPQ_NAND_DEV(mtd);
+	struct nand_onfi_params *nand_onfi = MTD_ONFI_PARAMS(mtd);
+	u8 *buf;
+
+	alloc_size = (mtd->writesize   /* For dev->pad_dat */
+		      + mtd->oobsize   /* For dev->pad_oob */
+		      + mtd->writesize /* For dev->zero_page */
+		      + mtd->oobsize); /* For dev->zero_oob */
+
+	dev->buffers = malloc(alloc_size);
+	if (dev->buffers == NULL) {
+		printf("ipq_nand: failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Carve the single allocation into the four scratch buffers. */
+	buf = dev->buffers;
+
+	dev->pad_dat = buf;
+	buf += mtd->writesize;
+
+	dev->pad_oob = buf;
+	buf += mtd->oobsize;
+
+	dev->zero_page = buf;
+	buf += mtd->writesize;
+
+	dev->zero_oob = buf;
+	buf += mtd->oobsize;
+
+	memset(dev->zero_page, 0x0, mtd->writesize);
+	memset(dev->zero_oob, 0x0, mtd->oobsize);
+
+	/*
+	 * Pick the first config whose page size matches and whose ECC
+	 * strength covers the device's requirement.
+	 */
+	for (i = 0; i < IPQ_CONFIGS_MAX; i++) {
+		if ((ipq_configs[i]->page_size == mtd->writesize)
+		    && (nand_onfi->ecc_bits <= ipq_configs[i]->ecc_mode))
+			break;
+	}
+
+	printf("ECC bits = %d\n", nand_onfi->ecc_bits);
+
+	if (i == IPQ_CONFIGS_MAX) {
+		printf("ipq_nand: unsupported dev. configuration\n");
+		/*
+		 * Free the scratch allocation and clear the pointers so
+		 * no dangling references into freed memory remain.
+		 */
+		free(dev->buffers);
+		dev->buffers = NULL;
+		dev->pad_dat = NULL;
+		dev->pad_oob = NULL;
+		dev->zero_page = NULL;
+		dev->zero_oob = NULL;
+		return -ENOENT;
+	}
+
+	ipq_nand_hw_config(mtd, ipq_configs[i]);
+
+	printf("OOB Avail: %d\n", mtd->oobavail);
+	printf("CFG0: 0x%04X\n", dev->dev_cfg0);
+	printf("CFG1: 0x%04X\n", dev->dev_cfg1);
+	printf("Raw CFG0: 0x%04X\n", dev->dev_cfg0_raw);
+	printf("Raw CFG1: 0x%04X\n", dev->dev_cfg1_raw);
+	printf("ECC : 0x%04X\n", dev->dev_ecc_cfg);
+
+	return 0;
+}
+
+/* NOTE(review): not referenced anywhere in this file — appears to be a
+ * leftover from the U-Boot import; candidate for removal. */
+#define CONFIG_IPQ_NAND_NAND_INFO_IDX 0
+
+/*
+ * Initialize controller and register as an MTD device.
+ */
+/*
+ * Initialize the controller, identify the attached device, and return
+ * the resulting MtdDev through mtd_out.
+ *
+ * Returns 0 on success or a negative error code; on failure the
+ * partially-constructed device is freed rather than leaked (the
+ * original code leaked mtd and dev on every error path).
+ */
+int ipq_nand_init(void *ebi2nd_base, MtdDev **mtd_out)
+{
+	uint32_t status;
+	int ret;
+	MtdDev *mtd;
+	struct ipq_nand_dev *dev;
+
+	printf("Initializing IPQ NAND\n");
+
+	mtd = xzalloc(sizeof(*mtd));
+	dev = xzalloc(sizeof(*dev));
+
+	dev->regs = (struct ebi2nd_regs *) ebi2nd_base;
+
+	mtd->priv = dev;
+
+	/* Soft Reset */
+	ret = ipq_exec_cmd(mtd, IPQ_CMD_ABORT, &status);
+	if (ret < 0) {
+		printf("ipq_nand: controller reset timedout\n");
+		goto err;
+	}
+
+	/* Set some sane HW configuration, for ID read. */
+	ipq_nand_hw_config(mtd, ipq_configs[0]);
+
+	/* Reset Flash Memory */
+	ret = ipq_exec_cmd(mtd, IPQ_CMD_RESET_DEVICE, &status);
+	if (ret < 0) {
+		printf("ipq_nand: flash reset timedout\n");
+		goto err;
+	}
+
+	/* Identify the NAND device. */
+	ret = ipq_nand_scan(mtd);
+	if (ret < 0) {
+		printf("ipq_nand: failed to identify device\n");
+		goto err;
+	}
+
+	/* post_scan_init cleans up its own allocations on failure. */
+	ret = ipq_nand_post_scan_init(mtd);
+	if (ret < 0)
+		goto err;
+
+	*mtd_out = mtd;
+	return 0;
+
+err:
+	free(dev);
+	free(mtd);
+	return ret;
+}
+
+/* MtdDevCtrlr wrapper carrying the controller's MMIO base address for
+ * lazy initialization via ipq_nand_update(). */
+typedef struct {
+	MtdDevCtrlr ctrlr;
+	void *ebi2nd_base;
+} IpqMtdDevCtrlr;
+
+/*
+ * MtdDevCtrlr update hook: initialize the NAND controller on first use
+ * and cache the resulting MtdDev on the controller.  Subsequent calls
+ * are no-ops.  Returns 0 on success or the ipq_nand_init() error.
+ */
+int ipq_nand_update(MtdDevCtrlr *ctrlr)
+{
+	IpqMtdDevCtrlr *ipq_ctrlr;
+	MtdDev *mtd;
+	int ret;
+
+	/* Already initialized — nothing to do. */
+	if (ctrlr->dev)
+		return 0;
+
+	ipq_ctrlr = container_of(ctrlr, IpqMtdDevCtrlr, ctrlr);
+	ret = ipq_nand_init(ipq_ctrlr->ebi2nd_base, &mtd);
+	if (ret)
+		return ret;
+
+	ctrlr->dev = mtd;
+	return 0;
+}
+
+/* External entrypoint for lazy NAND initialization */
+MtdDevCtrlr *new_ipq_nand(void *ebi2nd_base)
+{
+	IpqMtdDevCtrlr *ctrlr = xzalloc(sizeof(*ctrlr));
+	ctrlr->ctrlr.update = ipq_nand_update;
+	ctrlr->ebi2nd_base = ebi2nd_base;
+	return &ctrlr->ctrlr;
+}
diff --git a/src/drivers/storage/mtd/nand/ipq_nand.h b/src/drivers/storage/mtd/nand/ipq_nand.h
new file mode 100644
index 0000000..73ef617
--- /dev/null
+++ b/src/drivers/storage/mtd/nand/ipq_nand.h
@@ -0,0 +1,8 @@
+#ifndef __DRIVERS_STORAGE_MTD_NAND_IPQ_NAND_H__
+#define __DRIVERS_STORAGE_MTD_NAND_IPQ_NAND_H__
+
+#include "drivers/storage/mtd/mtd.h"
+
+/* Create an MTD controller for the IPQ806x NAND controller whose
+ * registers are mapped at ebi2nd_base.  The device is initialized
+ * lazily on the controller's first update() call. */
+MtdDevCtrlr *new_ipq_nand(void *ebi2nd_base);
+
+#endif
diff --git a/src/drivers/storage/mtd/nand/ipq_nand_private.h b/src/drivers/storage/mtd/nand/ipq_nand_private.h
new file mode 100644
index 0000000..5174279
--- /dev/null
+++ b/src/drivers/storage/mtd/nand/ipq_nand_private.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2012 The Linux Foundation. All rights reserved.
+ */
+#ifndef __DRIVERS_STORAGE_MTD_NAND_ARCH_IPQ806X_NAND_H__
+#define __DRIVERS_STORAGE_MTD_NAND_ARCH_IPQ806X_NAND_H__
+
+/*
+ * Register file of the EBI2 NAND controller; offsets relative to the
+ * controller base are noted in the trailing comments, with reservedN
+ * members padding the undocumented gaps.  buffn_acc is the data buffer
+ * window — e.g. the ONFI parameter page is copied out of it.
+ */
+struct ebi2nd_regs {
+	uint32_t flash_cmd;                               /* 0x00000000 */
+	uint32_t addr0;                                   /* 0x00000004 */
+	uint32_t addr1;                                   /* 0x00000008 */
+	uint32_t flash_chip_select;                       /* 0x0000000C */
+	uint32_t exec_cmd;                                /* 0x00000010 */
+	uint32_t flash_status;                            /* 0x00000014 */
+	uint32_t buffer_status;                           /* 0x00000018 */
+	uint32_t sflashc_status;                          /* 0x0000001C */
+	uint32_t dev0_cfg0;                               /* 0x00000020 */
+	uint32_t dev0_cfg1;                               /* 0x00000024 */
+	uint32_t dev0_ecc_cfg;                            /* 0x00000028 */
+	uint32_t dev1_ecc_cfg;                            /* 0x0000002C */
+	uint32_t dev1_cfg0;                               /* 0x00000030 */
+	uint32_t dev1_cfg1;                               /* 0x00000034 */
+	uint32_t sflashc_cmd;                             /* 0x00000038 */
+	uint32_t sflashc_exec_cmd;                        /* 0x0000003C */
+	uint32_t flash_read_id;                           /* 0x00000040 */
+	uint32_t flash_read_status;                       /* 0x00000044 */
+	uint8_t reserved0[8];
+	uint32_t flash_config_data;                       /* 0x00000050 */
+	uint32_t flash_config;                            /* 0x00000054 */
+	uint32_t flash_config_mode;                       /* 0x00000058 */
+	uint8_t reserved1[4];
+	uint32_t flash_config_status;                     /* 0x00000060 */
+	uint32_t macro1_reg;                              /* 0x00000064 */
+	uint32_t hsddr_nand_cfg;                          /* 0x00000068 */
+	uint8_t reserved2[4];
+	uint32_t xfr_step1;                               /* 0x00000070 */
+	uint32_t xfr_step2;                               /* 0x00000074 */
+	uint32_t xfr_step3;                               /* 0x00000078 */
+	uint32_t xfr_step4;                               /* 0x0000007C */
+	uint32_t xfr_step5;                               /* 0x00000080 */
+	uint32_t xfr_step6;                               /* 0x00000084 */
+	uint32_t xfr_step7;                               /* 0x00000088 */
+	uint8_t reserved3[4];
+	uint32_t genp_reg0;                               /* 0x00000090 */
+	uint32_t genp_reg1;                               /* 0x00000094 */
+	uint32_t genp_reg2;                               /* 0x00000098 */
+	uint32_t genp_reg3;                               /* 0x0000009C */
+	uint32_t dev_cmd0;                                /* 0x000000A0 */
+	uint32_t dev_cmd1;                                /* 0x000000A4 */
+	uint32_t dev_cmd2;                                /* 0x000000A8 */
+	uint32_t dev_cmd_vld;                             /* 0x000000AC */
+	uint8_t reserved4[16];
+	uint32_t addr2;                                   /* 0x000000C0 */
+	uint32_t addr3;                                   /* 0x000000C4 */
+	uint32_t addr4;                                   /* 0x000000C8 */
+	uint32_t addr5;                                   /* 0x000000CC */
+	uint32_t dev_cmd3;                                /* 0x000000D0 */
+	uint32_t dev_cmd4;                                /* 0x000000D4 */
+	uint32_t dev_cmd5;                                /* 0x000000D8 */
+	uint32_t dev_cmd6;                                /* 0x000000DC */
+	uint32_t sflashc_burst_cfg;                       /* 0x000000E0 */
+	uint32_t addr6;                                   /* 0x000000E4 */
+	uint32_t erased_cw_detect_cfg;                    /* 0x000000E8 */
+	uint32_t erased_cw_detect_status;                 /* 0x000000EC */
+	uint32_t ebi2_ecc_buf_cfg;                        /* 0x000000F0 */
+	uint32_t dbg_cfg;                                 /* 0x000000F4 */
+	uint32_t hw_profile_cfg;                          /* 0x000000F8 */
+	uint32_t hw_info;                                 /* 0x000000FC */
+	uint32_t buffn_acc[144];                          /* 0x00000100 */
+};
+
+/* Register: NAND_DEVn_CFG0 */
+/* For each field: FIELD_MASK selects it in place, FIELD(i) shifts a
+ * value into position; named constants are pre-shifted field values. */
+#define SET_RD_MODE_AFTER_STATUS_MASK            0x80000000
+#define SET_RD_MODE_AFTER_STATUS_SEND_READ_CMD   0x80000000
+#define SET_RD_MODE_AFTER_STATUS_DO_NOT_SEND     0x00000000
+#define SET_RD_MODE_AFTER_STATUS(i)              ((i) << 31)
+
+#define STATUS_BFR_READ_MASK                     0x40000000
+#define STATUS_BFR_READ_DO_NOT_READ_STATUS       0x00000000
+#define STATUS_BFR_READ_READ_STATUS_BEFORE_DATA  0x40000000
+#define STATUS_BFR_READ(i)                       ((i) << 30)
+
+#define NUM_ADDR_CYCLES_MASK                     0x38000000
+#define NUM_ADDR_CYCLES_NO_ADDRESS_CYCLES        0x00000000
+#define NUM_ADDR_CYCLES(i)                       ((i) << 27)
+
+#define SPARE_SIZE_BYTES_MASK                    0x07800000
+#define SPARE_SIZE_BYTES(i)                      ((i) << 23)
+
+#define RS_ECC_PARITY_SIZE_BYTES_MASK            0x00780000
+#define RS_ECC_PARITY_SIZE_BYTES(i)              ((i) << 19)
+
+#define UD_SIZE_BYTES_MASK                       0x0007fe00
+#define UD_SIZE_BYTES(i)                         ((i) << 9)
+
+#define CW_PER_PAGE_MASK                         0x000001c0
+#define CW_PER_PAGE_1_CODEWORD_PER_PAGE          0x00000000
+#define CW_PER_PAGE_2_CODEWORDS_PER_PAGE         0x00000040
+#define CW_PER_PAGE_3_CODEWORDS_PER_PAGE         0x00000080
+#define CW_PER_PAGE_4_CODEWORDS_PER_PAGE         0x000000C0
+#define CW_PER_PAGE_5_CODEWORDS_PER_PAGE         0x00000100
+#define CW_PER_PAGE_6_CODEWORDS_PER_PAGE         0x00000140
+#define CW_PER_PAGE_7_CODEWORDS_PER_PAGE         0x00000180
+#define CW_PER_PAGE_8_CODEWORDS_PER_PAGE         0x000001C0
+#define CW_PER_PAGE(i)                           ((i) << 6)
+
+#define MSB_CW_PER_PAGE_MASK                     0x00000020
+#define MSB_CW_PER_PAGE(i)                       ((i) << 5)
+
+#define DISABLE_STATUS_AFTER_WRITE_MASK          0x00000010
+#define DISABLE_STATUS_AFTER_WRITE(i)            ((i) << 4)
+
+#define BUSY_TIMEOUT_ERROR_SELECT_MASK           0x0000000f
+#define BUSY_TIMEOUT_ERROR_SELECT_16_MS          0x00000000
+#define BUSY_TIMEOUT_ERROR_SELECT_32_MS          0x00000001
+#define BUSY_TIMEOUT_ERROR_SELECT_64_MS          0x00000002
+#define BUSY_TIMEOUT_ERROR_SELECT_128_MS         0x00000003
+#define BUSY_TIMEOUT_ERROR_SELECT_256_MS         0x00000004
+#define BUSY_TIMEOUT_ERROR_SELECT_512_MS         0x00000005
+#define BUSY_TIMEOUT_ERROR_SELECT_1_SEC          0x00000006
+#define BUSY_TIMEOUT_ERROR_SELECT_2_SEC          0x00000007
+#define BUSY_TIMEOUT_ERROR_SELECT_1_MS           0x00000008
+#define BUSY_TIMEOUT_ERROR_SELECT(i)             ((i) << 0)
+
+
+/* Register: NAND_DEVn_CFG1 */
+/* ECC engine selection, bus width, bad-block marker location and
+ * timing-related fields. */
+#define RS_ECC_MODE_MASK                         0x30000000
+#define RS_ECC_MODE(i)                           ((i) << 28)
+
+#define ENABLE_BCH_ECC_MASK                      0x08000000
+#define ENABLE_BCH_ECC_ENABLES_TAVOR_ECC_CORE_WITH_BCH_ENCODING_DECODING \
+	0x08000000
+#define ENABLE_BCH_ECC(i)                        ((i) << 27)
+
+#define DISABLE_ECC_RESET_AFTER_OPDONE_MASK      0x02000000
+#define DISABLE_ECC_RESET_AFTER_OPDONE(i)        ((i) << 25)
+
+#define ECC_DECODER_CGC_EN_MASK                  0x01000000
+#define ECC_DECODER_CGC_EN_FREE_RUNNING_CLOCK    0x01000000
+#define ECC_DECODER_CGC_EN(i)                    ((i) << 24)
+
+#define ECC_ENCODER_CGC_EN_MASK                  0x00800000
+#define ECC_ENCODER_CGC_EN_FREE_RUNNING_CLOCK    0x00800000
+#define ECC_ENCODER_CGC_EN(i)                    ((i) << 23)
+
+#define WR_RD_BSY_GAP_MASK                       0x007e0000
+#define WR_RD_BSY_GAP_2_CLOCK_CYCLE_GAP          0x00000000
+#define WR_RD_BSY_GAP_4_CLOCK_CYCLES_GAP         0x00020000
+#define WR_RD_BSY_GAP_6_CLOCK_CYCLES_GAP         0x00040000
+#define WR_RD_BSY_GAP_8_CLOCK_CYCLES_GAP         0x00060000
+#define WR_RD_BSY_GAP_10_CLOCK_CYCLES_GAP        0x00080000
+#define WR_RD_BSY_GAP_128_CLOCK_CYCLES_GAP       0x007E0000
+#define WR_RD_BSY_GAP(i)                         ((i) << 17)
+
+#define BAD_BLOCK_IN_SPARE_AREA_MASK             0x00010000
+#define BAD_BLOCK_IN_SPARE_AREA_IN_USER_DATA_AREA 0x00000000
+#define BAD_BLOCK_IN_SPARE_AREA_IN_SPARE_AREA    0x00010000
+#define BAD_BLOCK_IN_SPARE_AREA(i)               ((i) << 16)
+
+#define BAD_BLOCK_BYTE_NUM_MASK                  0x0000ffc0
+#define BAD_BLOCK_BYTE_NUM(i)                    ((i) << 6)
+
+#define CS_ACTIVE_BSY_MASK                       0x00000020
+#define CS_ACTIVE_BSY_ASSERT_CS_DURING_BUSY      0x00000020
+#define CS_ACTIVE_BSY_ALLOW_CS_DE_ASSERTION      0x00000000
+#define CS_ACTIVE_BSY(i)                         ((i) << 5)
+
+#define NAND_RECOVERY_CYCLES_MASK                0x0000001c
+#define NAND_RECOVERY_CYCLES_1_RECOVERY_CYCLE    0x00000000
+#define NAND_RECOVERY_CYCLES_2_RECOVERY_CYCLES   0x00000004
+#define NAND_RECOVERY_CYCLES_3_RECOVERY_CYCLES   0x00000008
+#define NAND_RECOVERY_CYCLES_8_RECOVERY_CYCLES   0x0000001C
+#define NAND_RECOVERY_CYCLES(i)                  ((i) << 2)
+
+#define WIDE_FLASH_MASK                          0x00000002
+#define WIDE_FLASH_8_BIT_DATA_BUS                0x00000000
+#define WIDE_FLASH_16_BIT_DATA_BUS               0x00000002
+#define WIDE_FLASH(i)                            ((i) << 1)
+
+#define ECC_DISABLE_MASK                         0x00000001
+#define ECC_DISABLE_ECC_ENABLED                  0x00000000
+#define ECC_DISABLE_ECC_DISABLED                 0x00000001
+#define ECC_DISABLE(i)                           ((i) << 0)
+
+
+/* Register: NAND_DEVn_ECC_CFG */
+/* BCH ECC engine configuration (strength, parity size, data bytes). */
+#define ECC_FORCE_CLK_OPEN_MASK                  0x40000000
+#define ECC_FORCE_CLK_OPEN(i)                    ((i) << 30)
+
+#define ECC_DEC_CLK_SHUTDOWN_MASK                0x20000000
+#define ECC_DEC_CLK_SHUTDOWN(i)                  ((i) << 29)
+
+#define ECC_ENC_CLK_SHUTDOWN_MASK                0x10000000
+#define ECC_ENC_CLK_SHUTDOWN(i)                  ((i) << 28)
+
+#define ECC_NUM_DATA_BYTES_MASK                  0x03ff0000
+#define ECC_NUM_DATA_BYTES_516_BYTES             0x02040000
+#define ECC_NUM_DATA_BYTES_517_BYTES             0x02050000
+#define ECC_NUM_DATA_BYTES(i)                    ((i) << 16)
+
+#define BCH_ECC_PARITY_SIZE_BYTES_MASK           0x00001f00
+#define BCH_ECC_PARITY_SIZE_BYTES(i)             ((i) << 8)
+
+#define BCH_ECC_MODE_MASK                        0x00000030
+#define BCH_ECC_MODE_8_BIT_ECC_ERROR_DETECTION_CORRECTION 0x00000010
+#define BCH_ECC_MODE_12_BIT_ECC_ERROR_DETECTION_CORRECTION 0x00000020
+#define BCH_ECC_MODE_16_BIT_ECC_ERROR_DETECTION_CORRECTION 0x00000030
+#define BCH_ECC_MODE(i)                          ((i) << 4)
+
+#define ECC_SW_RESET_MASK                        0x00000002
+#define ECC_SW_RESET_SOFTWARE_RESET_TO_ECC       0x00000002
+#define ECC_SW_RESET(i)                          ((i) << 1)
+
+#define BCH_ECC_DISABLE_MASK                     0x00000001
+#define BCH_ECC_DISABLE_ECC_ENABLED              0x00000000
+#define BCH_ECC_DISABLE(i)                       ((i) << 0)
+
+
+/* Register: EBI2_ECC_BUF_CFG */
+#define NUM_STEPS_MASK                           0x000003ff
+#define NUM_STEPS(i)                             ((i) << 0)
+
+
+/* Register: FLASH_DEV_CMD_VLD */
+/* Per-command "valid" enables for the controller's command set. */
+#define SEQ_READ_START_VLD_MASK                  0x00000010
+#define SEQ_READ_START_VLD(i)                    ((i) << 4)
+
+#define ERASE_START_VLD_MASK                     0x00000008
+#define ERASE_START_VLD(i)                       ((i) << 3)
+
+#define WRITE_START_VLD_MASK                     0x00000004
+#define WRITE_START_VLD(i)                       ((i) << 2)
+
+#define READ_STOP_VLD_MASK                       0x00000002
+#define READ_STOP_VLD(i)                         ((i) << 1)
+
+#define READ_START_VLD_MASK                      0x00000001
+#define READ_START_VLD(i)                        ((i) << 0)
+
+
+/* Register: NAND_ADDR0 */
+#define DEV_ADDR0_MASK                           0xffffffff
+#define DEV_ADDR0(i)                             ((i) << 0)
+
+
+/* Register: NAND_ADDR1 */
+#define DEV_ADDR1_MASK                           0xffffffff
+#define DEV_ADDR1(i)                             ((i) << 0)
+
+
+/* Register: NANDC_EXEC_CMD */
+#define EXEC_CMD_MASK                            0x00000001
+#define EXEC_CMD_EXECUTE_THE_COMMAND             0x00000001
+#define EXEC_CMD(i)                              ((i) << 0)
+
+
+/* Register: NAND_ERASED_CW_DETECT_CFG */
+/* These bit positions are shared with NAND_ERASED_CW_DETECT_STATUS. */
+#define ERASED_CW_ECC_MASK_MASK                  0x00000002
+#define ERASED_CW_ECC_MASK(i)                    ((i) << 1)
+
+#define AUTO_DETECT_RES_MASK                     0x00000001
+#define AUTO_DETECT_RES(i)                       ((i) << 0)
+
+
+/* Register: NAND_ERASED_CW_DETECT_STATUS */
+#define PAGE_ERASED_MASK                         0x00000020
+#define PAGE_ERASED(i)                           ((i) << 5)
+
+#define CODEWORD_ERASED_MASK                     0x00000010
+#define CODEWORD_ERASED(i)                       ((i) << 4)
+
+/*
+ * Bits 1:0 (ERASED_CW_ECC_MASK, AUTO_DETECT_RES) have the same layout
+ * as in NAND_ERASED_CW_DETECT_CFG; the definitions above apply to both
+ * registers, so the (previously duplicated) macros are not repeated.
+ */
+
+
+/* Register: NAND_FLASH_CMD */
+/* Command word fields; OP_CMD selects the operation to execute. */
+#define ONE_NAND_INTR_STATUS_MASK                0x00040000
+#define ONE_NAND_INTR_STATUS(i)                  ((i) << 18)
+
+#define ONE_NAND_HOST_CFG_MASK                   0x00020000
+#define ONE_NAND_HOST_CFG(i)                     ((i) << 17)
+
+#define AUTO_DETECT_DATA_XFR_SIZE_MASK           0x0001ff80
+#define AUTO_DETECT_DATA_XFR_SIZE(i)             ((i) << 7)
+
+#define AUTO_DETECT_MASK                         0x00000040
+#define AUTO_DETECT(i)                           ((i) << 6)
+
+#define LAST_PAGE_MASK                           0x00000020
+#define LAST_PAGE(i)                             ((i) << 5)
+
+#define PAGE_ACC_MASK                            0x00000010
+#define PAGE_ACC_PAGE_ACCESS_COMMAND             0x00000010
+#define PAGE_ACC_NON_PAGE_ACCESS_COMMAND         0x00000000
+#define PAGE_ACC(i)                              ((i) << 4)
+
+#define OP_CMD_MASK                              0x0000000f
+#define OP_CMD_RESERVED_0                        0x00000000
+#define OP_CMD_ABORT_TRANSACTION                 0x00000001
+#define OP_CMD_PAGE_READ                         0x00000002
+#define OP_CMD_PAGE_READ_WITH_ECC                0x00000003
+#define OP_CMD_PAGE_READ_WITH_ECC_SPARE          0x00000004
+#define OP_CMD_RESERVED_5                        0x00000005
+#define OP_CMD_PROGRAM_PAGE                      0x00000006
+#define OP_CMD_PAGE_PROGRAM_WITH_ECC             0x00000007
+#define OP_CMD_RESERVED_8                        0x00000008
+#define OP_CMD_PROGRAM_PAGE_WITH_SPARE           0x00000009
+#define OP_CMD_BLOCK_ERASE                       0x0000000A
+#define OP_CMD_FETCH_ID                          0x0000000B
+#define OP_CMD_CHECK_STATUS                      0x0000000C
+#define OP_CMD_RESET_NAND_FLASH_DEVICE           0x0000000D
+#define OP_CMD_RESERVED_E                        0x0000000E
+#define OP_CMD_RESERVED_F                        0x0000000F
+#define OP_CMD(i)                                ((i) << 0)
+
+
+/* Register: NAND_FLASH_STATUS */
+/* Status word checked after each controller operation (OP_ERR,
+ * timeout, program/erase result, and the in-progress OPER_STATUS). */
+#define DEV_STATUS_MASK                          0xffff0000
+#define DEV_STATUS(i)                            ((i) << 16)
+
+#define CODEWORD_CNTR_MASK                       0x0000f000
+#define CODEWORD_CNTR(i)                         ((i) << 12)
+
+#define DEVICE_2KBYTE_MASK                       0x00000800
+#define DEVICE_2KBYTE_2K_BYTE_PAGE_DEVICE        0x00000800
+#define DEVICE_2KBYTE_NOT_A_2K_BYTE_PAGE_DEVICE  0x00000000
+#define DEVICE_2KBYTE(i)                         ((i) << 11)
+
+#define DEVICE_512BYTE_MASK                      0x00000400
+#define DEVICE_512BYTE_512_BYTE_PAGE_DEVICE      0x00000400
+#define DEVICE_512BYTE_NOT_A_512_BYTE_PAGE_DEVICE 0x00000000
+#define DEVICE_512BYTE(i)                        ((i) << 10)
+
+#define AUTO_DETECT_DONE_MASK                    0x00000200
+#define AUTO_DETECT_DONE(i)                      ((i) << 9)
+
+#define MPU_ERROR_MASK                           0x00000100
+#define MPU_ERROR_MPU_ERROR_FOR_THE_ACCESS       0x00000100
+#define MPU_ERROR_NO_ERROR                       0x00000000
+#define MPU_ERROR(i)                             ((i) << 8)
+
+#define PROG_ERASE_OP_RESULT_MASK                0x00000080
+#define PROG_ERASE_OP_RESULT_SUCCESSFUL          0x00000000
+#define PROG_ERASE_OP_RESULT_NOT_SUCCESSFUL      0x00000080
+#define PROG_ERASE_OP_RESULT(i)                  ((i) << 7)
+
+#define NANDC_TIMEOUT_ERR_MASK                   0x00000040
+#define NANDC_TIMEOUT_ERR_NO_ERROR               0x00000000
+#define NANDC_TIMEOUT_ERR_ERROR                  0x00000040
+#define NANDC_TIMEOUT_ERR(i)                     ((i) << 6)
+
+#define READY_BSY_N_MASK                         0x00000020
+#define READY_BSY_N_EXTERNAL_FLASH_IS_BUSY       0x00000000
+#define READY_BSY_N_EXTERNAL_FLASH_IS_READY      0x00000020
+#define READY_BSY_N(i)                           ((i) << 5)
+
+#define OP_ERR_MASK                              0x00000010
+#define OP_ERR(i)                                ((i) << 4)
+
+#define OPER_STATUS_MASK                         0x0000000f
+#define OPER_STATUS_IDLE_STATE                   0x00000000
+#define OPER_STATUS_ABORT_TRANSACTION            0x00000001
+#define OPER_STATUS_PAGE_READ                    0x00000002
+#define OPER_STATUS_PAGE_READ_WITH_ECC           0x00000003
+#define OPER_STATUS_PAGE_READ_WITH_ECC_AND_SPARE_DATA 0x00000004
+#define OPER_STATUS_SEQUENTIAL_PAGE_READ         0x00000005
+#define OPER_STATUS_PROGRAM_PAGE                 0x00000006
+#define OPER_STATUS_PROGRAM_PAGE_WITH_ECC        0x00000007
+#define OPER_STATUS_RESERVED_PROGRAMMING         0x00000008
+#define OPER_STATUS_PROGRAM_PAGE_WITH_SPARE      0x00000009
+#define OPER_STATUS_BLOCK_ERASE                  0x0000000A
+#define OPER_STATUS_FETCH_ID                     0x0000000B
+#define OPER_STATUS_CHECK_STATUS                 0x0000000C
+#define OPER_STATUS_RESET_FLASH_DEVICE           0x0000000D
+#define OPER_STATUS(i)                           ((i) << 0)
+
+
+/* Register: NANDC_BUFFER_STATUS */
+/* ECC decode result: uncorrectable flag and corrected-bit count. */
+#define BAD_BLOCK_STATUS_MASK                    0xffff0000
+#define BAD_BLOCK_STATUS(i)                      ((i) << 16)
+
+#define XFR_STEP2_REG_UPDATE_DONE_MASK           0x00000200
+#define XFR_STEP2_REG_UPDATE_DONE(i)             ((i) << 9)
+
+#define UNCORRECTABLE_MASK                       0x00000100
+#define UNCORRECTABLE(i)                         ((i) << 8)
+
+#define NUM_ERRORS_MASK                          0x0000001f
+#define NUM_ERRORS(i)                            ((i) << 0)
+
+
+/* Register: FLASH_DEV_CMD1 */
+/* Opcode bytes sent on the NAND bus for read-family commands;
+ * READ_ADDR is retargeted to NAND_CMD_PARAM for ONFI parameter reads. */
+#define SEQ_READ_MODE_START_MASK                 0xff000000
+#define SEQ_READ_MODE_START(i)                   ((i) << 24)
+
+#define SEQ_READ_MODE_ADDR_MASK                  0x00ff0000
+#define SEQ_READ_MODE_ADDR(i)                    ((i) << 16)
+
+#define READ_START_MASK                          0x0000ff00
+#define READ_START(i)                            ((i) << 8)
+
+#define READ_ADDR_MASK                           0x000000ff
+#define READ_ADDR(i)                             ((i) << 0)
+
+
+#endif