From: Vignesh R
Date: Tue, 5 Feb 2019 05:59:17 +0000 (+0530)
Subject: mtd: spi: Port SPI NOR framework from Linux
X-Git-Url: http://git.dujemihanovic.xyz/?a=commitdiff_plain;h=7aeedac01534ab343c28abed60f8e0fb9311bbee;p=u-boot.git

mtd: spi: Port SPI NOR framework from Linux

Current U-Boot SPI NOR support (the sf layer) is quite outdated: it does not
support 4-byte addressing opcodes, SFDP table parsing or the different types
of quad mode enable sequences. Many newer flashes no longer support the BANK
registers used by the sf layer to access >16MB of flash address space.

So, sync the SPI NOR framework from Linux v4.19, which supports all of the
above features. Start with a basic sync-up that brings in the core framework;
subsequent commits will bring in more features.

Signed-off-by: Vignesh R
Tested-by: Simon Goldschmidt
Tested-by: Stefan Roese
Tested-by: Horatiu Vultur
Reviewed-by: Jagan Teki
Tested-by: Jagan Teki #zynq-microzed
---

diff --git a/drivers/mtd/spi/spi-nor-core.c b/drivers/mtd/spi/spi-nor-core.c
new file mode 100644
index 0000000000..54ef94d20f
--- /dev/null
+++ b/drivers/mtd/spi/spi-nor-core.c
@@ -0,0 +1,1716 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * Synced from Linux v4.19
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+
+#define HZ					CONFIG_SYS_HZ
+
+#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)
+
+#define SPI_NOR_MAX_ID_LEN	6
+#define SPI_NOR_MAX_ADDR_WIDTH	4
+
+struct flash_info {
+	char		*name;
+
+	/*
+	 * This array stores the ID bytes.
+	 * The first three bytes are the JEDEC ID.
+	 * JEDEC ID zero means "no ID" (mostly older chips).
+	 */
+	u8		id[SPI_NOR_MAX_ID_LEN];
+	u8		id_len;
+
+	/* The size listed here is what works with SPINOR_OP_SE, which isn't
+	 * necessarily called a "sector" by the vendor.
+	 */
+	unsigned int	sector_size;
+	u16		n_sectors;
+
+	u16		page_size;
+	u16		addr_width;
+
+	u16		flags;
+#define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
+#define SST_WRITE		BIT(2)	/* use SST byte programming */
+#define SPI_NOR_NO_FR		BIT(3)	/* Can't do fastread */
+#define SECT_4K_PMC		BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ	BIT(5)	/* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ	BIT(6)	/* Flash supports Quad Read */
+#define USE_FSR			BIT(7)	/* use flag status register */
+#define SPI_NOR_HAS_LOCK	BIT(8)	/* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB		BIT(9)	/*
+					 * Flash SR has Top/Bottom (TB) protect
+					 * bit. Must be used with
+					 * SPI_NOR_HAS_LOCK.
+					 */
+#define SPI_S3AN		BIT(10)	/*
+					 * Xilinx Spartan 3AN In-System Flash
+					 * (MFR cannot be used for probing
+					 * because it has the same value as
+					 * ATMEL flashes)
+					 */
+#define SPI_NOR_4B_OPCODES	BIT(11)	/*
+					 * Use dedicated 4byte address op codes
+					 * to support memory size above 128Mib.
+ */ +#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ +#define USE_CLSR BIT(14) /* use CLSR command */ + + int (*quad_enable)(struct spi_nor *nor); +}; + +#define JEDEC_MFR(info) ((info)->id[0]) + +static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len) +{ + return -EINVAL; +} + +static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +{ + return -EINVAL; +} + +static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + return -EINVAL; +} + +static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + return -EINVAL; +} + +/* + * Read the status register, returning its value in the location + * Return the status register value. + * Returns negative if error occurred. + */ +static int read_sr(struct spi_nor *nor) +{ + int ret; + u8 val; + + ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1); + if (ret < 0) { + pr_debug("error %d reading SR\n", (int)ret); + return ret; + } + + return val; +} + +/* + * Read the flag status register, returning its value in the location + * Return the status register value. + * Returns negative if error occurred. + */ +static int read_fsr(struct spi_nor *nor) +{ + int ret; + u8 val; + + ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1); + if (ret < 0) { + pr_debug("error %d reading FSR\n", ret); + return ret; + } + + return val; +} + +/* + * Read configuration register, returning its value in the + * location. Return the configuration register value. + * Returns negative if error occurred. + */ +#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) +static int read_cr(struct spi_nor *nor) +{ + int ret; + u8 val; + + ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1); + if (ret < 0) { + dev_dbg(nor->dev, "error %d reading CR\n", ret); + return ret; + } + + return val; +} +#endif + +/* + * Write status register 1 byte + * Returns negative if error occurred. + */ +static int write_sr(struct spi_nor *nor, u8 val) +{ + nor->cmd_buf[0] = val; + return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1); +} + +/* + * Set write enable latch with Write Enable command. + * Returns negative if error occurred. + */ +static int write_enable(struct spi_nor *nor) +{ + return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); +} + +/* + * Send write disable instruction to the chip. 
+ */ +static int write_disable(struct spi_nor *nor) +{ + return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0); +} + +static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) +{ + return mtd->priv; +} + +static int spi_nor_sr_ready(struct spi_nor *nor) +{ + int sr = read_sr(nor); + + if (sr < 0) + return sr; + + if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) { + if (sr & SR_E_ERR) + dev_dbg(nor->dev, "Erase Error occurred\n"); + else + dev_dbg(nor->dev, "Programming Error occurred\n"); + + nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0); + return -EIO; + } + + return !(sr & SR_WIP); +} + +static int spi_nor_fsr_ready(struct spi_nor *nor) +{ + int fsr = read_fsr(nor); + + if (fsr < 0) + return fsr; + + if (fsr & (FSR_E_ERR | FSR_P_ERR)) { + if (fsr & FSR_E_ERR) + dev_dbg(nor->dev, "Erase operation failed.\n"); + else + dev_dbg(nor->dev, "Program operation failed.\n"); + + if (fsr & FSR_PT_ERR) + dev_dbg(nor->dev, + "Attempted to modify a protected sector.\n"); + + nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0); + return -EIO; + } + + return fsr & FSR_READY; +} + +static int spi_nor_ready(struct spi_nor *nor) +{ + int sr, fsr; + + sr = spi_nor_sr_ready(nor); + if (sr < 0) + return sr; + fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1; + if (fsr < 0) + return fsr; + return sr && fsr; +} + +/* + * Service routine to read status register until ready, or timeout occurs. + * Returns non-zero if error. + */ +static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, + unsigned long timeout) +{ + unsigned long timebase; + int ret; + + timebase = get_timer(0); + + while (get_timer(timebase) < timeout) { + ret = spi_nor_ready(nor); + if (ret < 0) + return ret; + if (ret) + return 0; + } + + dev_err(nor->dev, "flash operation timed out\n"); + + return -ETIMEDOUT; +} + +static int spi_nor_wait_till_ready(struct spi_nor *nor) +{ + return spi_nor_wait_till_ready_with_timeout(nor, + DEFAULT_READY_WAIT_JIFFIES); +} + +/* + * Initiate the erasure of a single sector + */ +static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) +{ + u8 buf[SPI_NOR_MAX_ADDR_WIDTH]; + int i; + + if (nor->erase) + return nor->erase(nor, addr); + + /* + * Default implementation, if driver doesn't have a specialized HW + * control + */ + for (i = nor->addr_width - 1; i >= 0; i--) { + buf[i] = addr & 0xff; + addr >>= 8; + } + + return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width); +} + +/* + * Erase an address range on the nor chip. The address range may extend + * one or more erase sectors. Return an error is there is a problem erasing. 
+ */ +static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + u32 addr, len, rem; + int ret; + + dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, + (long long)instr->len); + + div_u64_rem(instr->len, mtd->erasesize, &rem); + if (rem) + return -EINVAL; + + addr = instr->addr; + len = instr->len; + + while (len) { + write_enable(nor); + + ret = spi_nor_erase_sector(nor, addr); + if (ret) + goto erase_err; + + addr += mtd->erasesize; + len -= mtd->erasesize; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto erase_err; + } + + write_disable(nor); + +erase_err: + return ret; +} + +#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST) +/* Write status register and ensure bits in mask match written values */ +static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask) +{ + int ret; + + write_enable(nor); + ret = write_sr(nor, status_new); + if (ret) + return ret; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + return ret; + + ret = read_sr(nor); + if (ret < 0) + return ret; + + return ((ret & mask) != (status_new & mask)) ? -EIO : 0; +} + +static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, + uint64_t *len) +{ + struct mtd_info *mtd = &nor->mtd; + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + int shift = ffs(mask) - 1; + int pow; + + if (!(sr & mask)) { + /* No protection */ + *ofs = 0; + *len = 0; + } else { + pow = ((sr & mask) ^ mask) >> shift; + *len = mtd->size >> pow; + if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB) + *ofs = 0; + else + *ofs = mtd->size - *len; + } +} + +/* + * Return 1 if the entire region is locked (if @locked is true) or unlocked (if + * @locked is false); 0 otherwise + */ +static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, u64 len, + u8 sr, bool locked) +{ + loff_t lock_offs; + uint64_t lock_len; + + if (!len) + return 1; + + stm_get_locked_range(nor, sr, &lock_offs, &lock_len); + + if (locked) + /* Requested range is a sub-range of locked range */ + return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); + else + /* Requested range does not overlap with locked range */ + return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs); +} + +static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return stm_check_lock_status_sr(nor, ofs, len, sr, true); +} + +static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return stm_check_lock_status_sr(nor, ofs, len, sr, false); +} + +/* + * Lock a region of the flash. Compatible with ST Micro and similar flash. + * Supports the block protection bits BP{0,1,2} in the status register + * (SR). 
Does not support these features found in newer SR bitfields: + * - SEC: sector/block protect - only handle SEC=0 (block protect) + * - CMP: complement protect - only support CMP=0 (range is not complemented) + * + * Support for the following is provided conditionally for some flash: + * - TB: top/bottom protect + * + * Sample table portion for 8MB flash (Winbond w25q64fw): + * + * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion + * -------------------------------------------------------------------------- + * X | X | 0 | 0 | 0 | NONE | NONE + * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64 + * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32 + * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16 + * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8 + * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 + * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 + * X | X | 1 | 1 | 1 | 8 MB | ALL + * ------|-------|-------|-------|-------|---------------|------------------- + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2 + * + * Returns negative on errors, 0 on success. + */ +static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + struct mtd_info *mtd = &nor->mtd; + int status_old, status_new; + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + u8 shift = ffs(mask) - 1, pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; + + status_old = read_sr(nor); + if (status_old < 0) + return status_old; + + /* If nothing in our range is unlocked, we don't need to do anything */ + if (stm_is_locked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is unlocked, we can't use 'bottom' protection */ + if (!stm_is_locked_sr(nor, 0, ofs, status_old)) + can_be_bottom = false; + + /* If anything above us is unlocked, we can't use 'top' protection */ + if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_top = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should end up locked */ + if (use_top) + lock_len = mtd->size - ofs; + else + lock_len = ofs + len; + + /* + * Need smallest pow such that: + * + * 1 / (2^pow) <= (len / size) + * + * so (assuming power-of-2 size) we do: + * + * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len)) + */ + pow = ilog2(mtd->size) - ilog2(lock_len); + val = mask - (pow << shift); + if (val & ~mask) + return -EINVAL; + /* Don't "lock" with no region! */ + if (!(val & mask)) + return -EINVAL; + + status_new = (status_old & ~mask & ~SR_TB) | val; + + /* Disallow further writes if WP pin is asserted */ + status_new |= SR_SRWD; + + if (!use_top) + status_new |= SR_TB; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; + + /* Only modify protection if it will not unlock other areas */ + if ((status_new & mask) < (status_old & mask)) + return -EINVAL; + + return write_sr_and_check(nor, status_new, mask); +} + +/* + * Unlock a region of the flash. See stm_lock() for more info + * + * Returns negative on errors, 0 on success. 
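+ *
+ * (Worked example of the BP arithmetic shared with stm_lock(): on an 8 MiB
+ * part with mask = SR_BP2 | SR_BP1 | SR_BP0 = 0x1c, locking the upper 1 MiB
+ * gives pow = ilog2(8 MiB) - ilog2(1 MiB) = 3, so val = 0x1c - (3 << 2) =
+ * SR_BP2, which matches the "1 MB / Upper 1/8" row of the table above.)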
+ */ +static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + struct mtd_info *mtd = &nor->mtd; + int status_old, status_new; + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + u8 shift = ffs(mask) - 1, pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; + + status_old = read_sr(nor); + if (status_old < 0) + return status_old; + + /* If nothing in our range is locked, we don't need to do anything */ + if (stm_is_unlocked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is locked, we can't use 'top' protection */ + if (!stm_is_unlocked_sr(nor, 0, ofs, status_old)) + can_be_top = false; + + /* If anything above us is locked, we can't use 'bottom' protection */ + if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_bottom = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should remain locked */ + if (use_top) + lock_len = mtd->size - (ofs + len); + else + lock_len = ofs; + + /* + * Need largest pow such that: + * + * 1 / (2^pow) >= (len / size) + * + * so (assuming power-of-2 size) we do: + * + * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len)) + */ + pow = ilog2(mtd->size) - order_base_2(lock_len); + if (lock_len == 0) { + val = 0; /* fully unlocked */ + } else { + val = mask - (pow << shift); + /* Some power-of-two sizes are not supported */ + if (val & ~mask) + return -EINVAL; + } + + status_new = (status_old & ~mask & ~SR_TB) | val; + + /* Don't protect status register if we're fully unlocked */ + if (lock_len == 0) + status_new &= ~SR_SRWD; + + if (!use_top) + status_new |= SR_TB; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; + + /* Only modify protection if it will not lock other areas */ + if ((status_new & mask) > (status_old & mask)) + return -EINVAL; + + return write_sr_and_check(nor, status_new, mask); +} + +/* + * Check if a region of the flash is (completely) locked. See stm_lock() for + * more info. + * + * Returns 1 if entire region is locked, 0 if any portion is unlocked, and + * negative on errors. + */ +static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int status; + + status = read_sr(nor); + if (status < 0) + return status; + + return stm_is_locked_sr(nor, ofs, len, status); +} +#endif /* CONFIG_SPI_FLASH_STMICRO */ + +/* Used when the "_ext_id" is two bytes at most */ +#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff, \ + ((_ext_id) >> 8) & 0xff, \ + (_ext_id) & 0xff, \ + }, \ + .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256, \ + .flags = (_flags), + +#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff, \ + ((_ext_id) >> 16) & 0xff, \ + ((_ext_id) >> 8) & 0xff, \ + (_ext_id) & 0xff, \ + }, \ + .id_len = 6, \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256, \ + .flags = (_flags), + +/* NOTE: double check command sets and memory organization when you add + * more nor chips. This current list focusses on newer chips, which + * have been converging on command sets which including JEDEC ID. 
+ * + * All newly added entries should describe *hardware* and should use SECT_4K + * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage + * scenarios excluding small sectors there is config option that can be + * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS. + * For historical (and compatibility) reasons (before we got above config) some + * old entries may be missing 4K flag. + */ +const struct flash_info spi_nor_ids[] = { +#ifdef CONFIG_SPI_FLASH_ATMEL /* ATMEL */ + /* Atmel -- some are (confusingly) marketed as "DataFlash" */ + { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, + { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, + + { "at45db011d", INFO(0x1f2200, 0, 64 * 1024, 4, SECT_4K) }, + { "at45db021d", INFO(0x1f2300, 0, 64 * 1024, 8, SECT_4K) }, + { "at45db041d", INFO(0x1f2400, 0, 64 * 1024, 8, SECT_4K) }, + { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, + { "at45db161d", INFO(0x1f2600, 0, 64 * 1024, 32, SECT_4K) }, + { "at45db321d", INFO(0x1f2700, 0, 64 * 1024, 64, SECT_4K) }, + { "at45db641d", INFO(0x1f2800, 0, 64 * 1024, 128, SECT_4K) }, + { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, +#endif +#ifdef CONFIG_SPI_FLASH_EON /* EON */ + /* EON -- en25xxx */ + { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, + { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, + { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, +#endif +#ifdef CONFIG_SPI_FLASH_GIGADEVICE /* GIGADEVICE */ + /* GigaDevice */ + { + "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, +#endif +#ifdef CONFIG_SPI_FLASH_ISSI /* ISSI */ + /* ISSI */ + { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64, 0) }, + { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128, 0) }, + { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, +#endif +#ifdef CONFIG_SPI_FLASH_MACRONIX /* MACRONIX */ + /* Macronix */ + { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, + { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, + { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, + { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25u1635e", INFO(0xc22535, 0, 64 * 1024, 32, SECT_4K) }, + { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, + { 
"mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, + { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, + { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "mx25l1633e", INFO(0xc22415, 0, 64 * 1024, 32, SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | SECT_4K) }, +#endif + +#ifdef CONFIG_SPI_FLASH_STMICRO /* STMICRO */ + /* Micron */ + { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, + { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, + { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, + { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, + { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, +#endif +#ifdef CONFIG_SPI_FLASH_SPANSION /* SPANSION */ + /* Spansion/Cypress -- single (large) sector size only, at least + * for the chips listed here (without boot sectors). 
+ */ + { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) }, + { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s_256k", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s_64k", INFO(0x010220, 0x4d01, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s_512k", INFO(0x010220, 0x4f00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, + { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, + { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, + { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, + { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, + { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) }, + { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) }, + { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, +#endif +#ifdef CONFIG_SPI_FLASH_SST /* SST */ + /* SST -- large erase sizes are "overlays", "sectors" are 4K */ + { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, + { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, + { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, + { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) }, + { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) }, + { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) }, + { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) }, + { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) }, + { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, + { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, + { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "sst26wf016", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K) }, + { "sst26wf032", INFO(0xbf2622, 0, 64 * 1024, 64, SECT_4K) }, + { "sst26wf064", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K) }, +#endif +#ifdef CONFIG_SPI_FLASH_STMICRO /* STMICRO */ + /* ST Microelectronics -- newer production may have feature updates */ + { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) }, + { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) }, + { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) }, + { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) }, + { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) }, 
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, + { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, + { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, + { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, + { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, +#endif +#ifdef CONFIG_SPI_FLASH_WINBOND /* WINBOND */ + /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ + { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) }, + { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, + { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, + { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, + { + "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, + { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, + { + "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, + { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, + { + "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, + { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, +#endif +#ifdef CONFIG_SPI_FLASH_XMC + /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) 
*/ + { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, +#endif + { }, +}; + +static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) +{ + int tmp; + u8 id[SPI_NOR_MAX_ID_LEN]; + const struct flash_info *info; + + if (!ARRAY_SIZE(spi_nor_ids)) + return ERR_PTR(-ENODEV); + + tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); + if (tmp < 0) { + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp); + return ERR_PTR(tmp); + } + + for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) { + info = &spi_nor_ids[tmp]; + if (info->id_len) { + if (!memcmp(info->id, id, info->id_len)) + return &spi_nor_ids[tmp]; + } + } + dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n", + id[0], id[1], id[2]); + return ERR_PTR(-ENODEV); +} + +static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); + + while (len) { + loff_t addr = from; + + ret = nor->read(nor, addr, len, buf); + if (ret == 0) { + /* We shouldn't see 0-length reads */ + ret = -EIO; + goto read_err; + } + if (ret < 0) + goto read_err; + + *retlen += ret; + buf += ret; + from += ret; + len -= ret; + } + ret = 0; + +read_err: + return ret; +} + +#ifdef CONFIG_SPI_FLASH_SST +static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + size_t actual; + int ret; + + dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + + write_enable(nor); + + nor->sst_write_second = false; + + actual = to % 2; + /* Start write from odd address. */ + if (actual) { + nor->program_opcode = SPINOR_OP_BP; + + /* write one byte. */ + ret = nor->write(nor, to, 1, buf); + if (ret < 0) + goto sst_write_err; + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto sst_write_err; + } + to += actual; + + /* Write out most of the data here. */ + for (; actual < len - 1; actual += 2) { + nor->program_opcode = SPINOR_OP_AAI_WP; + + /* write two bytes. */ + ret = nor->write(nor, to, 2, buf + actual); + if (ret < 0) + goto sst_write_err; + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto sst_write_err; + to += 2; + nor->sst_write_second = true; + } + nor->sst_write_second = false; + + write_disable(nor); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto sst_write_err; + + /* Write out trailing byte if it exists. */ + if (actual != len) { + write_enable(nor); + + nor->program_opcode = SPINOR_OP_BP; + ret = nor->write(nor, to, 1, buf + actual); + if (ret < 0) + goto sst_write_err; + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto sst_write_err; + write_disable(nor); + actual += 1; + } +sst_write_err: + *retlen += actual; + return ret; +} +#endif +/* + * Write an address range to the nor chip. Data must be written in + * FLASH_PAGESIZE chunks. The address range may be any size provided + * it is within the physical boundaries. 
+ */ +static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + size_t page_offset, page_remain, i; + ssize_t ret; + + dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + + for (i = 0; i < len; ) { + ssize_t written; + loff_t addr = to + i; + + /* + * If page_size is a power of two, the offset can be quickly + * calculated with an AND operation. On the other cases we + * need to do a modulus operation (more expensive). + * Power of two numbers have only one bit set and we can use + * the instruction hweight32 to detect if we need to do a + * modulus (do_div()) or not. + */ + if (hweight32(nor->page_size) == 1) { + page_offset = addr & (nor->page_size - 1); + } else { + u64 aux = addr; + + page_offset = do_div(aux, nor->page_size); + } + /* the size of data remaining on the first page */ + page_remain = min_t(size_t, + nor->page_size - page_offset, len - i); + + write_enable(nor); + ret = nor->write(nor, addr, page_remain, buf + i); + if (ret < 0) + goto write_err; + written = ret; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto write_err; + *retlen += written; + i += written; + if (written != page_remain) { + ret = -EIO; + goto write_err; + } + } + +write_err: + return ret; +} + +#ifdef CONFIG_SPI_FLASH_MACRONIX +/** + * macronix_quad_enable() - set QE bit in Status Register. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Status Register. + * + * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories. + * + * Return: 0 on success, -errno otherwise. + */ +static int macronix_quad_enable(struct spi_nor *nor) +{ + int ret, val; + + val = read_sr(nor); + if (val < 0) + return val; + if (val & SR_QUAD_EN_MX) + return 0; + + write_enable(nor); + + write_sr(nor, val | SR_QUAD_EN_MX); + + ret = spi_nor_wait_till_ready(nor); + if (ret) + return ret; + + ret = read_sr(nor); + if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { + dev_err(nor->dev, "Macronix Quad bit not set\n"); + return -EINVAL; + } + + return 0; +} +#endif + +#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) +/* + * Write status Register and configuration register with 2 bytes + * The first byte will be written to the status register, while the + * second byte will be written to the configuration register. + * Return negative if error occurred. + */ +static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr) +{ + int ret; + + write_enable(nor); + + ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2); + if (ret < 0) { + dev_dbg(nor->dev, + "error while writing configuration register\n"); + return -EINVAL; + } + + ret = spi_nor_wait_till_ready(nor); + if (ret) { + dev_dbg(nor->dev, + "timeout while writing configuration register\n"); + return ret; + } + + return 0; +} + +/** + * spansion_read_cr_quad_enable() - set QE bit in Configuration Register. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Configuration Register. + * This function should be used with QSPI memories supporting the Read + * Configuration Register (35h) instruction. + * + * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI + * memories. + * + * Return: 0 on success, -errno otherwise. + */ +static int spansion_read_cr_quad_enable(struct spi_nor *nor) +{ + u8 sr_cr[2]; + int ret; + + /* Check current Quad Enable bit value. 
*/ + ret = read_cr(nor); + if (ret < 0) { + dev_dbg(dev, "error while reading configuration register\n"); + return -EINVAL; + } + + if (ret & CR_QUAD_EN_SPAN) + return 0; + + sr_cr[1] = ret | CR_QUAD_EN_SPAN; + + /* Keep the current value of the Status Register. */ + ret = read_sr(nor); + if (ret < 0) { + dev_dbg(dev, "error while reading status register\n"); + return -EINVAL; + } + sr_cr[0] = ret; + + ret = write_sr_cr(nor, sr_cr); + if (ret) + return ret; + + /* Read back and check it. */ + ret = read_cr(nor); + if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { + dev_dbg(nor->dev, "Spansion Quad bit not set\n"); + return -EINVAL; + } + + return 0; +} +#endif /* CONFIG_SPI_FLASH_SPANSION */ + +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octo SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octo SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + + SNOR_CMD_PP_MAX +}; + +struct spi_nor_flash_parameter { + u64 size; + u32 page_size; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + int (*quad_enable)(struct spi_nor *nor); +}; + +static void +spi_nor_set_read_settings(struct spi_nor_read_command *read, + u8 num_mode_clocks, + u8 num_wait_states, + u8 opcode, + enum spi_nor_protocol proto) +{ + read->num_mode_clocks = num_mode_clocks; + read->num_wait_states = num_wait_states; + read->opcode = opcode; + read->proto = proto; +} + +static void +spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, + u8 opcode, + enum spi_nor_protocol proto) +{ + pp->opcode = opcode; + pp->proto = proto; +} + +static int spi_nor_init_params(struct spi_nor *nor, + const struct flash_info *info, + struct spi_nor_flash_parameter *params) +{ + /* Set legacy flash parameters as default. */ + memset(params, 0, sizeof(*params)); + + /* Set SPI NOR sizes. */ + params->size = info->sector_size * info->n_sectors; + params->page_size = info->page_size; + + /* (Fast) Read settings. */ + params->hwcaps.mask |= SNOR_HWCAPS_READ; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ], + 0, 0, SPINOR_OP_READ, + SNOR_PROTO_1_1_1); + + if (!(info->flags & SPI_NOR_NO_FR)) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST], + 0, 8, SPINOR_OP_READ_FAST, + SNOR_PROTO_1_1_1); + } + + if (info->flags & SPI_NOR_DUAL_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2], + 0, 8, SPINOR_OP_READ_1_1_2, + SNOR_PROTO_1_1_2); + } + + if (info->flags & SPI_NOR_QUAD_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4], + 0, 8, SPINOR_OP_READ_1_1_4, + SNOR_PROTO_1_1_4); + } + + /* Page Program settings. 
*/ + params->hwcaps.mask |= SNOR_HWCAPS_PP; + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], + SPINOR_OP_PP, SNOR_PROTO_1_1_1); + + if (info->flags & SPI_NOR_QUAD_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4; + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_1_1_4], + SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4); + } + + /* Select the procedure to set the Quad Enable bit. */ + if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD | + SNOR_HWCAPS_PP_QUAD)) { + switch (JEDEC_MFR(info)) { +#ifdef CONFIG_SPI_FLASH_MACRONIX + case SNOR_MFR_MACRONIX: + params->quad_enable = macronix_quad_enable; + break; +#endif + case SNOR_MFR_ST: + case SNOR_MFR_MICRON: + break; + + default: +#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) + /* Kept only for backward compatibility purpose. */ + params->quad_enable = spansion_read_cr_quad_enable; +#endif + break; + } + } +} + +static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == (int)hwcaps) + return table[i][1]; + + return -EINVAL; +} + +static int spi_nor_hwcaps_read2cmd(u32 hwcaps) +{ + static const int hwcaps_read2cmd[][2] = { + { SNOR_HWCAPS_READ, SNOR_CMD_READ }, + { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST }, + { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR }, + { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 }, + { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 }, + { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 }, + { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR }, + { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 }, + { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 }, + { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 }, + { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR }, + { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 }, + { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 }, + { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 }, + { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd, + ARRAY_SIZE(hwcaps_read2cmd)); +} + +static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) +{ + static const int hwcaps_pp2cmd[][2] = { + { SNOR_HWCAPS_PP, SNOR_CMD_PP }, + { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 }, + { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 }, + { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 }, + { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 }, + { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 }, + { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd, + ARRAY_SIZE(hwcaps_pp2cmd)); +} + +static int spi_nor_select_read(struct spi_nor *nor, + const struct spi_nor_flash_parameter *params, + u32 shared_hwcaps) +{ + int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1; + const struct spi_nor_read_command *read; + + if (best_match < 0) + return -EINVAL; + + cmd = spi_nor_hwcaps_read2cmd(BIT(best_match)); + if (cmd < 0) + return -EINVAL; + + read = ¶ms->reads[cmd]; + nor->read_opcode = read->opcode; + nor->read_proto = read->proto; + + /* + * In the spi-nor framework, we don't need to make the difference + * between mode clock cycles and wait state clock cycles. + * Indeed, the value of the mode clock cycles is used by a QSPI + * flash memory to know whether it should enter or leave its 0-4-4 + * (Continuous Read / XIP) mode. + * eXecution In Place is out of the scope of the mtd sub-system. + * Hence we choose to merge both mode and wait state clock cycles + * into the so called dummy clock cycles. 
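+	 *
+	 * For example, a read command described with 2 mode clocks and 6 wait
+	 * states simply ends up as read_dummy = 8 dummy cycles below.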
+ */ + nor->read_dummy = read->num_mode_clocks + read->num_wait_states; + return 0; +} + +static int spi_nor_select_pp(struct spi_nor *nor, + const struct spi_nor_flash_parameter *params, + u32 shared_hwcaps) +{ + int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1; + const struct spi_nor_pp_command *pp; + + if (best_match < 0) + return -EINVAL; + + cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match)); + if (cmd < 0) + return -EINVAL; + + pp = ¶ms->page_programs[cmd]; + nor->program_opcode = pp->opcode; + nor->write_proto = pp->proto; + return 0; +} + +static int spi_nor_select_erase(struct spi_nor *nor, + const struct flash_info *info) +{ + struct mtd_info *mtd = &nor->mtd; + +#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS + /* prefer "small sector" erase if possible */ + if (info->flags & SECT_4K) { + nor->erase_opcode = SPINOR_OP_BE_4K; + mtd->erasesize = 4096; + } else if (info->flags & SECT_4K_PMC) { + nor->erase_opcode = SPINOR_OP_BE_4K_PMC; + mtd->erasesize = 4096; + } else +#endif + { + nor->erase_opcode = SPINOR_OP_SE; + mtd->erasesize = info->sector_size; + } + return 0; +} + +static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info, + const struct spi_nor_flash_parameter *params, + const struct spi_nor_hwcaps *hwcaps) +{ + u32 ignored_mask, shared_mask; + bool enable_quad_io; + int err; + + /* + * Keep only the hardware capabilities supported by both the SPI + * controller and the SPI flash memory. + */ + shared_mask = hwcaps->mask & params->hwcaps.mask; + + /* SPI n-n-n protocols are not supported yet. */ + ignored_mask = (SNOR_HWCAPS_READ_2_2_2 | + SNOR_HWCAPS_READ_4_4_4 | + SNOR_HWCAPS_READ_8_8_8 | + SNOR_HWCAPS_PP_4_4_4 | + SNOR_HWCAPS_PP_8_8_8); + if (shared_mask & ignored_mask) { + dev_dbg(nor->dev, + "SPI n-n-n protocols are not supported yet.\n"); + shared_mask &= ~ignored_mask; + } + + /* Select the (Fast) Read command. */ + err = spi_nor_select_read(nor, params, shared_mask); + if (err) { + dev_dbg(nor->dev, + "can't select read settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Page Program command. */ + err = spi_nor_select_pp(nor, params, shared_mask); + if (err) { + dev_dbg(nor->dev, + "can't select write settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Sector Erase command. */ + err = spi_nor_select_erase(nor, info); + if (err) { + dev_dbg(nor->dev, + "can't select erase settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Enable Quad I/O if needed. 
*/ + enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 || + spi_nor_get_protocol_width(nor->write_proto) == 4); + if (enable_quad_io && params->quad_enable) + nor->quad_enable = params->quad_enable; + else + nor->quad_enable = NULL; + + return 0; +} + +static int spi_nor_init(struct spi_nor *nor) +{ + int err; + + /* + * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up + * with the software protection bits set + */ + if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || + JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || + JEDEC_MFR(nor->info) == SNOR_MFR_SST || + nor->info->flags & SPI_NOR_HAS_LOCK) { + write_enable(nor); + write_sr(nor, 0); + spi_nor_wait_till_ready(nor); + } + + if (nor->quad_enable) { + err = nor->quad_enable(nor); + if (err) { + dev_dbg(nor->dev, "quad mode not supported\n"); + return err; + } + } + + return 0; +} + +int spi_nor_scan(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter params; + const struct flash_info *info = NULL; + struct mtd_info *mtd = &nor->mtd; + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + struct spi_slave *spi = nor->spi; + int ret; + + /* Reset SPI protocol for all commands. */ + nor->reg_proto = SNOR_PROTO_1_1_1; + nor->read_proto = SNOR_PROTO_1_1_1; + nor->write_proto = SNOR_PROTO_1_1_1; + nor->read = spi_nor_read_data; + nor->write = spi_nor_write_data; + nor->read_reg = spi_nor_read_reg; + nor->write_reg = spi_nor_write_reg; + + if (spi->mode & SPI_RX_QUAD) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + + if (spi->mode & SPI_TX_QUAD) + hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 | + SNOR_HWCAPS_PP_1_1_4 | + SNOR_HWCAPS_PP_1_4_4); + } else if (spi->mode & SPI_RX_DUAL) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + + if (spi->mode & SPI_TX_DUAL) + hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2; + } + + info = spi_nor_read_id(nor); + if (IS_ERR_OR_NULL(info)) + return -ENOENT; + + ret = spi_nor_init_params(nor, info, ¶ms); + if (ret) + return ret; + + if (!mtd->name) + mtd->name = info->name; + mtd->priv = nor; + mtd->type = MTD_NORFLASH; + mtd->writesize = 1; + mtd->flags = MTD_CAP_NORFLASH; + mtd->size = params.size; + mtd->_erase = spi_nor_erase; + mtd->_read = spi_nor_read; + +#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST) + /* NOR protection support for STmicro/Micron chips and similar */ + if (JEDEC_MFR(info) == SNOR_MFR_ST || + JEDEC_MFR(info) == SNOR_MFR_MICRON || + JEDEC_MFR(info) == SNOR_MFR_SST || + info->flags & SPI_NOR_HAS_LOCK) { + nor->flash_lock = stm_lock; + nor->flash_unlock = stm_unlock; + nor->flash_is_locked = stm_is_locked; + } +#endif + +#ifdef CONFIG_SPI_FLASH_SST + /* sst nor chips use AAI word program */ + if (info->flags & SST_WRITE) + mtd->_write = sst_write; + else +#endif + mtd->_write = spi_nor_write; + + if (info->flags & USE_FSR) + nor->flags |= SNOR_F_USE_FSR; + if (info->flags & SPI_NOR_HAS_TB) + nor->flags |= SNOR_F_HAS_SR_TB; + if (info->flags & NO_CHIP_ERASE) + nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; + if (info->flags & USE_CLSR) + nor->flags |= SNOR_F_USE_CLSR; + + if (info->flags & SPI_NOR_NO_ERASE) + mtd->flags |= MTD_NO_ERASE; + + nor->page_size = params.page_size; + mtd->writebufsize = nor->page_size; + + /* Some devices cannot do fast-read, no matter what DT tells us */ + if ((info->flags & SPI_NOR_NO_FR) || (spi->mode & SPI_RX_SLOW)) + params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; + + /* + * Configure the SPI memory: + * - select op codes for (Fast) Read, Page Program and Sector Erase. 
+ * - set the number of dummy cycles (mode cycles + wait states). + * - set the SPI protocols for register and memory accesses. + * - set the Quad Enable bit if needed (required by SPI x-y-4 protos). + */ + ret = spi_nor_setup(nor, info, ¶ms, &hwcaps); + if (ret) + return ret; + + if (info->addr_width) { + nor->addr_width = info->addr_width; + } else { + nor->addr_width = 3; + } + + if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { + dev_dbg(dev, "address width is too large: %u\n", + nor->addr_width); + return -EINVAL; + } + + /* Send all the required SPI flash commands to initialize device */ + nor->info = info; + ret = spi_nor_init(nor); + if (ret) + return ret; + + nor->name = mtd->name; + nor->size = mtd->size; + nor->erase_size = mtd->erasesize; + nor->sector_size = mtd->erasesize; + +#ifndef CONFIG_SPL_BUILD + printf("SF: Detected %s with page size ", nor->name); + print_size(nor->page_size, ", erase size "); + print_size(nor->erase_size, ", total "); + print_size(nor->size, ""); + puts("\n"); +#endif + + return 0; +} diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h new file mode 100644 index 0000000000..3555518bd3 --- /dev/null +++ b/include/linux/mtd/cfi.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2000-2010 David Woodhouse et al. + * + */ + +#ifndef __MTD_CFI_H__ +#define __MTD_CFI_H__ + +#define CFI_MFR_ANY 0xFFFF +#define CFI_ID_ANY 0xFFFF +#define CFI_MFR_CONTINUATION 0x007F + +#define CFI_MFR_AMD 0x0001 +#define CFI_MFR_AMIC 0x0037 +#define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_EON 0x001C +#define CFI_MFR_FUJITSU 0x0004 +#define CFI_MFR_HYUNDAI 0x00AD +#define CFI_MFR_INTEL 0x0089 +#define CFI_MFR_MACRONIX 0x00C2 +#define CFI_MFR_NEC 0x0010 +#define CFI_MFR_PMC 0x009D +#define CFI_MFR_SAMSUNG 0x00EC +#define CFI_MFR_SHARP 0x00B0 +#define CFI_MFR_SST 0x00BF +#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_MICRON 0x002C /* Micron */ +#define CFI_MFR_TOSHIBA 0x0098 +#define CFI_MFR_WINBOND 0x00DA + +#endif /* __MTD_CFI_H__ */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h new file mode 100644 index 0000000000..507458a760 --- /dev/null +++ b/include/linux/mtd/spi-nor.h @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Synced from Linux v4.19 + */ + +#ifndef __LINUX_MTD_SPI_NOR_H +#define __LINUX_MTD_SPI_NOR_H + +#include +#include +#include + +/* + * Manufacturer IDs + * + * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. + * Sometimes these are the same as CFI IDs, but sometimes they aren't. + */ +#define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 +#define SNOR_MFR_INTEL CFI_MFR_INTEL +#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_MICRON CFI_MFR_MICRON /* ST Micro <--> Micron */ +#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX +#define SNOR_MFR_SPANSION CFI_MFR_AMD +#define SNOR_MFR_SST CFI_MFR_SST +#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ + +/* + * Note on opcode nomenclature: some opcodes have a format like + * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number + * of I/O lines used for the opcode, address, and data (respectively). The + * FUNCTION has an optional suffix of '4', to represent an opcode which + * requires a 4-byte (32-bit) address. + */ + +/* Flash opcodes. 
*/ +#define SPINOR_OP_WREN 0x06 /* Write enable */ +#define SPINOR_OP_RDSR 0x05 /* Read status register */ +#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ +#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */ +#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */ +#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */ +#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ +#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ +#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ +#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ +#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ +#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ +#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ +#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ +#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */ +#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */ +#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ +#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ +#define SPINOR_OP_RDCR 0x35 /* Read configuration register */ +#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ +#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ +#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ +#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ + +/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ +#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */ +#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ +#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ +#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ +#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ +#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ +#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ +#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ +#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ + +/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */ +#define SPINOR_OP_READ_1_1_1_DTR 0x0d +#define SPINOR_OP_READ_1_2_2_DTR 0xbd +#define SPINOR_OP_READ_1_4_4_DTR 0xed + +#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e +#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe +#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee + +/* Used for SST flashes only. */ +#define SPINOR_OP_BP 0x02 /* Byte program */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ +#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ + +/* Used for S3AN flashes only */ +#define SPINOR_OP_XSE 0x50 /* Sector erase */ +#define SPINOR_OP_XPP 0x82 /* Page program */ +#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ + +#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ +#define XSR_RDY BIT(7) /* Ready */ + +/* Used for Macronix and Winbond flashes. */ +#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ +#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ + +/* Used for Spansion flashes only. 
*/ +#define SPINOR_OP_BRWR 0x17 /* Bank register write */ +#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ + +/* Used for Micron flashes only. */ +#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ +#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ + +/* Status Register bits. */ +#define SR_WIP BIT(0) /* Write in progress */ +#define SR_WEL BIT(1) /* Write enable latch */ +/* meaning of other SR_* bits may differ between vendors */ +#define SR_BP0 BIT(2) /* Block protect 0 */ +#define SR_BP1 BIT(3) /* Block protect 1 */ +#define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_TB BIT(5) /* Top/Bottom protect */ +#define SR_SRWD BIT(7) /* SR write protect */ +/* Spansion/Cypress specific status bits */ +#define SR_E_ERR BIT(5) +#define SR_P_ERR BIT(6) + +#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ + +/* Enhanced Volatile Configuration Register bits */ +#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ + +/* Flag Status Register bits */ +#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ +#define FSR_E_ERR BIT(5) /* Erase operation status */ +#define FSR_P_ERR BIT(4) /* Program operation status */ +#define FSR_PT_ERR BIT(1) /* Protection error bit */ + +/* Configuration Register bits. */ +#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ + +/* Status Register 2 bits. */ +#define SR2_QUAD_EN_BIT7 BIT(7) + +/* Supported SPI protocols */ +#define SNOR_PROTO_INST_MASK GENMASK(23, 16) +#define SNOR_PROTO_INST_SHIFT 16 +#define SNOR_PROTO_INST(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \ + SNOR_PROTO_INST_MASK) + +#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8) +#define SNOR_PROTO_ADDR_SHIFT 8 +#define SNOR_PROTO_ADDR(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \ + SNOR_PROTO_ADDR_MASK) + +#define SNOR_PROTO_DATA_MASK GENMASK(7, 0) +#define SNOR_PROTO_DATA_SHIFT 0 +#define SNOR_PROTO_DATA(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \ + SNOR_PROTO_DATA_MASK) + +#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */ + +#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \ + (SNOR_PROTO_INST(_inst_nbits) | \ + SNOR_PROTO_ADDR(_addr_nbits) | \ + SNOR_PROTO_DATA(_data_nbits)) +#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \ + (SNOR_PROTO_IS_DTR | \ + SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)) + +enum spi_nor_protocol { + SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1), + SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2), + SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4), + SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8), + SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2), + SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4), + SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8), + SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2), + SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4), + SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8), + + SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1), + SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2), + SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4), + SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8), +}; + +static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto) +{ + return !!(proto & SNOR_PROTO_IS_DTR); +} + +static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >> + SNOR_PROTO_INST_SHIFT; +} + +static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >> + SNOR_PROTO_ADDR_SHIFT; +} + 
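+/*
+ * Worked example of the encoding above: SNOR_PROTO_1_1_4 is
+ * SNOR_PROTO_STR(1, 1, 4) == 0x00010104, i.e. a 1-bit instruction phase,
+ * a 1-bit address phase and a 4-bit data phase, so the *_nbits() helpers
+ * return 1, 1 and 4 and spi_nor_get_protocol_width() reports 4.
+ */
+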
+#define SPI_NOR_MAX_CMD_SIZE 8
+enum spi_nor_ops {
+	SPI_NOR_OPS_READ = 0,
+	SPI_NOR_OPS_WRITE,
+	SPI_NOR_OPS_ERASE,
+	SPI_NOR_OPS_LOCK,
+	SPI_NOR_OPS_UNLOCK,
+};
+
+enum spi_nor_option_flags {
+	SNOR_F_USE_FSR = BIT(0),
+	SNOR_F_HAS_SR_TB = BIT(1),
+	SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+	SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+	SNOR_F_READY_XSR_RDY = BIT(4),
+	SNOR_F_USE_CLSR = BIT(5),
+	SNOR_F_BROKEN_RESET = BIT(6),
+};
+
+/**
+ * struct flash_info - Forward declaration of a structure used internally by
+ *	spi_nor_scan()
+ */
+struct flash_info;
+
+/* TODO: Remove, once all users of spi_flash interface are moved to MTD */
+#define spi_flash spi_nor
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: point to a mtd_info structure
+ * @dev: point to a spi device, or a spi nor controller device.
+ * @spi: point to the spi slave device
+ * @info: spi-nor part JEDEC MFR id and other info
+ * @page_size: the page size of the SPI NOR
+ * @addr_width: number of address bytes
+ * @erase_opcode: the opcode for erasing a sector
+ * @read_opcode: the read opcode
+ * @read_dummy: the dummy needed by the read operation
+ * @program_opcode: the program opcode
+ * @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @read_proto: the SPI protocol for read operations
+ * @write_proto: the SPI protocol for write operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @cmd_buf: used by the write_reg
+ * @prepare: [OPTIONAL] do some preparations for the
+ *	read/write/erase/lock/unlock operations
+ * @unprepare: [OPTIONAL] do some post work after the
+ *	read/write/erase/lock/unlock operations
+ * @read_reg: [DRIVER-SPECIFIC] read out the register
+ * @write_reg: [DRIVER-SPECIFIC] write data to the register
+ * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
+ * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
+ * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
+ *	at the offset @offs; if not provided by the driver,
+ *	spi-nor will send the erase opcode via write_reg()
+ * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
+ *	completely locked
+ * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode
+ * @priv: the private data
+ */
+struct spi_nor {
+	struct mtd_info mtd;
+	struct udevice *dev;
+	struct spi_slave *spi;
+	const struct flash_info *info;
+	u32 page_size;
+	u8 addr_width;
+	u8 erase_opcode;
+	u8 read_opcode;
+	u8 read_dummy;
+	u8 program_opcode;
+	enum spi_nor_protocol read_proto;
+	enum spi_nor_protocol write_proto;
+	enum spi_nor_protocol reg_proto;
+	bool sst_write_second;
+	u32 flags;
+	u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+
+	int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+	int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+
+	ssize_t (*read)(struct spi_nor *nor, loff_t from,
+			size_t len, u_char *read_buf);
+	ssize_t (*write)(struct spi_nor *nor, loff_t to,
+			 size_t len, const u_char *write_buf);
+	int (*erase)(struct spi_nor *nor, loff_t offs);
+
+	int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*quad_enable)(struct spi_nor *nor);
+
+	void *priv;
+/* Compatibility for spi_flash, remove once sf layer is merged with mtd */
+	const char *name;
+	u32 size;
+	u32 sector_size;
+	u32 erase_size;
+};
+
+static inline void spi_nor_set_flash_node(struct spi_nor *nor,
+					  const struct device_node *np)
+{
+	mtd_set_of_node(&nor->mtd, np);
+}
+
+static inline const struct
+device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+{
+	return mtd_get_of_node(&nor->mtd);
+}
+
+/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask: the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+	u32 mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * As a matter of performance, it is relevant to use Octo SPI protocols first,
+ * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ BIT(0)
+#define SNOR_HWCAPS_READ_FAST BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2 BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2 BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2 BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4 BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4 BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
+
+#define SNOR_HWCAPS_READ_OCTO GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
+#define SNOR_HWCAPS_PP BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+
+/**
+ * spi_nor_scan() - scan the SPI NOR
+ * @nor: the spi_nor structure
+ *
+ * The drivers can use this function to scan the SPI NOR.
+ * In the scanning, it will try to get all the necessary information to
+ * fill the mtd_info{} and the spi_nor{}.
+ *
+ * Return: 0 for success, others for failure.
+ */
+int spi_nor_scan(struct spi_nor *nor);
+
+#endif
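To make the driver-side contract above concrete, here is a minimal, hypothetical sketch (the example_* names are invented for illustration and are not part of this patch) of how a SPI flash interface driver could fill in the DRIVER-SPECIFIC hooks and hand the device to spi_nor_scan(), which then populates nor->mtd, the opcodes and the protocols from the ID/SFDP data:

/* Hypothetical accessors a controller glue driver would provide */
int example_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int example_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
ssize_t example_read(struct spi_nor *nor, loff_t from, size_t len,
		     u_char *buf);
ssize_t example_write(struct spi_nor *nor, loff_t to, size_t len,
		      const u_char *buf);

static int example_spi_nor_probe(struct udevice *dev)
{
	struct spi_nor *nor = dev_get_uclass_priv(dev);

	nor->dev = dev;
	nor->spi = dev_get_parent_priv(dev);

	/* Register and data accessors implemented on top of the SPI bus */
	nor->read_reg = example_read_reg;
	nor->write_reg = example_write_reg;
	nor->read = example_read;
	nor->write = example_write;

	/* Identify the flash and fill in the rest of *nor; 0 on success */
	return spi_nor_scan(nor);
}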