#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
-#define GD5FXGQ4XEXXG_REG_STATUS2 0xf0
+#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4)
+#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4)
+
+#define GD5FXGQXXEXXG_REG_STATUS2 0xf0

-static SPINAND_OP_VARIANTS(read_cache_variants,
+/* Q4 devices, QUADIO: Dummy bytes valid for 1 and 2 GBit variants */
+static SPINAND_OP_VARIANTS(gd5fxgq4_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+/* Q5 devices, QUADIO: Dummy bytes only valid for 1 GBit variants */
+static SPINAND_OP_VARIANTS(gd5f1gq5_read_cache_variants,
+		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
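For context, the second argument of each variant above is the number of dummy bytes clocked out before data. The QUADIO helper in spinand.h expands roughly to the following (reproduced for reference; verify against your tree), which is where the Q5's 2 dummy bytes versus the Q4's 1 end up:

	#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len)	\
		SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),				\
			   SPI_MEM_OP_ADDR(2, addr, 4),				\
			   SPI_MEM_OP_DUMMY(ndummy, 4),				\
			   SPI_MEM_OP_DATA_IN(len, buf, 4))

At op-variant selection time the core walks each list in order and picks the first variant the controller supports, so listing QUADIO first keeps it preferred where available.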
static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int gd5fxgq4xexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
+static int gd5fxgqxxexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	region->offset = 64;
	region->length = 64;

	return 0;
}
-static int gd5fxgq4xexxg_ooblayout_free(struct mtd_info *mtd, int section,
+static int gd5fxgqxxexxg_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 1 byte for the BBM */
	region->offset = 1;
	region->length = 63;

	return 0;
}

static int gd5fxgq4xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
-	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4XEXXG_REG_STATUS2,
+	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	[...]

	return -EINVAL;
}
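For readers tracing the status2 access: SPINAND_GET_FEATURE_OP wraps the standard 0Fh GET FEATURE command, with the register offset as its one-byte address cycle. Its definition in spinand.h looks roughly like this (reproduced for context; verify against your tree):

	#define SPINAND_GET_FEATURE_OP(reg, valptr)			\
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1),			\
			   SPI_MEM_OP_ADDR(1, reg, 1),			\
			   SPI_MEM_OP_NO_DUMMY,				\
			   SPI_MEM_OP_DATA_IN(1, valptr, 1))

Both the Q4 path above and the new Q5 path below reuse it to read one byte from vendor register 0xf0, now named GD5FXGQXXEXXG_REG_STATUS2 to cover both families.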
-static const struct mtd_ooblayout_ops gd5fxgq4xexxg_ooblayout = {
-	.ecc = gd5fxgq4xexxg_ooblayout_ecc,
-	.rfree = gd5fxgq4xexxg_ooblayout_free,
+static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+					u8 status)
+{
+	u8 status2;
+	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+						      &status2);
+	int ret;
+
+	switch (status & STATUS_ECC_MASK) {
+	case STATUS_ECC_NO_BITFLIPS:
+		return 0;
+
+	case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
+		/*
+		 * Read status2 register to determine a more fine-grained
+		 * bit error status
+		 */
+		ret = spi_mem_exec_op(spinand->slave, &op);
+		if (ret)
+			return ret;
+
+		/*
+		 * 1 ... 4 bits are flipped (and corrected); bits sorted
+		 * this way (1...0): ECCSE1, ECCSE0
+		 */
+		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
+
+	case STATUS_ECC_UNCOR_ERROR:
+		return -EBADMSG;
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
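A worked decoding of the new Q5 return value, assuming the generic STATUS_ECC_MASK of GENMASK(5, 4) (0x30) from spinand.h: ECCSE[1:0] sit in bits 5:4 of status2 and encode the corrected bitflip count minus one:

	((0x00 & 0x30) >> 4) + 1 = 1 bitflip
	((0x10 & 0x30) >> 4) + 1 = 2 bitflips
	((0x20 & 0x30) >> 4) + 1 = 3 bitflips
	((0x30 & 0x30) >> 4) + 1 = 4 bitflips

The count returned here is what the MTD core later weighs against mtd->bitflip_threshold when deciding whether to report the read as -EUCLEAN.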
+static const struct mtd_ooblayout_ops gd5fxgqxxexxg_ooblayout = {
+	.ecc = gd5fxgqxxexxg_ooblayout_ecc,
+	.rfree = gd5fxgqxxexxg_ooblayout_free,
};
static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&gd5fxgq4_read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&gd5fxgq4xexxg_ooblayout,
+ SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
gd5fxgq4xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5UExxG", 0x51,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
};
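Reading the new entry against the macro signatures (an annotated sketch based on the NAND_MEMORG/NAND_ECCREQ definitions in the NAND headers; argument names may differ slightly between trees):

	SPINAND_INFO("GD5F1GQ5UExxG", 0x51,	/* name, device ID byte */
		     NAND_MEMORG(1,		/* bits per cell (SLC) */
				 2048, 128,	/* page size, OOB size */
				 64, 1024,	/* pages per block, blocks per LUN */
				 1, 1, 1),	/* planes per LUN, LUNs, targets */
		     NAND_ECCREQ(4, 512),	/* ECC strength, step size */
		     ...);

Compared with the Q4 entry, only the device ID (0x51 vs. 0xd1), the ECC requirement (4 instead of 8 bits per 512-byte step), the read-variant list, and the status callback change; the memory organization is identical.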
static int gigadevice_spinand_detect(struct spinand_device *spinand)