#define CMD_4BYTE_READ 0x13
#define CMD_4BYTE_FAST_READ 0x0C
-int cadence_qspi_apb_dma_read(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_dma_read(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
u32 reg, ret, rx_rem, n_rx, bytes_to_dma, data;
if (bytes_to_dma) {
cadence_qspi_apb_enable_linear_mode(false);
- reg = readl(plat->regbase + CQSPI_REG_CONFIG);
+ reg = readl(priv->regbase + CQSPI_REG_CONFIG);
reg |= CQSPI_REG_CONFIG_ENBL_DMA;
- writel(reg, plat->regbase + CQSPI_REG_CONFIG);
+ writel(reg, priv->regbase + CQSPI_REG_CONFIG);
- writel(bytes_to_dma, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);
+ writel(bytes_to_dma, priv->regbase + CQSPI_REG_INDIRECTRDBYTES);
writel(CQSPI_DFLT_INDIR_TRIG_ADDR_RANGE,
- plat->regbase + CQSPI_REG_INDIR_TRIG_ADDR_RANGE);
+ priv->regbase + CQSPI_REG_INDIR_TRIG_ADDR_RANGE);
writel(CQSPI_DFLT_DMA_PERIPH_CFG,
- plat->regbase + CQSPI_REG_DMA_PERIPH_CFG);
- writel((unsigned long)rxbuf, plat->regbase +
+ priv->regbase + CQSPI_REG_DMA_PERIPH_CFG);
+ writel((unsigned long)rxbuf, priv->regbase +
CQSPI_DMA_DST_ADDR_REG);
- writel(plat->trigger_address, plat->regbase +
+ writel(priv->trigger_address, priv->regbase +
CQSPI_DMA_SRC_RD_ADDR_REG);
- writel(bytes_to_dma, plat->regbase +
+ writel(bytes_to_dma, priv->regbase +
CQSPI_DMA_DST_SIZE_REG);
flush_dcache_range((unsigned long)rxbuf,
(unsigned long)rxbuf + bytes_to_dma);
writel(CQSPI_DFLT_DST_CTRL_REG_VAL,
- plat->regbase + CQSPI_DMA_DST_CTRL_REG);
+ priv->regbase + CQSPI_DMA_DST_CTRL_REG);
/* Start the indirect read transfer */
- writel(CQSPI_REG_INDIRECTRD_START, plat->regbase +
+ writel(CQSPI_REG_INDIRECTRD_START, priv->regbase +
CQSPI_REG_INDIRECTRD);
/* Wait for dma to complete transfer */
- ret = cadence_qspi_apb_wait_for_dma_cmplt(plat);
+ ret = cadence_qspi_apb_wait_for_dma_cmplt(priv);
if (ret)
return ret;
/* Clear indirect completion status */
- writel(CQSPI_REG_INDIRECTRD_DONE, plat->regbase +
+ writel(CQSPI_REG_INDIRECTRD_DONE, priv->regbase +
CQSPI_REG_INDIRECTRD);
rxbuf += bytes_to_dma;
}
if (rx_rem) {
- reg = readl(plat->regbase + CQSPI_REG_CONFIG);
+ reg = readl(priv->regbase + CQSPI_REG_CONFIG);
reg &= ~CQSPI_REG_CONFIG_ENBL_DMA;
- writel(reg, plat->regbase + CQSPI_REG_CONFIG);
+ writel(reg, priv->regbase + CQSPI_REG_CONFIG);
- reg = readl(plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
+ reg = readl(priv->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
reg += bytes_to_dma;
- writel(reg, plat->regbase + CQSPI_REG_CMDADDRESS);
+ writel(reg, priv->regbase + CQSPI_REG_CMDADDRESS);
- addr_bytes = readl(plat->regbase + CQSPI_REG_SIZE) &
+ addr_bytes = readl(priv->regbase + CQSPI_REG_SIZE) &
CQSPI_REG_SIZE_ADDRESS_MASK;
opcode = CMD_4BYTE_FAST_READ;
dummy_cycles = 8;
writel((dummy_cycles << CQSPI_REG_RD_INSTR_DUMMY_LSB) | opcode,
- plat->regbase + CQSPI_REG_RD_INSTR);
+ priv->regbase + CQSPI_REG_RD_INSTR);
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
reg |= (addr_bytes & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
- dummy_cycles = (readl(plat->regbase + CQSPI_REG_RD_INSTR) >>
+ dummy_cycles = (readl(priv->regbase + CQSPI_REG_RD_INSTR) >>
CQSPI_REG_RD_INSTR_DUMMY_LSB) &
CQSPI_REG_RD_INSTR_DUMMY_MASK;
reg |= (dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_MASK) <<
CQSPI_REG_CMDCTRL_DUMMY_LSB;
reg |= (((rx_rem - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) <<
CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
- ret = cadence_qspi_apb_exec_flash_cmd(plat->regbase, reg);
+ ret = cadence_qspi_apb_exec_flash_cmd(priv->regbase, reg);
if (ret)
return ret;
- data = readl(plat->regbase + CQSPI_REG_CMDREADDATALOWER);
+ data = readl(priv->regbase + CQSPI_REG_CMDREADDATALOWER);
memcpy(rxbuf, &data, rx_rem);
}
return 0;
}
-int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_plat *plat)
+int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_priv *priv)
{
u32 timeout = CQSPI_DMA_TIMEOUT;
- while (!(readl(plat->regbase + CQSPI_DMA_DST_I_STS_REG) &
+ while (!(readl(priv->regbase + CQSPI_DMA_DST_I_STS_REG) &
CQSPI_DMA_DST_I_STS_DONE) && timeout--)
udelay(1);
if (!timeout) {
return -ETIMEDOUT;
}
- writel(readl(plat->regbase + CQSPI_DMA_DST_I_STS_REG),
- plat->regbase + CQSPI_DMA_DST_I_STS_REG);
+ writel(readl(priv->regbase + CQSPI_DMA_DST_I_STS_REG),
+ priv->regbase + CQSPI_DMA_DST_I_STS_REG);
return 0;
}
#define CQSPI_READ 2
#define CQSPI_WRITE 3
-__weak int cadence_qspi_apb_dma_read(struct cadence_spi_plat *plat,
+__weak int cadence_qspi_apb_dma_read(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
return 0;
}
static int cadence_spi_write_speed(struct udevice *bus, uint hz)
{
- struct cadence_spi_plat *plat = dev_get_plat(bus);
struct cadence_spi_priv *priv = dev_get_priv(bus);
cadence_qspi_apb_config_baudrate_div(priv->regbase,
- plat->ref_clk_hz, hz);
+ priv->ref_clk_hz, hz);
/* Reconfigure delay timing if speed is changed. */
- cadence_qspi_apb_delay(priv->regbase, plat->ref_clk_hz, hz,
- plat->tshsl_ns, plat->tsd2d_ns,
- plat->tchsh_ns, plat->tslch_ns);
+ cadence_qspi_apb_delay(priv->regbase, priv->ref_clk_hz, hz,
+ priv->tshsl_ns, priv->tsd2d_ns,
+ priv->tchsh_ns, priv->tslch_ns);
return 0;
}
-static int cadence_spi_read_id(struct cadence_spi_plat *plat, u8 len,
+static int cadence_spi_read_id(struct cadence_spi_priv *priv, u8 len,
u8 *idcode)
{
int err;
+
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_IN(len, idcode, 1));
- err = cadence_qspi_apb_command_read_setup(plat, &op);
+ err = cadence_qspi_apb_command_read_setup(priv, &op);
if (!err)
- err = cadence_qspi_apb_command_read(plat, &op);
+ err = cadence_qspi_apb_command_read(priv, &op);
return err;
}
static int spi_calibration(struct udevice *bus, uint hz)
{
struct cadence_spi_priv *priv = dev_get_priv(bus);
- struct cadence_spi_plat *plat = dev_get_plat(bus);
void *base = priv->regbase;
unsigned int idcode = 0, temp = 0;
int err = 0, i, range_lo = -1, range_hi = -1;
cadence_qspi_apb_controller_enable(base);
/* read the ID which will be our golden value */
- err = cadence_spi_read_id(plat, 3, (u8 *)&idcode);
+ err = cadence_spi_read_id(priv, 3, (u8 *)&idcode);
if (err) {
puts("SF: Calibration failed (read)\n");
return err;
cadence_qspi_apb_controller_enable(base);
/* issue a RDID to get the ID value */
- err = cadence_spi_read_id(plat, 3, (u8 *)&temp);
+ err = cadence_spi_read_id(priv, 3, (u8 *)&temp);
if (err) {
puts("SF: Calibration failed (read)\n");
return err;
static int cadence_spi_set_speed(struct udevice *bus, uint hz)
{
- struct cadence_spi_plat *plat = dev_get_plat(bus);
struct cadence_spi_priv *priv = dev_get_priv(bus);
int err;
- if (!hz || hz > plat->max_hz)
- hz = plat->max_hz;
-
+ if (!hz || hz > priv->max_hz)
+ hz = priv->max_hz;
/* Disable QSPI */
cadence_qspi_apb_controller_disable(priv->regbase);
* If the device tree already provides a read delay value, use that
* instead of calibrating.
*/
- if (plat->read_delay >= 0) {
+ if (priv->read_delay >= 0) {
cadence_spi_write_speed(bus, hz);
cadence_qspi_apb_readdata_capture(priv->regbase, 1,
- plat->read_delay);
+ priv->read_delay);
} else if (priv->previous_hz != hz ||
priv->qspi_calibrated_hz != hz ||
priv->qspi_calibrated_cs != spi_chip_select(bus)) {
struct clk clk;
int ret;
- priv->regbase = plat->regbase;
- priv->ahbbase = plat->ahbbase;
+ priv->regbase = plat->regbase;
+ priv->ahbbase = plat->ahbbase;
+ priv->is_dma = plat->is_dma;
+ priv->is_decoded_cs = plat->is_decoded_cs;
+ priv->fifo_depth = plat->fifo_depth;
+ priv->fifo_width = plat->fifo_width;
+ priv->trigger_address = plat->trigger_address;
+ priv->read_delay = plat->read_delay;
+ priv->ahbsize = plat->ahbsize;
+ priv->max_hz = plat->max_hz;
+
+ priv->page_size = plat->page_size;
+ priv->block_size = plat->block_size;
+ priv->tshsl_ns = plat->tshsl_ns;
+ priv->tsd2d_ns = plat->tsd2d_ns;
+ priv->tchsh_ns = plat->tchsh_ns;
+ priv->tslch_ns = plat->tslch_ns;
if (CONFIG_IS_ENABLED(ZYNQMP_FIRMWARE))
xilinx_pm_request(PM_REQUEST_NODE, PM_DEV_OSPI,
ZYNQMP_PM_CAPABILITY_ACCESS, ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_NO, NULL);
- if (plat->ref_clk_hz == 0) {
+ if (priv->ref_clk_hz == 0) {
ret = clk_get_by_index(bus, 0, &clk);
if (ret) {
#ifdef CONFIG_HAS_CQSPI_REF_CLK
- plat->ref_clk_hz = CONFIG_CQSPI_REF_CLK;
+ priv->ref_clk_hz = CONFIG_CQSPI_REF_CLK;
#elif defined(CONFIG_ARCH_SOCFPGA)
- plat->ref_clk_hz = cm_get_qspi_controller_clk_hz();
+ priv->ref_clk_hz = cm_get_qspi_controller_clk_hz();
#else
return ret;
#endif
} else {
- plat->ref_clk_hz = clk_get_rate(&clk);
+ priv->ref_clk_hz = clk_get_rate(&clk);
clk_free(&clk);
- if (IS_ERR_VALUE(plat->ref_clk_hz))
- return plat->ref_clk_hz;
+ if (IS_ERR_VALUE(priv->ref_clk_hz))
+ return priv->ref_clk_hz;
}
}
reset_deassert_bulk(priv->resets);
if (!priv->qspi_is_init) {
- cadence_qspi_apb_controller_init(plat);
+ cadence_qspi_apb_controller_init(priv);
priv->qspi_is_init = 1;
}
- plat->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, plat->ref_clk_hz);
+ priv->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, priv->ref_clk_hz);
if (CONFIG_IS_ENABLED(ARCH_VERSAL)) {
/* Versal platform uses spi calibration to set read delay */
- if (plat->read_delay >= 0)
- plat->read_delay = -1;
+ if (priv->read_delay >= 0)
+ priv->read_delay = -1;
/* Reset ospi flash device */
ret = cadence_qspi_versal_flash_reset(bus);
if (ret)
static int cadence_spi_set_mode(struct udevice *bus, uint mode)
{
- struct cadence_spi_plat *plat = dev_get_plat(bus);
struct cadence_spi_priv *priv = dev_get_priv(bus);
/* Disable QSPI */
cadence_qspi_apb_set_clk_mode(priv->regbase, mode);
/* Enable Direct Access Controller */
- if (plat->use_dac_mode)
+ if (priv->use_dac_mode)
cadence_qspi_apb_dac_mode_enable(priv->regbase);
/* Enable QSPI */
const struct spi_mem_op *op)
{
struct udevice *bus = spi->dev->parent;
- struct cadence_spi_plat *plat = dev_get_plat(bus);
struct cadence_spi_priv *priv = dev_get_priv(bus);
void *base = priv->regbase;
int err = 0;
/* Set Chip select */
cadence_qspi_apb_chipselect(base, spi_chip_select(spi->dev),
- plat->is_decoded_cs);
+ priv->is_decoded_cs);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
if (!op->addr.nbytes)
switch (mode) {
case CQSPI_STIG_READ:
- err = cadence_qspi_apb_command_read_setup(plat, op);
+ err = cadence_qspi_apb_command_read_setup(priv, op);
if (!err)
- err = cadence_qspi_apb_command_read(plat, op);
+ err = cadence_qspi_apb_command_read(priv, op);
break;
case CQSPI_STIG_WRITE:
- err = cadence_qspi_apb_command_write_setup(plat, op);
+ err = cadence_qspi_apb_command_write_setup(priv, op);
if (!err)
- err = cadence_qspi_apb_command_write(plat, op);
+ err = cadence_qspi_apb_command_write(priv, op);
break;
case CQSPI_READ:
- err = cadence_qspi_apb_read_setup(plat, op);
+ err = cadence_qspi_apb_read_setup(priv, op);
if (!err) {
- if (plat->is_dma)
- err = cadence_qspi_apb_dma_read(plat, op);
+ if (priv->is_dma)
+ err = cadence_qspi_apb_dma_read(priv, op);
else
- err = cadence_qspi_apb_read_execute(plat, op);
+ err = cadence_qspi_apb_read_execute(priv, op);
}
break;
case CQSPI_WRITE:
- err = cadence_qspi_apb_write_setup(plat, op);
+ err = cadence_qspi_apb_write_setup(priv, op);
if (!err)
- err = cadence_qspi_apb_write_execute(plat, op);
+ err = cadence_qspi_apb_write_execute(priv, op);
break;
default:
err = -1;
static int cadence_spi_of_to_plat(struct udevice *bus)
{
struct cadence_spi_plat *plat = dev_get_plat(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
ofnode subnode;
plat->regbase = (void *)devfdt_get_addr_index(bus, 0);
0);
/* Use DAC mode only when MMIO window is at least 8M wide */
if (plat->ahbsize >= SZ_8M)
- plat->use_dac_mode = true;
+ priv->use_dac_mode = true;
plat->is_dma = dev_read_bool(bus, "cdns,is-dma");
return dummy_clk;
}
-static u32 cadence_qspi_calc_rdreg(struct cadence_spi_plat *plat)
+static u32 cadence_qspi_calc_rdreg(struct cadence_spi_priv *priv)
{
u32 rdreg = 0;
- rdreg |= plat->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
- rdreg |= plat->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
- rdreg |= plat->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+ rdreg |= priv->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+ rdreg |= priv->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+ rdreg |= priv->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
return rdreg;
}
}
}
-static int cadence_qspi_set_protocol(struct cadence_spi_plat *plat,
+static int cadence_qspi_set_protocol(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
int ret;
- plat->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
+ priv->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
if (ret < 0)
return ret;
- plat->inst_width = ret;
+ priv->inst_width = ret;
ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
if (ret < 0)
return ret;
- plat->addr_width = ret;
+ priv->addr_width = ret;
ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
if (ret < 0)
return ret;
- plat->data_width = ret;
+ priv->data_width = ret;
return 0;
}
cadence_qspi_apb_controller_enable(reg_base);
}
-void cadence_qspi_apb_controller_init(struct cadence_spi_plat *plat)
+void cadence_qspi_apb_controller_init(struct cadence_spi_priv *priv)
{
unsigned reg;
- cadence_qspi_apb_controller_disable(plat->regbase);
+ cadence_qspi_apb_controller_disable(priv->regbase);
/* Configure the device size and address bytes */
- reg = readl(plat->regbase + CQSPI_REG_SIZE);
+ reg = readl(priv->regbase + CQSPI_REG_SIZE);
/* Clear the previous value */
reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
- reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
- reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
- writel(reg, plat->regbase + CQSPI_REG_SIZE);
+ reg |= (priv->page_size << CQSPI_REG_SIZE_PAGE_LSB);
+ reg |= (priv->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
+ writel(reg, priv->regbase + CQSPI_REG_SIZE);
/* Configure the remap address register, no remap */
- writel(0, plat->regbase + CQSPI_REG_REMAP);
+ writel(0, priv->regbase + CQSPI_REG_REMAP);
/* Indirect mode configurations */
- writel(plat->fifo_depth / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);
+ writel(priv->fifo_depth / 2, priv->regbase + CQSPI_REG_SRAMPARTITION);
/* Disable all interrupts */
- writel(0, plat->regbase + CQSPI_REG_IRQMASK);
+ writel(0, priv->regbase + CQSPI_REG_IRQMASK);
- cadence_qspi_apb_controller_enable(plat->regbase);
+ cadence_qspi_apb_controller_enable(priv->regbase);
}
int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg)
return 0;
}
-static int cadence_qspi_setup_opcode_ext(struct cadence_spi_plat *plat,
+static int cadence_qspi_setup_opcode_ext(struct cadence_spi_priv *priv,
const struct spi_mem_op *op,
unsigned int shift)
{
/* Opcode extension is the LSB. */
ext = op->cmd.opcode & 0xff;
- reg = readl(plat->regbase + CQSPI_REG_OP_EXT_LOWER);
+ reg = readl(priv->regbase + CQSPI_REG_OP_EXT_LOWER);
reg &= ~(0xff << shift);
reg |= ext << shift;
- writel(reg, plat->regbase + CQSPI_REG_OP_EXT_LOWER);
+ writel(reg, priv->regbase + CQSPI_REG_OP_EXT_LOWER);
return 0;
}
-static int cadence_qspi_enable_dtr(struct cadence_spi_plat *plat,
+static int cadence_qspi_enable_dtr(struct cadence_spi_priv *priv,
const struct spi_mem_op *op,
unsigned int shift,
bool enable)
{
unsigned int reg;
int ret;
- reg = readl(plat->regbase + CQSPI_REG_CONFIG);
+ reg = readl(priv->regbase + CQSPI_REG_CONFIG);
if (enable) {
reg |= CQSPI_REG_CONFIG_DTR_PROTO;
reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
/* Set up command opcode extension. */
- ret = cadence_qspi_setup_opcode_ext(plat, op, shift);
+ ret = cadence_qspi_setup_opcode_ext(priv, op, shift);
if (ret)
return ret;
} else {
reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
}
- writel(reg, plat->regbase + CQSPI_REG_CONFIG);
+ writel(reg, priv->regbase + CQSPI_REG_CONFIG);
return 0;
}
-int cadence_qspi_apb_command_read_setup(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_command_read_setup(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
int ret;
unsigned int reg;
- ret = cadence_qspi_set_protocol(plat, op);
+ ret = cadence_qspi_set_protocol(priv, op);
if (ret)
return ret;
- ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
- plat->dtr);
+ ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
+ priv->dtr);
if (ret)
return ret;
- reg = cadence_qspi_calc_rdreg(plat);
- writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);
+ reg = cadence_qspi_calc_rdreg(priv);
+ writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
return 0;
}
/* For command RDID, RDSR. */
-int cadence_qspi_apb_command_read(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_command_read(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
- void *reg_base = plat->regbase;
+ void *reg_base = priv->regbase;
unsigned int reg;
unsigned int read_len;
int status;
return -EINVAL;
}
- if (plat->dtr)
+ if (priv->dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
/* Set up dummy cycles. */
- dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);
+ dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -ENOTSUPP;
return 0;
}
-int cadence_qspi_apb_command_write_setup(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_command_write_setup(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
int ret;
unsigned int reg;
- ret = cadence_qspi_set_protocol(plat, op);
+ ret = cadence_qspi_set_protocol(priv, op);
if (ret)
return ret;
- ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
- plat->dtr);
+ ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
+ priv->dtr);
if (ret)
return ret;
- reg = cadence_qspi_calc_rdreg(plat);
- writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);
+ reg = cadence_qspi_calc_rdreg(priv);
+ writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
return 0;
}
/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
-int cadence_qspi_apb_command_write(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_command_write(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
unsigned int reg = 0;
unsigned int wr_len;
unsigned int txlen = op->data.nbytes;
const void *txbuf = op->data.buf.out;
- void *reg_base = plat->regbase;
+ void *reg_base = priv->regbase;
u32 addr;
u8 opcode;
return -EINVAL;
}
- if (plat->dtr)
+ if (priv->dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
}
/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
-int cadence_qspi_apb_read_setup(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_read_setup(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
unsigned int reg;
int ret;
u8 opcode;
- ret = cadence_qspi_set_protocol(plat, op);
+ ret = cadence_qspi_set_protocol(priv, op);
if (ret)
return ret;
- ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_READ_LSB,
- plat->dtr);
+ ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_READ_LSB,
+ priv->dtr);
if (ret)
return ret;
/* Setup the indirect trigger address */
- writel(plat->trigger_address,
- plat->regbase + CQSPI_REG_INDIRECTTRIGGER);
+ writel(priv->trigger_address,
+ priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
/* Configure the opcode */
- if (plat->dtr)
+ if (priv->dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
- rd_reg |= cadence_qspi_calc_rdreg(plat);
+ rd_reg |= cadence_qspi_calc_rdreg(priv);
- writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
+ writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
if (dummy_bytes) {
/* Convert to clock cycles. */
- dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);
+ dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -ENOTSUPP;
<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
}
- writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);
+ writel(rd_reg, priv->regbase + CQSPI_REG_RD_INSTR);
/* set device size */
- reg = readl(plat->regbase + CQSPI_REG_SIZE);
+ reg = readl(priv->regbase + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
- writel(reg, plat->regbase + CQSPI_REG_SIZE);
+ writel(reg, priv->regbase + CQSPI_REG_SIZE);
return 0;
}
-static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_plat *plat)
+static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_priv *priv)
{
- u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
+ u32 reg = readl(priv->regbase + CQSPI_REG_SDRAMLEVEL);
reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}
-static int cadence_qspi_wait_for_data(struct cadence_spi_plat *plat)
+static int cadence_qspi_wait_for_data(struct cadence_spi_priv *priv)
{
unsigned int timeout = 10000;
u32 reg;
while (timeout--) {
- reg = cadence_qspi_get_rd_sram_level(plat);
+ reg = cadence_qspi_get_rd_sram_level(priv);
if (reg)
return reg;
udelay(1);
}
static int
-cadence_qspi_apb_indirect_read_execute(struct cadence_spi_plat *plat,
+cadence_qspi_apb_indirect_read_execute(struct cadence_spi_priv *priv,
unsigned int n_rx, u8 *rxbuf)
{
unsigned int remaining = n_rx;
unsigned int bytes_to_read = 0;
int ret;
- writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);
+ writel(n_rx, priv->regbase + CQSPI_REG_INDIRECTRDBYTES);
/* Start the indirect read transfer */
writel(CQSPI_REG_INDIRECTRD_START,
- plat->regbase + CQSPI_REG_INDIRECTRD);
+ priv->regbase + CQSPI_REG_INDIRECTRD);
while (remaining > 0) {
- ret = cadence_qspi_wait_for_data(plat);
+ ret = cadence_qspi_wait_for_data(priv);
if (ret < 0) {
printf("Indirect write timed out (%i)\n", ret);
goto failrd;
bytes_to_read = ret;
while (bytes_to_read != 0) {
- bytes_to_read *= plat->fifo_width;
+ bytes_to_read *= priv->fifo_width;
bytes_to_read = bytes_to_read > remaining ?
remaining : bytes_to_read;
/*
* data abort.
*/
if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
- readsb(plat->ahbbase, rxbuf, bytes_to_read);
+ readsb(priv->ahbbase, rxbuf, bytes_to_read);
else
- readsl(plat->ahbbase, rxbuf,
+ readsl(priv->ahbbase, rxbuf,
bytes_to_read >> 2);
rxbuf += bytes_to_read;
remaining -= bytes_to_read;
- bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
+ bytes_to_read = cadence_qspi_get_rd_sram_level(priv);
}
}
/* Check indirect done status */
- ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
+ ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
if (ret) {
printf("Indirect read completion error (%i)\n", ret);
/* Clear indirect completion status */
writel(CQSPI_REG_INDIRECTRD_DONE,
- plat->regbase + CQSPI_REG_INDIRECTRD);
+ priv->regbase + CQSPI_REG_INDIRECTRD);
/* Check indirect done status */
- ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
+ ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
CQSPI_REG_INDIRECTRD_DONE, 0, 10, 0);
if (ret) {
printf("Indirect read clear completion error (%i)\n", ret);
failrd:
/* Cancel the indirect read */
writel(CQSPI_REG_INDIRECTRD_CANCEL,
- plat->regbase + CQSPI_REG_INDIRECTRD);
+ priv->regbase + CQSPI_REG_INDIRECTRD);
return ret;
}
-int cadence_qspi_apb_read_execute(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_read_execute(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
u64 from = op->addr.val;
if (CONFIG_IS_ENABLED(ARCH_VERSAL))
cadence_qspi_apb_enable_linear_mode(true);
- if (plat->use_dac_mode && (from + len < plat->ahbsize)) {
+ if (priv->use_dac_mode && (from + len < priv->ahbsize)) {
if (len < 256 ||
- dma_memcpy(buf, plat->ahbbase + from, len) < 0) {
- memcpy_fromio(buf, plat->ahbbase + from, len);
+ dma_memcpy(buf, priv->ahbbase + from, len) < 0) {
+ memcpy_fromio(buf, priv->ahbbase + from, len);
}
- if (!cadence_qspi_wait_idle(plat->regbase))
+ if (!cadence_qspi_wait_idle(priv->regbase))
return -EIO;
return 0;
}
- return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
+ return cadence_qspi_apb_indirect_read_execute(priv, len, buf);
}
/* Opcode + Address (3/4 bytes) */
-int cadence_qspi_apb_write_setup(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_write_setup(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
unsigned int reg;
int ret;
u8 opcode;
- ret = cadence_qspi_set_protocol(plat, op);
+ ret = cadence_qspi_set_protocol(priv, op);
if (ret)
return ret;
- ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_WRITE_LSB,
- plat->dtr);
+ ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_WRITE_LSB,
+ priv->dtr);
if (ret)
return ret;
/* Setup the indirect trigger address */
- writel(plat->trigger_address,
- plat->regbase + CQSPI_REG_INDIRECTTRIGGER);
+ writel(priv->trigger_address,
+ priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
/* Configure the opcode */
- if (plat->dtr)
+ if (priv->dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
- reg |= plat->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
- reg |= plat->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
- writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);
+ reg |= priv->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= priv->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
+ writel(reg, priv->regbase + CQSPI_REG_WR_INSTR);
- reg = cadence_qspi_calc_rdreg(plat);
- writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);
+ reg = cadence_qspi_calc_rdreg(priv);
+ writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
- writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);
+ writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);
- if (plat->dtr) {
+ if (priv->dtr) {
/*
* Some flashes like the cypress Semper flash expect a 4-byte
* dummy address with the Read SR command in DTR mode, but this
* controller's side. spi-nor will take care of polling the
* status register.
*/
- reg = readl(plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg = readl(priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
- writel(reg, plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
+ writel(reg, priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
}
- reg = readl(plat->regbase + CQSPI_REG_SIZE);
+ reg = readl(priv->regbase + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
- writel(reg, plat->regbase + CQSPI_REG_SIZE);
+ writel(reg, priv->regbase + CQSPI_REG_SIZE);
return 0;
}
static int
-cadence_qspi_apb_indirect_write_execute(struct cadence_spi_plat *plat,
+cadence_qspi_apb_indirect_write_execute(struct cadence_spi_priv *priv,
unsigned int n_tx, const u8 *txbuf)
{
- unsigned int page_size = plat->page_size;
+ unsigned int page_size = priv->page_size;
unsigned int remaining = n_tx;
const u8 *bb_txbuf = txbuf;
void *bounce_buf = NULL;
}
/* Configure the indirect read transfer bytes */
- writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);
+ writel(n_tx, priv->regbase + CQSPI_REG_INDIRECTWRBYTES);
/* Start the indirect write transfer */
writel(CQSPI_REG_INDIRECTWR_START,
- plat->regbase + CQSPI_REG_INDIRECTWR);
+ priv->regbase + CQSPI_REG_INDIRECTWR);
/*
* Some delay is required for the above bit to be internally
* synchronized by the QSPI module.
*/
- ndelay(plat->wr_delay);
+ ndelay(priv->wr_delay);
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
- writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
+ writesl(priv->ahbbase, bb_txbuf, write_bytes >> 2);
if (write_bytes % 4)
- writesb(plat->ahbbase,
+ writesb(priv->ahbbase,
bb_txbuf + rounddown(write_bytes, 4),
write_bytes % 4);
- ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_SDRAMLEVEL,
+ ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_SDRAMLEVEL,
CQSPI_REG_SDRAMLEVEL_WR_MASK <<
CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
if (ret) {
}
/* Check indirect done status */
- ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
+ ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
if (ret) {
printf("Indirect write completion error (%i)\n", ret);
/* Clear indirect completion status */
writel(CQSPI_REG_INDIRECTWR_DONE,
- plat->regbase + CQSPI_REG_INDIRECTWR);
+ priv->regbase + CQSPI_REG_INDIRECTWR);
/* Check indirect done status */
- ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
+ ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
CQSPI_REG_INDIRECTWR_DONE, 0, 10, 0);
if (ret) {
printf("Indirect write clear completion error (%i)\n", ret);
failwr:
/* Cancel the indirect write */
writel(CQSPI_REG_INDIRECTWR_CANCEL,
- plat->regbase + CQSPI_REG_INDIRECTWR);
+ priv->regbase + CQSPI_REG_INDIRECTWR);
if (bounce_buf)
free(bounce_buf);
return ret;
}
-int cadence_qspi_apb_write_execute(struct cadence_spi_plat *plat,
+int cadence_qspi_apb_write_execute(struct cadence_spi_priv *priv,
const struct spi_mem_op *op)
{
u32 to = op->addr.val;
* mode. So, we can not use direct mode when in DTR mode for writing
* data.
*/
- if (!plat->dtr && plat->use_dac_mode && (to + len < plat->ahbsize)) {
- memcpy_toio(plat->ahbbase + to, buf, len);
- if (!cadence_qspi_wait_idle(plat->regbase))
+ cadence_qspi_apb_enable_linear_mode(true);
+ if (!priv->dtr && priv->use_dac_mode && (to + len < priv->ahbsize)) {
+ memcpy_toio(priv->ahbbase + to, buf, len);
+ if (!cadence_qspi_wait_idle(priv->regbase))
return -EIO;
return 0;
}
- return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
+ return cadence_qspi_apb_indirect_write_execute(priv, len, buf);
}
void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)