The MediaTek MT7628 ethernet interface is used on MT7628 and
MT7688 based boards.
+config NET_OCTEONTX
+ bool "OcteonTX Ethernet support"
+ depends on ARCH_OCTEONTX
+ depends on PCI_SRIOV
+ help
+ You must select Y to enable network device support for
+ OcteonTX SoCs. If unsure, say N.
+
+config OCTEONTX_SMI
+ bool "OcteonTX SMI Device support"
+ depends on ARCH_OCTEONTX || ARCH_OCTEONTX2
+ help
+ You must select Y to enable SMI controller support for
+ OcteonTX or OcteonTX2 SoCs. If unsure, say N.
+
config PCH_GBE
bool "Intel Platform Controller Hub EG20T GMAC driver"
depends on DM_ETH && DM_PCI
obj-$(CONFIG_SMC91111) += smc91111.o
obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_TSEC_ENET) += tsec.o fsl_mdio.o
+obj-$(CONFIG_NET_OCTEONTX) += octeontx/
+obj-$(CONFIG_OCTEONTX_SMI) += octeontx/smi.o
obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
obj-$(CONFIG_ULI526X) += uli526x.o
obj-$(CONFIG_VSC7385_ENET) += vsc7385.o
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+
+obj-$(CONFIG_NET_OCTEONTX) += bgx.o nic_main.o nicvf_queues.o nicvf_main.o \
+ xcv.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <net.h>
+#include <netdev.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "bgx.h"
+
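+/* Map each QLM/DLM mode to the PHY interface type handed to phy_connect() */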
+static const phy_interface_t if_mode[] = {
+ [QLM_MODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ [QLM_MODE_RGMII] = PHY_INTERFACE_MODE_RGMII,
+ [QLM_MODE_QSGMII] = PHY_INTERFACE_MODE_QSGMII,
+ [QLM_MODE_XAUI] = PHY_INTERFACE_MODE_XAUI,
+ [QLM_MODE_RXAUI] = PHY_INTERFACE_MODE_RXAUI,
+};
+
+struct lmac {
+ struct bgx *bgx;
+ int dmac;
+ u8 mac[6];
+ bool link_up;
+ bool init_pend;
+ int lmacid; /* ID within BGX */
+ int phy_addr; /* ID on board */
+ struct udevice *dev;
+ struct mii_dev *mii_bus;
+ struct phy_device *phydev;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ int lane_to_sds;
+ int use_training;
+ int lmac_type;
+ u8 qlm_mode;
+ int qlm;
+ bool is_1gx;
+};
+
+struct bgx {
+ u8 bgx_id;
+ int node;
+ struct lmac lmac[MAX_LMAC_PER_BGX];
+ int lmac_count;
+ u8 max_lmac;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ bool is_rgx;
+};
+
+struct bgx_board_info bgx_board_info[MAX_BGX_PER_NODE];
+
+struct bgx *bgx_vnic[MAX_BGX_PER_NODE];
+
+/* APIs to read/write BGXX CSRs */
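+/* Each LMAC owns a 1 MB window inside the BGX BAR, so the LMAC index
+ * is folded into the register offset at bit 20.
+ */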
+static u64 bgx_reg_read(struct bgx *bgx, uint8_t lmac, u64 offset)
+{
+ u64 addr = (uintptr_t)bgx->reg_base +
+ ((uint32_t)lmac << 20) + offset;
+
+ return readq((void *)addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, uint8_t lmac,
+ u64 offset, u64 val)
+{
+ u64 addr = (uintptr_t)bgx->reg_base +
+ ((uint32_t)lmac << 20) + offset;
+
+ writeq(val, (void *)addr);
+}
+
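+/* Note: this is a read-OR-write helper, so it can only set bits;
+ * callers that need to clear bits use bgx_reg_read()/bgx_reg_write().
+ */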
+static void bgx_reg_modify(struct bgx *bgx, uint8_t lmac,
+ u64 offset, u64 val)
+{
+ u64 addr = (uintptr_t)bgx->reg_base +
+ ((uint32_t)lmac << 20) + offset;
+
+ writeq(val | bgx_reg_read(bgx, lmac, offset), (void *)addr);
+}
+
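+/* Poll for up to ~200 ms for (reg & mask) to clear (zero == true) or
+ * become set (zero == false); returns 0 on success, 1 on timeout.
+ */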
+static int bgx_poll_reg(struct bgx *bgx, uint8_t lmac,
+ u64 reg, u64 mask, bool zero)
+{
+ int timeout = 200;
+ u64 reg_val;
+
+ while (timeout) {
+ reg_val = bgx_reg_read(bgx, lmac, reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ mdelay(1);
+ timeout--;
+ }
+ return 1;
+}
+
+static int gser_poll_reg(u64 reg, int bit, u64 mask, u64 expected_val,
+ int timeout)
+{
+ u64 reg_val;
+
+ debug("%s reg = %#llx, mask = %#llx,", __func__, reg, mask);
+ debug(" expected_val = %#llx, bit = %d\n", expected_val, bit);
+ while (timeout) {
+ reg_val = readq(reg) >> bit;
+ if ((reg_val & mask) == (expected_val))
+ return 0;
+ mdelay(1);
+ timeout--;
+ }
+ return 1;
+}
+
+static bool is_bgx_port_valid(int bgx, int lmac)
+{
+ debug("%s bgx %d lmac %d valid %d\n", __func__, bgx, lmac,
+ bgx_board_info[bgx].lmac_reg[lmac]);
+
+ return bgx_board_info[bgx].lmac_reg[lmac];
+}
+
+struct lmac *bgx_get_lmac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+
+ if (bgx)
+ return &bgx->lmac[lmacid];
+
+ return NULL;
+}
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+
+ if (bgx)
+ return bgx->lmac[lmacid].mac;
+
+ return NULL;
+}
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+
+ if (!bgx)
+ return;
+
+ memcpy(bgx->lmac[lmacid].mac, mac, 6);
+}
+
+/* Return bitmap of BGXs present in HW */
+void bgx_get_count(int node, int *bgx_count)
+{
+ int i;
+ struct bgx *bgx;
+
+ *bgx_count = 0;
+ for (i = 0; i < MAX_BGX_PER_NODE; i++) {
+ bgx = bgx_vnic[node * MAX_BGX_PER_NODE + i];
+ debug("bgx_vnic[%u]: %p\n", node * MAX_BGX_PER_NODE + i,
+ bgx);
+ if (bgx)
+ *bgx_count |= (1 << i);
+ }
+}
+
+/* Return number of LMAC configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ if (bgx)
+ return bgx->lmac_count;
+
+ return 0;
+}
+
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ u64 cfg;
+
+ if (!bgx)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ if (enable)
+ cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+ else
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+}
+
+static void bgx_flush_dmac_addrs(struct bgx *bgx, u64 lmac)
+{
+ u64 dmac = 0x00;
+ u64 offset, addr;
+
+ while (bgx->lmac[lmac].dmac > 0) {
+ offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(dmac)) +
+ (lmac * MAX_DMAC_PER_LMAC * sizeof(dmac));
+ addr = (uintptr_t)bgx->reg_base +
+ BGX_CMR_RX_DMACX_CAM + offset;
+ writeq(dmac, (void *)addr);
+ bgx->lmac[lmac].dmac--;
+ }
+}
+
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable)
+{
+ struct bgx *bgx;
+ struct lmac *lmac;
+ u64 cfg;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmac_idx];
+ if (lmac->qlm_mode == QLM_MODE_SGMII) {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= PCS_MRX_CTL_LOOPBACK1;
+ else
+ cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+ bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= SPU_CTL_LOOPBACK;
+ else
+ cfg &= ~SPU_CTL_LOOPBACK;
+ bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+ }
+}
+
+/* Return the DLM used for the BGX */
+static int get_qlm_for_bgx(int node, int bgx_id, int index)
+{
+ int qlm = 0;
+ u64 cfg;
+
+ if (otx_is_soc(CN81XX)) {
+ qlm = (bgx_id) ? 2 : 0;
+ qlm += (index >= 2) ? 1 : 0;
+ } else if (otx_is_soc(CN83XX)) {
+ switch (bgx_id) {
+ case 0:
+ qlm = 2;
+ break;
+ case 1:
+ qlm = 3;
+ break;
+ case 2:
+ if (index >= 2)
+ qlm = 6;
+ else
+ qlm = 5;
+ break;
+ case 3:
+ qlm = 4;
+ break;
+ }
+ }
+
+ cfg = readq(GSERX_CFG(qlm)) & GSERX_CFG_BGX;
+ debug("%s:qlm%d: cfg = %lld\n", __func__, qlm, cfg);
+
+ /* Check if DLM is configured as BGX# */
+ if (cfg) {
+ if (readq(GSERX_PHY_CTL(qlm)))
+ return -1;
+ return qlm;
+ }
+ return -1;
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+ u64 cfg;
+ struct lmac *lmac;
+
+ lmac = &bgx->lmac[lmacid];
+
+ debug("%s:bgx_id = %d, lmacid = %d\n", __func__, bgx->bgx_id, lmacid);
+
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+ /* Disable frame alignment if using preamble */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ if (cfg & 1)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ /* PCS reset */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ printf("BGX PCS reset not completed\n");
+ return -1;
+ }
+
+ /* power down, reset autoneg, autoneg enable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg &= ~PCS_MRX_CTL_PWR_DN;
+
+ if (bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis)
+ cfg |= (PCS_MRX_CTL_RST_AN);
+ else
+ cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+ /* Disable disparity for QSGMII mode, to prevent propagation across
+ * ports.
+ */
+
+ if (lmac->qlm_mode == QLM_MODE_QSGMII) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+ cfg &= ~PCS_MISCX_CTL_DISP_EN;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+ return 0; /* Skip checking AN_CPT */
+ }
+
+ if (lmac->is_1gx) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+ cfg |= PCS_MISC_CTL_MODE;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+ }
+
+ if (lmac->qlm_mode == QLM_MODE_SGMII) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ printf("BGX AN_CPT not completed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int bgx_lmac_sgmii_set_link_speed(struct lmac *lmac)
+{
+ u64 prtx_cfg;
+ u64 pcs_miscx_ctl;
+ u64 cfg;
+ struct bgx *bgx = lmac->bgx;
+ unsigned int lmacid = lmac->lmacid;
+
+ debug("%s: lmacid %d\n", __func__, lmac->lmacid);
+
+ /* Disable LMAC before setting up speed */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Read GMX CFG */
+ prtx_cfg = bgx_reg_read(bgx, lmacid,
+ BGX_GMP_GMI_PRTX_CFG);
+ /* Read PCS MISCS CTL */
+ pcs_miscx_ctl = bgx_reg_read(bgx, lmacid,
+ BGX_GMP_PCS_MISCX_CTL);
+
+ /* Use GMXENO to force the link down */
+ if (lmac->link_up) {
+ pcs_miscx_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+ /* change the duplex setting if the link is up */
+ prtx_cfg |= GMI_PORT_CFG_DUPLEX;
+ } else {
+ pcs_miscx_ctl |= PCS_MISC_CTL_GMX_ENO;
+ }
+
+ /* speed based setting for GMX */
+ switch (lmac->last_speed) {
+ case 10:
+ prtx_cfg &= ~GMI_PORT_CFG_SPEED;
+ prtx_cfg |= GMI_PORT_CFG_SPEED_MSB;
+ prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
+ pcs_miscx_ctl |= 50; /* sampling point */
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 100:
+ prtx_cfg &= ~GMI_PORT_CFG_SPEED;
+ prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
+ prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
+ pcs_miscx_ctl |= 0x5; /* sampling point */
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 1000:
+ prtx_cfg |= GMI_PORT_CFG_SPEED;
+ prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
+ prtx_cfg |= GMI_PORT_CFG_SLOT_TIME;
+ pcs_miscx_ctl |= 0x1; /* sampling point */
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x200);
+ if (lmac->last_duplex)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ else /* half duplex */
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST,
+ 0x2000);
+ break;
+ default:
+ break;
+ }
+
+ /* write back the new PCS misc and GMX settings */
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, pcs_miscx_ctl);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG, prtx_cfg);
+
+ /* read back GMX CFG again to check config completion */
+ bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+ /* enable BGX back */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg |= CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+ u64 cfg;
+ struct lmac *lmac;
+
+ lmac = &bgx->lmac[lmacid];
+
+ /* Reset SPU */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ printf("BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ /* Set interleaved running disparity for RXAUI */
+ if (lmac->qlm_mode != QLM_MODE_RXAUI)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ else
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+ SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+ /* clear all interrupts */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ if (lmac->use_training) {
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+ /* training enable */
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+ }
+
+ /* Append FCS to each packet */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+ /* Disable forward error correction */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+ cfg &= ~SPU_FEC_CTL_FEC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+ /* Disable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ cfg = cfg & ~(SPU_AN_CTL_XNP_EN);
+ if (lmac->use_training)
+ cfg = cfg | (SPU_AN_CTL_AN_EN);
+ else
+ cfg = cfg & ~(SPU_AN_CTL_AN_EN);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+ /* Clear all KR bits, configure according to the mode */
+ cfg &= ~((0xfULL << 22) | (1ULL << 12));
+ if (lmac->qlm_mode == QLM_MODE_10G_KR)
+ cfg |= (1 << 23);
+ else if (lmac->qlm_mode == QLM_MODE_40G_KR4)
+ cfg |= (1 << 24);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+ cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+ if (lmac->use_training)
+ cfg |= SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ else
+ cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+ cfg &= ~SPU_CTL_LOW_POWER;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+ cfg &= ~SMU_TX_CTL_UNI_EN;
+ cfg |= SMU_TX_CTL_DIC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+ /* take lmac_count into account */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+ debug("xaui_init: lmacid = %d, qlm = %d, qlm_mode = %d\n",
+ lmacid, lmac->qlm, lmac->qlm_mode);
+ /* RXAUI with Marvell PHY requires some tweaking */
+ if (lmac->qlm_mode == QLM_MODE_RXAUI) {
+ char mii_name[20];
+ struct phy_info *phy;
+
+ phy = &bgx_board_info[bgx->bgx_id].phy_info[lmacid];
+ snprintf(mii_name, sizeof(mii_name), "smi%d", phy->mdio_bus);
+
+ debug("mii_name: %s\n", mii_name);
+ lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
+ lmac->phy_addr = phy->phy_addr;
+ rxaui_phy_xs_init(lmac->mii_bus, lmac->phy_addr);
+ }
+
+ return 0;
+}
+
+/* Get max number of lanes present in a given QLM/DLM */
+static int get_qlm_lanes(int qlm)
+{
+ if (otx_is_soc(CN81XX))
+ return 2;
+ else if (otx_is_soc(CN83XX))
+ return (qlm >= 5) ? 2 : 4;
+ else
+ return -1;
+}
+
+int __rx_equalization(int qlm, int lane)
+{
+ int max_lanes = get_qlm_lanes(qlm);
+ int l;
+ int fail = 0;
+
+ /* Before completing Rx equalization, wait for
+ * GSERx_RX_EIE_DETSTS[CDRLOCK] to be set.
+ * This ensures the rx data is valid.
+ */
+ if (lane == -1) {
+ if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK, 0xf,
+ (1 << max_lanes) - 1, 100)) {
+ debug("ERROR: CDR Lock not detected");
+ debug(" on DLM%d for 2 lanes\n", qlm);
+ return -1;
+ }
+ } else {
+ if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK,
+ (0xf & (1 << lane)), (1 << lane), 100)) {
+ debug("ERROR: DLM%d: CDR Lock not detected", qlm);
+ debug(" on %d lane\n", lane);
+ return -1;
+ }
+ }
+
+ for (l = 0; l < max_lanes; l++) {
+ u64 rctl, reer;
+
+ if (lane != -1 && lane != l)
+ continue;
+
+ /* Enable software control */
+ rctl = readq(GSER_BR_RXX_CTL(qlm, l));
+ rctl |= GSER_BR_RXX_CTL_RXT_SWM;
+ writeq(rctl, GSER_BR_RXX_CTL(qlm, l));
+
+ /* Clear the completion flag and initiate a new request */
+ reer = readq(GSER_BR_RXX_EER(qlm, l));
+ reer &= ~GSER_BR_RXX_EER_RXT_ESV;
+ reer |= GSER_BR_RXX_EER_RXT_EER;
+ writeq(reer, GSER_BR_RXX_EER(qlm, l));
+ }
+
+ /* Wait for RX equalization to complete */
+ for (l = 0; l < max_lanes; l++) {
+ u64 rctl, reer;
+
+ if (lane != -1 && lane != l)
+ continue;
+
+ gser_poll_reg(GSER_BR_RXX_EER(qlm, l), EER_RXT_ESV, 1, 1, 200);
+ reer = readq(GSER_BR_RXX_EER(qlm, l));
+
+ /* Switch back to hardware control */
+ rctl = readq(GSER_BR_RXX_CTL(qlm, l));
+ rctl &= ~GSER_BR_RXX_CTL_RXT_SWM;
+ writeq(rctl, GSER_BR_RXX_CTL(qlm, l));
+
+ if (reer & GSER_BR_RXX_EER_RXT_ESV) {
+ debug("Rx equalization completed on DLM%d", qlm);
+ debug(" QLM%d rxt_esm = 0x%llx\n", l, (reer & 0x3fff));
+ } else {
+ debug("Rx equalization timedout on DLM%d", qlm);
+ debug(" lane %d\n", l);
+ fail = 1;
+ }
+ }
+
+ return (fail) ? -1 : 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ int lmacid = lmac->lmacid;
+ int lmac_type = lmac->lmac_type;
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+
+ /* check if auto negotiation is complete */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ if (cfg & SPU_AN_CTL_AN_EN) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_STATUS);
+ if (!(cfg & SPU_AN_STS_AN_COMPLETE)) {
+ /* Restart autonegotiation */
+ debug("restarting auto-neg\n");
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_AN_CONTROL,
+ SPU_AN_CTL_AN_RESTART);
+ return -1;
+ }
+ }
+
+ debug("%s link use_training %d\n", __func__, lmac->use_training);
+ if (lmac->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ debug("waiting for link training\n");
+ /* Clear the training interrupts (W1C) */
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ udelay(2000);
+ /* Restart training */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+
+ /* Perform RX Equalization. Applies to non-KR interfaces for speeds
+ * >= 6.25Gbps.
+ */
+ if (!lmac->use_training) {
+ int qlm;
+ bool use_dlm = 0;
+
+ if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
+ bgx->bgx_id == 2))
+ use_dlm = 1;
+ switch (lmac->lmac_type) {
+ default:
+ case BGX_MODE_SGMII:
+ case BGX_MODE_RGMII:
+ case BGX_MODE_XAUI:
+ /* Nothing to do */
+ break;
+ case BGX_MODE_XLAUI:
+ if (use_dlm) {
+ if (__rx_equalization(lmac->qlm, -1) ||
+ __rx_equalization(lmac->qlm + 1, -1)) {
+ printf("BGX%d:%d", bgx->bgx_id, lmacid);
+ printf(" Waiting for RX Equalization");
+ printf(" on DLM%d/DLM%d\n",
+ lmac->qlm, lmac->qlm + 1);
+ return -1;
+ }
+ } else {
+ if (__rx_equalization(lmac->qlm, -1)) {
+ printf("BGX%d:%d", bgx->bgx_id, lmacid);
+ printf(" Waiting for RX Equalization");
+ printf(" on QLM%d\n", lmac->qlm);
+ return -1;
+ }
+ }
+ break;
+ case BGX_MODE_RXAUI:
+ /* RXAUI0 uses LMAC0:QLM0/QLM2 and RXAUI1 uses
+ * LMAC1:QLM1/QLM3. RXAUI requires 2 lanes
+ * for each interface.
+ */
+ qlm = lmac->qlm;
+ if (__rx_equalization(qlm, 0)) {
+ printf("BGX%d:%d", bgx->bgx_id, lmacid);
+ printf(" Waiting for RX Equalization");
+ printf(" on QLM%d, Lane0\n", qlm);
+ return -1;
+ }
+ if (__rx_equalization(qlm, 1)) {
+ printf("BGX%d:%d", bgx->bgx_id, lmacid);
+ printf(" Waiting for RX Equalization");
+ printf(" on QLM%d, Lane1\n", qlm);
+ return -1;
+ }
+ break;
+ case BGX_MODE_XFI:
+ {
+ int lid;
+ bool altpkg = otx_is_altpkg();
+
+ if (bgx->bgx_id == 0 && altpkg && lmacid)
+ lid = 0;
+ else if ((lmacid >= 2) && use_dlm)
+ lid = lmacid - 2;
+ else
+ lid = lmacid;
+
+ if (__rx_equalization(lmac->qlm, lid)) {
+ printf("BGX%d:%d", bgx->bgx_id, lid);
+ printf(" Waiting for RX Equalization");
+ printf(" on QLM%d\n", lmac->qlm);
+ }
+ }
+ break;
+ }
+ }
+
+ /* wait for PCS to come out of reset */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ printf("BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ if (lmac_type == 3 || lmac_type == 4) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+ SPU_BR_STATUS_BLK_LOCK, false)) {
+ printf("SPU_BR_STATUS_BLK_LOCK not completed\n");
+ return -1;
+ }
+ } else {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+ SPU_BX_STATUS_RX_ALIGN, false)) {
+ printf("SPU_BX_STATUS_RX_ALIGN not completed\n");
+ return -1;
+ }
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ printf("Receive fault, retry training\n");
+ if (lmac->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /* Wait for MAC RX to be ready */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+ SMU_RX_CTL_STATUS, true)) {
+ printf("SMU RX link not okay\n");
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ printf("SMU RX not idle\n");
+ return -1;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+ printf("SMU TX not idle\n");
+ return -1;
+ }
+
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ printf("Receive fault\n");
+ return -1;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ if (!(bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS1) &
+ SPU_STATUS1_RCV_LNK))
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false)) {
+ printf("SPU receive link down\n");
+ return -1;
+ }
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+ return 0;
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, int8_t lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+
+ debug("%s: lmac: %p, lmacid = %d\n", __func__, lmac, lmacid);
+
+ if (lmac->qlm_mode == QLM_MODE_SGMII ||
+ lmac->qlm_mode == QLM_MODE_RGMII ||
+ lmac->qlm_mode == QLM_MODE_QSGMII) {
+ if (bgx_lmac_sgmii_init(bgx, lmacid)) {
+ debug("bgx_lmac_sgmii_init failed\n");
+ return -1;
+ }
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+ } else {
+ if (bgx_lmac_xaui_init(bgx, lmacid, lmac->lmac_type))
+ return -1;
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+ }
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+ CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+ return 0;
+}
+
+int bgx_poll_for_link(int node, int bgx_idx, int lmacid)
+{
+ int ret;
+ struct lmac *lmac = bgx_get_lmac(node, bgx_idx, lmacid);
+ char mii_name[10];
+ struct phy_info *phy;
+
+ if (!lmac) {
+ printf("LMAC %d/%d/%d is disabled or doesn't exist\n",
+ node, bgx_idx, lmacid);
+ return 0;
+ }
+
+ debug("%s: %d, lmac: %d/%d/%d %p\n",
+ __FILE__, __LINE__,
+ node, bgx_idx, lmacid, lmac);
+ if (lmac->init_pend) {
+ ret = bgx_lmac_enable(lmac->bgx, lmacid);
+ if (ret < 0) {
+ printf("BGX%d LMAC%d lmac_enable failed\n", bgx_idx,
+ lmacid);
+ return ret;
+ }
+ lmac->init_pend = 0;
+ mdelay(100);
+ }
+ if (lmac->qlm_mode == QLM_MODE_SGMII ||
+ lmac->qlm_mode == QLM_MODE_RGMII ||
+ lmac->qlm_mode == QLM_MODE_QSGMII) {
+ if (bgx_board_info[bgx_idx].phy_info[lmacid].phy_addr == -1) {
+ lmac->link_up = 1;
+ lmac->last_speed = 1000;
+ lmac->last_duplex = 1;
+ printf("BGX%d:LMAC %u link up\n", bgx_idx, lmacid);
+ return lmac->link_up;
+ }
+ snprintf(mii_name, sizeof(mii_name), "smi%d",
+ bgx_board_info[bgx_idx].phy_info[lmacid].mdio_bus);
+
+ debug("mii_name: %s\n", mii_name);
+
+ lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
+ phy = &bgx_board_info[bgx_idx].phy_info[lmacid];
+ lmac->phy_addr = phy->phy_addr;
+
+ debug("lmac->mii_bus: %p\n", lmac->mii_bus);
+ if (!lmac->mii_bus) {
+ printf("MDIO device %s not found\n", mii_name);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ lmac->phydev = phy_connect(lmac->mii_bus, lmac->phy_addr,
+ lmac->dev,
+ if_mode[lmac->qlm_mode]);
+
+ if (!lmac->phydev) {
+ printf("%s: No PHY device\n", __func__);
+ return -1;
+ }
+
+ ret = phy_config(lmac->phydev);
+ if (ret) {
+ printf("%s: Could not initialize PHY %s\n",
+ __func__, lmac->phydev->dev->name);
+ return ret;
+ }
+
+ ret = phy_startup(lmac->phydev);
+ debug("%s: %d\n", __FILE__, __LINE__);
+ if (ret) {
+ printf("%s: Could not initialize PHY %s\n",
+ __func__, lmac->phydev->dev->name);
+ }
+
+#ifdef OCTEONTX_XCV
+ if (lmac->qlm_mode == QLM_MODE_RGMII)
+ xcv_setup_link(lmac->phydev->link, lmac->phydev->speed);
+#endif
+
+ lmac->link_up = lmac->phydev->link;
+ lmac->last_speed = lmac->phydev->speed;
+ lmac->last_duplex = lmac->phydev->duplex;
+
+ debug("%s qlm_mode %d phy link status 0x%x,last speed 0x%x,",
+ __func__, lmac->qlm_mode, lmac->link_up,
+ lmac->last_speed);
+ debug(" duplex 0x%x\n", lmac->last_duplex);
+
+ if (lmac->qlm_mode != QLM_MODE_RGMII)
+ bgx_lmac_sgmii_set_link_speed(lmac);
+
+ } else {
+ u64 status1;
+ u64 tx_ctl;
+ u64 rx_ctl;
+
+ status1 = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1);
+ tx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_TX_CTL);
+ rx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ debug("BGX%d LMAC%d BGX_SPUX_STATUS2: %lx\n", bgx_idx, lmacid,
+ (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS2));
+ debug("BGX%d LMAC%d BGX_SPUX_STATUS1: %lx\n", bgx_idx, lmacid,
+ (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1));
+ debug("BGX%d LMAC%d BGX_SMUX_RX_CTL: %lx\n", bgx_idx, lmacid,
+ (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_SMUX_RX_CTL));
+ debug("BGX%d LMAC%d BGX_SMUX_TX_CTL: %lx\n", bgx_idx, lmacid,
+ (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_SMUX_TX_CTL));
+
+ if ((status1 & SPU_STATUS1_RCV_LNK) &&
+ ((tx_ctl & SMU_TX_CTL_LNK_STATUS) == 0) &&
+ ((rx_ctl & SMU_RX_CTL_STATUS) == 0)) {
+ lmac->link_up = 1;
+ if (lmac->lmac_type == 4)
+ lmac->last_speed = 40000;
+ else
+ lmac->last_speed = 10000;
+ lmac->last_duplex = 1;
+ } else {
+ lmac->link_up = 0;
+ lmac->last_speed = 0;
+ lmac->last_duplex = 0;
+ return bgx_xaui_check_link(lmac);
+ }
+
+ lmac->last_link = lmac->link_up;
+ }
+
+ printf("BGX%d:LMAC %u link %s\n", bgx_idx, lmacid,
+ (lmac->link_up) ? "up" : "down");
+
+ return lmac->link_up;
+}
+
+void bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
+{
+ struct lmac *lmac;
+ u64 cmrx_cfg;
+
+ lmac = &bgx->lmac[lmacid];
+
+ cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cmrx_cfg &= ~(1 << 15);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ bgx_flush_dmac_addrs(bgx, lmacid);
+
+ if (lmac->phydev)
+ phy_shutdown(lmac->phydev);
+
+ lmac->phydev = NULL;
+}
+
+/* Program BGXX_CMRX_CONFIG.{lmac_type,lane_to_sds} for each interface,
+ * and the number of LMACs used by this interface. Each LMAC can be
+ * programmed in a different mode, so parse each LMAC one at a time.
+ */
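+/* lane_to_sds packs one 2-bit SerDes lane number per LMAC lane;
+ * e.g. 0xE4 == 0b11100100 maps lanes 3,2,1,0 one-to-one.
+ */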
+static void bgx_init_hw(struct bgx *bgx)
+{
+ struct lmac *lmac;
+ int i, lmacid, count = 0, inc = 0;
+ char buf[40];
+ static int qsgmii_configured;
+
+ for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
+ struct lmac *tlmac;
+
+ lmac = &bgx->lmac[lmacid];
+ debug("%s: lmacid = %d, qlm = %d, mode = %d\n",
+ __func__, lmacid, lmac->qlm, lmac->qlm_mode);
+ /* If QLM is not programmed, skip */
+ if (lmac->qlm == -1)
+ continue;
+
+ switch (lmac->qlm_mode) {
+ case QLM_MODE_SGMII:
+ {
+ /* EBB8000 (alternative pkg) has only lane0 present on
+ * DLM0 and DLM1, skip configuring other lanes
+ */
+ if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+ if (lmacid % 2)
+ continue;
+ }
+ lmac->lane_to_sds = lmacid;
+ lmac->lmac_type = 0;
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: %s\n",
+ bgx->bgx_id, lmac->qlm, lmacid,
+ lmac->is_1gx ? "1000Base-X" : "SGMII");
+ break;
+ }
+ case QLM_MODE_XAUI:
+ if (lmacid != 0)
+ continue;
+ lmac->lmac_type = 1;
+ lmac->lane_to_sds = 0xE4;
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: XAUI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case QLM_MODE_RXAUI:
+ if (lmacid == 0) {
+ lmac->lmac_type = 2;
+ lmac->lane_to_sds = 0x4;
+ } else if (lmacid == 1) {
+ struct lmac *tlmac;
+
+ tlmac = &bgx->lmac[2];
+ if (tlmac->qlm_mode == QLM_MODE_RXAUI) {
+ lmac->lmac_type = 2;
+ lmac->lane_to_sds = 0xe;
+ lmac->qlm = tlmac->qlm;
+ }
+ } else {
+ continue;
+ }
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: RXAUI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case QLM_MODE_XFI:
+ /* EBB8000 (alternative pkg) has only lane0 present on
+ * DLM0 and DLM1, skip configuring other lanes
+ */
+ if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+ if (lmacid % 2)
+ continue;
+ }
+ lmac->lane_to_sds = lmacid;
+ lmac->lmac_type = 3;
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: XFI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case QLM_MODE_XLAUI:
+ if (lmacid != 0)
+ continue;
+ lmac->lmac_type = 4;
+ lmac->lane_to_sds = 0xE4;
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: XLAUI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case QLM_MODE_10G_KR:
+ /* EBB8000 (alternative pkg) has only lane0 present on
+ * DLM0 and DLM1, skip configuring other lanes
+ */
+ if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+ if (lmacid % 2)
+ continue;
+ }
+ lmac->lane_to_sds = lmacid;
+ lmac->lmac_type = 3;
+ lmac->use_training = 1;
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: 10G-KR\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case QLM_MODE_40G_KR4:
+ if (lmacid != 0)
+ continue;
+ lmac->lmac_type = 4;
+ lmac->lane_to_sds = 0xE4;
+ lmac->use_training = 1;
+ snprintf(buf, sizeof(buf),
+ "BGX%d QLM%d LMAC%d mode: 40G-KR4\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case QLM_MODE_RGMII:
+ if (lmacid != 0)
+ continue;
+ lmac->lmac_type = 5;
+ lmac->lane_to_sds = 0xE4;
+ snprintf(buf, sizeof(buf),
+ "BGX%d LMAC%d mode: RGMII\n",
+ bgx->bgx_id, lmacid);
+ break;
+ case QLM_MODE_QSGMII:
+ if (qsgmii_configured)
+ continue;
+ if (lmacid == 0 || lmacid == 2) {
+ count = 4;
+ printf("BGX%d QLM%d LMAC%d mode: QSGMII\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ for (i = 0; i < count; i++) {
+ struct lmac *l;
+ int type;
+
+ l = &bgx->lmac[i];
+ l->lmac_type = 6;
+ type = l->lmac_type;
+ l->qlm_mode = QLM_MODE_QSGMII;
+ l->lane_to_sds = lmacid + i;
+ if (is_bgx_port_valid(bgx->bgx_id, i))
+ bgx_reg_write(bgx, i,
+ BGX_CMRX_CFG,
+ (type << 8) |
+ l->lane_to_sds);
+ }
+ qsgmii_configured = 1;
+ }
+ continue;
+ default:
+ continue;
+ }
+
+ /* Reset lmac to the unused slot */
+ if (is_bgx_port_valid(bgx->bgx_id, count) &&
+ lmac->qlm_mode != QLM_MODE_QSGMII) {
+ int lmac_en = 0;
+ int tmp, idx;
+
+ tlmac = &bgx->lmac[count];
+ tlmac->lmac_type = lmac->lmac_type;
+ idx = bgx->bgx_id;
+ tmp = count + inc;
+ /* Adjust lane_to_sds based on BGX-ENABLE */
+ for (; tmp < MAX_LMAC_PER_BGX; inc++) {
+ lmac_en = bgx_board_info[idx].lmac_enable[tmp];
+ if (lmac_en)
+ break;
+ tmp = count + inc;
+ }
+
+ if (inc != 0 && inc < MAX_LMAC_PER_BGX &&
+ lmac_en && inc != count)
+ tlmac->lane_to_sds =
+ lmac->lane_to_sds + abs(inc - count);
+ else
+ tlmac->lane_to_sds = lmac->lane_to_sds;
+ tlmac->qlm = lmac->qlm;
+ tlmac->qlm_mode = lmac->qlm_mode;
+
+ printf("%s", buf);
+ /* Initialize lmac_type and lane_to_sds */
+ bgx_reg_write(bgx, count, BGX_CMRX_CFG,
+ (tlmac->lmac_type << 8) |
+ tlmac->lane_to_sds);
+
+ if (tlmac->lmac_type == BGX_MODE_SGMII) {
+ if (tlmac->is_1gx) {
+ /* This is actually 1000BASE-X, so
+ * mark the LMAC as such.
+ */
+ bgx_reg_modify(bgx, count,
+ BGX_GMP_PCS_MISCX_CTL,
+ PCS_MISC_CTL_MODE);
+ }
+
+ if (!bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis) {
+ /* The Linux DTS does not disable
+ * autoneg for this LMAC (in SGMII or
+ * 1000BASE-X mode), so that means
+ * enable autoneg.
+ */
+ bgx_reg_modify(bgx, count,
+ BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_AN_EN);
+ }
+ }
+
+ count += 1;
+ }
+ }
+
+ /* Done probing all 4 lmacs, now clear qsgmii_configured */
+ qsgmii_configured = 0;
+
+ printf("BGX%d LMACs: %d\n", bgx->bgx_id, count);
+ bgx->lmac_count = count;
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, count);
+ bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, count);
+
+ bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+ if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+ printf("BGX%d BIST failed\n", bgx->bgx_id);
+
+ /* Set the backpressure AND mask */
+ for (i = 0; i < bgx->lmac_count; i++)
+ bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+ ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+ (i * MAX_BGX_CHANS_PER_LMAC));
+
+ /* Disable all MAC filtering */
+ for (i = 0; i < RX_DMAC_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+ /* Disable MAC steering (NCSI traffic) */
+ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+ int lmacid;
+
+ /* Read LMACx type to figure out QLM mode.
+ * This is configured by low-level firmware.
+ */
+ for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
+ int lmac_type;
+ int train_en;
+ int index = 0;
+
+ if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
+ bgx->bgx_id == 2))
+ index = (lmacid < 2) ? 0 : 2;
+
+ lmac = &bgx->lmac[lmacid];
+
+ /* check if QLM is programmed, if not, skip */
+ if (lmac->qlm == -1)
+ continue;
+
+ lmac_type = bgx_reg_read(bgx, index, BGX_CMRX_CFG);
+ lmac->lmac_type = (lmac_type >> 8) & 0x07;
+ debug("%s:%d:%d: lmac_type = %d, altpkg = %d\n", __func__,
+ bgx->bgx_id, lmacid, lmac->lmac_type, otx_is_altpkg());
+
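+ /* The per-lane KR training enables are presumably left in the
+ * GSER scratch register by the low-level firmware; they are
+ * treated here as one bit per lane.
+ */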
+ train_en = (readq(GSERX_SCRATCH(lmac->qlm))) & 0xf;
+ lmac->is_1gx = bgx_reg_read(bgx, index, BGX_GMP_PCS_MISCX_CTL)
+ & (PCS_MISC_CTL_MODE) ? true : false;
+
+ switch (lmac->lmac_type) {
+ case BGX_MODE_SGMII:
+ if (bgx->is_rgx) {
+ if (lmacid == 0) {
+ lmac->qlm_mode = QLM_MODE_RGMII;
+ debug("BGX%d LMAC%d mode: RGMII\n",
+ bgx->bgx_id, lmacid);
+ }
+ continue;
+ } else {
+ if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+ if (lmacid % 2)
+ continue;
+ }
+ lmac->qlm_mode = QLM_MODE_SGMII;
+ debug("BGX%d QLM%d LMAC%d mode: %s\n",
+ bgx->bgx_id, lmac->qlm, lmacid,
+ lmac->is_1gx ? "1000Base-X" : "SGMII");
+ }
+ break;
+ case BGX_MODE_XAUI:
+ if (bgx->bgx_id == 0 && otx_is_altpkg())
+ continue;
+ lmac->qlm_mode = QLM_MODE_XAUI;
+ if (lmacid != 0)
+ continue;
+ debug("BGX%d QLM%d LMAC%d mode: XAUI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ break;
+ case BGX_MODE_RXAUI:
+ if (bgx->bgx_id == 0 && otx_is_altpkg())
+ continue;
+ lmac->qlm_mode = QLM_MODE_RXAUI;
+ if (index == lmacid) {
+ debug("BGX%d QLM%d LMAC%d mode: RXAUI\n",
+ bgx->bgx_id, lmac->qlm, (index ? 1 : 0));
+ }
+ break;
+ case BGX_MODE_XFI:
+ if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+ if (lmacid % 2)
+ continue;
+ }
+ if ((lmacid < 2 && (train_en & (1 << lmacid))) ||
+ (train_en & (1 << (lmacid - 2)))) {
+ lmac->qlm_mode = QLM_MODE_10G_KR;
+ debug("BGX%d QLM%d LMAC%d mode: 10G_KR\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ } else {
+ lmac->qlm_mode = QLM_MODE_XFI;
+ debug("BGX%d QLM%d LMAC%d mode: XFI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ }
+ break;
+ case BGX_MODE_XLAUI:
+ if (bgx->bgx_id == 0 && otx_is_altpkg())
+ continue;
+ if (train_en) {
+ lmac->qlm_mode = QLM_MODE_40G_KR4;
+ if (lmacid != 0)
+ break;
+ debug("BGX%d QLM%d LMAC%d mode: 40G_KR4\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ } else {
+ lmac->qlm_mode = QLM_MODE_XLAUI;
+ if (lmacid != 0)
+ break;
+ debug("BGX%d QLM%d LMAC%d mode: XLAUI\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ }
+ break;
+ case BGX_MODE_QSGMII:
+ /* If QLM is configured as QSGMII, use lmac0 */
+ if (otx_is_soc(CN83XX) && lmacid == 2 &&
+ bgx->bgx_id != 2) {
+ //lmac->qlm_mode = QLM_MODE_DISABLED;
+ continue;
+ }
+
+ if (lmacid == 0 || lmacid == 2) {
+ lmac->qlm_mode = QLM_MODE_QSGMII;
+ debug("BGX%d QLM%d LMAC%d mode: QSGMII\n",
+ bgx->bgx_id, lmac->qlm, lmacid);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void bgx_set_board_info(int bgx_id, int *mdio_bus,
+ int *phy_addr, bool *autoneg_dis, bool *lmac_reg,
+ bool *lmac_enable)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_LMAC_PER_BGX; i++) {
+ bgx_board_info[bgx_id].phy_info[i].phy_addr = phy_addr[i];
+ bgx_board_info[bgx_id].phy_info[i].mdio_bus = mdio_bus[i];
+ bgx_board_info[bgx_id].phy_info[i].autoneg_dis = autoneg_dis[i];
+ bgx_board_info[bgx_id].lmac_reg[i] = lmac_reg[i];
+ bgx_board_info[bgx_id].lmac_enable[i] = lmac_enable[i];
+ debug("%s bgx_id %d lmac %d\n", __func__, bgx_id, i);
+ debug("phy addr %x mdio bus %d autoneg_dis %d lmac_reg %d\n",
+ bgx_board_info[bgx_id].phy_info[i].phy_addr,
+ bgx_board_info[bgx_id].phy_info[i].mdio_bus,
+ bgx_board_info[bgx_id].phy_info[i].autoneg_dis,
+ bgx_board_info[bgx_id].lmac_reg[i]);
+ debug("lmac_enable = %x\n",
+ bgx_board_info[bgx_id].lmac_enable[i]);
+ }
+}
+
+int octeontx_bgx_remove(struct udevice *dev)
+{
+ int lmacid;
+ u64 cfg;
+ int count = MAX_LMAC_PER_BGX;
+ struct bgx *bgx = dev_get_priv(dev);
+
+ if (!bgx->reg_base)
+ return 0;
+
+ if (bgx->is_rgx)
+ count = 1;
+
+ for (lmacid = 0; lmacid < count; lmacid++) {
+ struct lmac *lmac;
+
+ lmac = &bgx->lmac[lmacid];
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable PCS for 1G interface */
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg |= PCS_MRX_CTL_PWR_DN;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+ }
+
+ debug("%s disabling bgx%d lmacid%d\n", __func__, bgx->bgx_id,
+ lmacid);
+ bgx_lmac_disable(bgx, lmacid);
+ }
+ return 0;
+}
+
+int octeontx_bgx_probe(struct udevice *dev)
+{
+ struct bgx *bgx = dev_get_priv(dev);
+ u8 lmac = 0;
+ int qlm[4] = {-1, -1, -1, -1};
+ int bgx_idx, node;
+ int inc = 1;
+
+ bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+ PCI_REGION_MEM);
+ if (!bgx->reg_base) {
+ debug("No PCI region found\n");
+ return 0;
+ }
+
+#ifdef OCTEONTX_XCV
+ /* Use FAKE BGX2 for RGX interface */
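+ /* The RGX block is identified by 0x8 in BAR address bits [27:24];
+ * plain BGX instances carry their index in bits [25:24] instead
+ * (see the bgx_idx computation below).
+ */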
+ if ((((uintptr_t)bgx->reg_base >> 24) & 0xf) == 0x8) {
+ bgx->bgx_id = 2;
+ bgx->is_rgx = true;
+ for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
+ if (lmac == 0) {
+ bgx->lmac[lmac].lmacid = 0;
+ bgx->lmac[lmac].qlm = 0;
+ } else {
+ bgx->lmac[lmac].qlm = -1;
+ }
+ }
+ xcv_init_hw();
+ goto skip_qlm_config;
+ }
+#endif
+
+ node = node_id(bgx->reg_base);
+ bgx_idx = ((uintptr_t)bgx->reg_base >> 24) & 3;
+ bgx->bgx_id = (node * MAX_BGX_PER_NODE) + bgx_idx;
+ if (otx_is_soc(CN81XX))
+ inc = 2;
+ else if (otx_is_soc(CN83XX) && (bgx_idx == 2))
+ inc = 2;
+
+ for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac += inc) {
+ /* BGX3 (DLM4), has only 2 lanes */
+ if (otx_is_soc(CN83XX) && bgx_idx == 3 && lmac >= 2)
+ continue;
+ qlm[lmac + 0] = get_qlm_for_bgx(node, bgx_idx, lmac);
+ /* Each DLM has 2 lanes, configure both lanes with
+ * same qlm configuration
+ */
+ if (inc == 2)
+ qlm[lmac + 1] = qlm[lmac];
+ debug("qlm[%d] = %d\n", lmac, qlm[lmac]);
+ }
+
+ /* A BGX can take 1 or 2 DLMs. If neither DLM is configured
+ * as BGX, there is nothing to initialize.
+ */
+ if (otx_is_soc(CN81XX))
+ if ((qlm[0] == -1) && (qlm[2] == -1))
+ return -ENODEV;
+
+ /* MAP configuration registers */
+ for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
+ bgx->lmac[lmac].qlm = qlm[lmac];
+ bgx->lmac[lmac].lmacid = lmac;
+ }
+
+#ifdef OCTEONTX_XCV
+skip_qlm_config:
+#endif
+ bgx_vnic[bgx->bgx_id] = bgx;
+ bgx_get_qlm_mode(bgx);
+ debug("bgx_vnic[%u]: %p\n", bgx->bgx_id, bgx);
+
+ bgx_init_hw(bgx);
+
+ /* Init LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ struct lmac *tlmac = &bgx->lmac[lmac];
+
+ tlmac->dev = dev;
+ tlmac->init_pend = 1;
+ tlmac->bgx = bgx;
+ }
+
+ return 0;
+}
+
+U_BOOT_DRIVER(octeontx_bgx) = {
+ .name = "octeontx_bgx",
+ .id = UCLASS_MISC,
+ .probe = octeontx_bgx_probe,
+ .remove = octeontx_bgx_remove,
+ .priv_auto_alloc_size = sizeof(struct bgx),
+ .flags = DM_FLAG_OS_PREPARE,
+};
+
+static struct pci_device_id octeontx_bgx_supported[] = {
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BGX) },
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RGX) },
+ {}
+};
+
+U_BOOT_PCI_DEVICE(octeontx_bgx, octeontx_bgx_supported);
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef BGX_H
+#define BGX_H
+
+#include <asm/arch/board.h>
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_OCTEONTX_BGX 0xA026
+#define PCI_DEVICE_ID_OCTEONTX_RGX 0xA054
+
+#define MAX_LMAC_PER_BGX 4
+#define MAX_BGX_CHANS_PER_LMAC 16
+#define MAX_DMAC_PER_LMAC 8
+#define MAX_FRAME_SIZE 9216
+
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
+
+#define MAX_LMAC (MAX_BGX_PER_NODE * MAX_LMAC_PER_BGX)
+
+#define NODE_ID_MASK 0x300000000000
+#define NODE_ID(x) (((x) & NODE_ID_MASK) >> 44)
+
+/* Registers */
+#define GSERX_CFG(x) (0x87E090000080ull + (x) * 0x1000000ull)
+#define GSERX_SCRATCH(x) (0x87E090000020ull + (x) * 0x1000000ull)
+#define GSERX_PHY_CTL(x) (0x87E090000000ull + (x) * 0x1000000ull)
+#define GSERX_CFG_BGX BIT(2)
+#define GSER_RX_EIE_DETSTS(x) (0x87E090000150ull + (x) * 0x1000000ull)
+#define GSER_CDRLOCK (8)
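+/* GSER_CDRLOCK is the bit position of the per-lane CDR lock flags in
+ * GSER_RX_EIE_DETSTS (one bit per lane, up to four lanes).
+ */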
+#define GSER_BR_RXX_CTL(x, y) (0x87E090000400ull + (x) * 0x1000000ull + \
+ (y) * 0x80)
+#define GSER_BR_RXX_CTL_RXT_SWM BIT(2)
+#define GSER_BR_RXX_EER(x, y) (0x87E090000418ull + (x) * 0x1000000ull + \
+ (y) * 0x80)
+#define GSER_BR_RXX_EER_RXT_ESV BIT(14)
+#define GSER_BR_RXX_EER_RXT_EER BIT(15)
+#define EER_RXT_ESV (14)
+
+#define BGX_CMRX_CFG 0x00
+#define CMR_PKT_TX_EN BIT_ULL(13)
+#define CMR_PKT_RX_EN BIT_ULL(14)
+#define CMR_EN BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG 0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP 0x60
+#define BGX_CMRX_RX_STAT0 0x70
+#define BGX_CMRX_RX_STAT1 0x78
+#define BGX_CMRX_RX_STAT2 0x80
+#define BGX_CMRX_RX_STAT3 0x88
+#define BGX_CMRX_RX_STAT4 0x90
+#define BGX_CMRX_RX_STAT5 0x98
+#define BGX_CMRX_RX_STAT6 0xA0
+#define BGX_CMRX_RX_STAT7 0xA8
+#define BGX_CMRX_RX_STAT8 0xB0
+#define BGX_CMRX_RX_STAT9 0xB8
+#define BGX_CMRX_RX_STAT10 0xC0
+#define BGX_CMRX_RX_BP_DROP 0xC8
+#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMR_RX_DMACX_CAM 0x200
+#define RX_DMACX_CAM_EN BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x) ((x) << 49)
+#define RX_DMAC_COUNT 32
+#define BGX_CMR_RX_STREERING 0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT 8
+#define BGX_CMR_CHAN_MSK_AND 0x450
+#define BGX_CMR_BIST_STATUS 0x460
+#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_STAT0 0x600
+#define BGX_CMRX_TX_STAT1 0x608
+#define BGX_CMRX_TX_STAT2 0x610
+#define BGX_CMRX_TX_STAT3 0x618
+#define BGX_CMRX_TX_STAT4 0x620
+#define BGX_CMRX_TX_STAT5 0x628
+#define BGX_CMRX_TX_STAT6 0x630
+#define BGX_CMRX_TX_STAT7 0x638
+#define BGX_CMRX_TX_STAT8 0x640
+#define BGX_CMRX_TX_STAT9 0x648
+#define BGX_CMRX_TX_STAT10 0x650
+#define BGX_CMRX_TX_STAT11 0x658
+#define BGX_CMRX_TX_STAT12 0x660
+#define BGX_CMRX_TX_STAT13 0x668
+#define BGX_CMRX_TX_STAT14 0x670
+#define BGX_CMRX_TX_STAT15 0x678
+#define BGX_CMRX_TX_STAT16 0x680
+#define BGX_CMRX_TX_STAT17 0x688
+#define BGX_CMR_TX_LMACS 0x1000
+
+#define BGX_SPUX_CONTROL1 0x10000
+#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_LOOPBACK BIT_ULL(14)
+#define SPU_CTL_RESET BIT_ULL(15)
+#define BGX_SPUX_STATUS1 0x10008
+#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
+#define BGX_SPUX_STATUS2 0x10020
+#define SPU_STATUS2_RCVFLT BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS 0x10028
+#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1 0x10030
+#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL 0x10068
+#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
+#define BGX_SPUX_BR_PMD_LD_REP 0x10090
+#define BGX_SPUX_FEC_CONTROL 0x100A0
+#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL 0x100C8
+#define SPU_AN_CTL_AN_EN BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
+#define SPU_AN_CTL_AN_RESTART BIT_ULL(15)
+#define BGX_SPUX_AN_STATUS 0x100D0
+#define SPU_AN_STS_AN_COMPLETE BIT_ULL(5)
+#define BGX_SPUX_AN_ADV 0x100D8
+#define BGX_SPUX_MISC_CONTROL 0x10218
+#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
+#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S 0x10228
+#define BGX_SPUX_INT_ENA_W1C 0x10230
+#define BGX_SPUX_INT_ENA_W1S 0x10238
+#define BGX_SPU_DBG_CONTROL 0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_JABBER 0x20030
+#define BGX_SMUX_RX_CTL 0x20048
+#define SMU_RX_CTL_STATUS (3ull << 0)
+#define BGX_SMUX_TX_APPEND 0x20100
+#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_INT 0x20140
+#define BGX_SMUX_TX_CTL 0x20178
+#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
+#define BGX_SMUX_TX_THRESH 0x20180
+#define BGX_SMUX_CTL 0x20200
+#define SMU_CTL_RX_IDLE BIT_ULL(0)
+#define SMU_CTL_TX_IDLE BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL 0x30000
+#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
+#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
+#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
+#define PCS_MRX_CTL_RESET BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
+#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISCX_CTL_DISP_EN BIT_ULL(13)
+#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+#define PCS_MISC_CTL_MODE BIT_ULL(8)
+#define BGX_GMP_GMI_PRTX_CFG 0x38020
+#define GMI_PORT_CFG_SPEED BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER 0x38038
+#define BGX_GMP_GMI_TXX_THRESH 0x38210
+#define BGX_GMP_GMI_TXX_APPEND 0x38218
+#define BGX_GMP_GMI_TXX_SLOT 0x38220
+#define BGX_GMP_GMI_TXX_BURST 0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL 0x400008
+#define BGX_MSIX_PBA_0 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS 30
+#define BGX_LMAC_VEC_OFFSET 7
+#define BGX_MSIX_VEC_SHIFT 4
+
+#define CMRX_INT 0
+#define SPUX_INT 1
+#define SMUX_RX_INT 2
+#define SMUX_TX_INT 3
+#define GMPX_PCS_INT 4
+#define GMPX_GMI_RX_INT 5
+#define GMPX_GMI_TX_INT 6
+#define CMR_MEM_INT 28
+#define SPU_MEM_INT 29
+
+#define LMAC_INTR_LINK_UP BIT(0)
+#define LMAC_INTR_LINK_DOWN BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+ MCAST_MODE_REJECT,
+ MCAST_MODE_ACCEPT,
+ MCAST_MODE_CAM_FILTER,
+ RSVD
+};
+
+#define BCAST_ACCEPT 1
+#define CAM_ACCEPT 1
+
+int octeontx_bgx_initialize(unsigned int bgx_idx, unsigned int node);
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+void bgx_get_count(int node, int *bgx_count);
+int bgx_get_lmac_count(int node, int bgx);
+void bgx_print_stats(int bgx_idx, int lmac);
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
+#undef LINK_INTR_ENABLE
+
+enum qlm_mode {
+ QLM_MODE_SGMII, /* SGMII, each lane independent */
+ QLM_MODE_XAUI, /* 1 XAUI or DXAUI, 4 lanes */
+ QLM_MODE_RXAUI, /* 2 RXAUI, 2 lanes each */
+ QLM_MODE_XFI, /* 4 XFI, 1 lane each */
+ QLM_MODE_XLAUI, /* 1 XLAUI, 4 lanes each */
+ QLM_MODE_10G_KR, /* 4 10GBASE-KR, 1 lane each */
+ QLM_MODE_40G_KR4, /* 1 40GBASE-KR4, 4 lanes each */
+ QLM_MODE_QSGMII, /* 4 QSGMII, each lane independent */
+ QLM_MODE_RGMII, /* 1 RGX */
+};
+
+struct phy_info {
+ int mdio_bus;
+ int phy_addr;
+ bool autoneg_dis;
+};
+
+struct bgx_board_info {
+ struct phy_info phy_info[MAX_LMAC_PER_BGX];
+ bool lmac_reg[MAX_LMAC_PER_BGX];
+ bool lmac_enable[MAX_LMAC_PER_BGX];
+};
+
+enum LMAC_TYPE {
+ BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+ BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
+ BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+ BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+ BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_RGMII = 5,
+ BGX_MODE_QSGMII = 6,
+ BGX_MODE_INVALID = 7,
+};
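+/* These enumerators match the lmac_type field that bgx.c programs into
+ * BGX_CMRX_CFG bits [10:8].
+ */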
+
+int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr);
+
+#endif /* BGX_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include "bgx.h"
+
+#define PCI_DEVICE_ID_CAVIUM_NICVF_1 0x0011
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
+
+#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
+#define NIC_CHANS_PER_INF 128
+#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+#define PCI_MSIX_REG_BAR_NUM 4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED 128
+#define DEFAULT_NUM_VF_ENABLED 8
+
+#define NIC_TNS_BYPASS_MODE 0
+#define NIC_TNS_MODE 1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED BIT(0)
+#define NIC_TNS_ENABLED BIT(1)
+
+/* VNIC HW optimization features */
+#define VNIC_RX_CSUM_OFFLOAD_SUPPORT
+#undef VNIC_TX_CSUM_OFFLOAD_SUPPORT
+#undef VNIC_SG_SUPPORT
+#undef VNIC_TSO_SUPPORT
+#undef VNIC_LRO_SUPPORT
+#undef VNIC_RSS_SUPPORT
+
+/* TSO not supported in Thunder pass1 */
+#ifdef VNIC_TSO_SUPPORT
+#define VNIC_SW_TSO_SUPPORT
+#undef VNIC_HW_TSO_SUPPORT
+#endif
+
+/* ETHTOOL enable or disable, undef this to disable */
+#define NICVF_ETHTOOL_ENABLE
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS 64
+#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND 16
+
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC 64
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ 0
+#define NICVF_INTR_SQ 1
+#define NICVF_INTR_RBDR 2
+#define NICVF_INTR_PKT_DROP 3
+#define NICVF_INTR_TCP_TIMER 4
+#define NICVF_INTR_MBOX 5
+#define NICVF_INTR_QS_ERR 6
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS 10
+#define NIC_VF_MSIX_VECTORS 20
+
+#define NIC_PF_INTR_ID_ECC0_SBE 0
+#define NIC_PF_INTR_ID_ECC0_DBE 1
+#define NIC_PF_INTR_ID_ECC1_SBE 2
+#define NIC_PF_INTR_ID_ECC1_DBE 3
+#define NIC_PF_INTR_ID_ECC2_SBE 4
+#define NIC_PF_INTR_ID_ECC2_DBE 5
+#define NIC_PF_INTR_ID_ECC3_SBE 6
+#define NIC_PF_INTR_ID_ECC3_DBE 7
+#define NIC_PF_INTR_ID_MBOX0 8
+#define NIC_PF_INTR_ID_MBOX1 9
+
+/* Global timer for CQ timer thresh interrupts
+ * Calculated for an SCLK of 700MHz;
+ * the value written should be 1/16th of what is expected.
+ *
+ * 1 tick per ms: 700MHz => 700,000 clocks per ms, and 700,000 / 16 = 43,750
+ */
+#define NICPF_CLK_PER_INT_TICK 43750
+
+struct nicvf_cq_poll {
+ u8 cq_idx; /* Completion queue index */
+};
+
+#define NIC_MAX_RSS_HASH_BITS 8
+#define NIC_MAX_RSS_IDR_TBL_SIZE BIT(NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
+
+#ifdef VNIC_RSS_SUPPORT
+struct nicvf_rss_info {
+ bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
+#define RSS_IP_HASH_ENA BIT(1)
+#define RSS_TCP_HASH_ENA BIT(2)
+#define RSS_TCP_SYN_DIS BIT(3)
+#define RSS_UDP_HASH_ENA BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
+#define RSS_ROCE_ENA BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA BIT(8)
+ u64 cfg;
+ u8 hash_bits;
+ u16 rss_size;
+ u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ u64 key[RSS_HASH_KEY_SIZE];
+};
+#endif
+
+enum rx_stats_reg_offset {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_RED = 0x4,
+ RX_RED_OCTS = 0x5,
+ RX_ORUN = 0x6,
+ RX_ORUN_OCTS = 0x7,
+ RX_FCS = 0x8,
+ RX_L2ERR = 0x9,
+ RX_DRP_BCAST = 0xa,
+ RX_DRP_MCAST = 0xb,
+ RX_DRP_L3BCAST = 0xc,
+ RX_DRP_L3MCAST = 0xd,
+ RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+ TX_OCTS = 0x0,
+ TX_UCAST = 0x1,
+ TX_BCAST = 0x2,
+ TX_MCAST = 0x3,
+ TX_DROP = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+ u64 rx_bytes_ok;
+ u64 rx_ucast_frames_ok;
+ u64 rx_bcast_frames_ok;
+ u64 rx_mcast_frames_ok;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+ u64 rx_drop_red;
+ u64 rx_drop_red_bytes;
+ u64 rx_drop_overrun;
+ u64 rx_drop_overrun_bytes;
+ u64 rx_drop_bcast;
+ u64 rx_drop_mcast;
+ u64 rx_drop_l3_bcast;
+ u64 rx_drop_l3_mcast;
+ u64 tx_bytes_ok;
+ u64 tx_ucast_frames_ok;
+ u64 tx_bcast_frames_ok;
+ u64 tx_mcast_frames_ok;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* Rx */
+ u64 rx_frames_ok;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_jumbo;
+ u64 rx_drops;
+ /* Tx */
+ u64 tx_frames_ok;
+ u64 tx_drops;
+ u64 tx_busy;
+ u64 tx_tso;
+};
+
+struct hw_info {
+ u8 bgx_cnt;
+ u8 chans_per_lmac;
+ u8 chans_per_bgx; /* Rx/Tx chans */
+ u8 chans_per_rgx;
+ u8 chans_per_lbk;
+ u16 cpi_cnt;
+ u16 rssi_cnt;
+ u16 rss_ind_tbl_size;
+ u16 tl4_cnt;
+ u16 tl3_cnt;
+ u8 tl2_cnt;
+ u8 tl1_cnt;
+ bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
+ u8 model_id;
+};
+
+struct nicvf {
+ struct udevice *dev;
+ u8 vf_id;
+ bool sqs_mode:1;
+ bool loopback_supported:1;
+ u8 tns_mode;
+ u8 node;
+ u16 mtu;
+ struct queue_set *qs;
+#define MAX_SQS_PER_VF_SINGLE_NODE 5
+#define MAX_SQS_PER_VF 11
+ u8 num_qs;
+ void *addnl_qs;
+ u16 vf_mtu;
+ void __iomem *reg_base;
+#define MAX_QUEUES_PER_QSET 8
+ struct nicvf_cq_poll *napi[8];
+
+ u8 cpi_alg;
+
+ struct nicvf_hw_stats stats;
+ struct nicvf_drv_stats drv_stats;
+
+ struct nicpf *nicpf;
+
+ /* VF <-> PF mailbox communication */
+ bool pf_acked;
+ bool pf_nacked;
+ bool set_mac_pending;
+
+ bool link_up;
+ u8 duplex;
+ u32 speed;
+ u8 rev_id;
+ u8 rx_queues;
+ u8 tx_queues;
+
+ bool open;
+ bool rb_alloc_fail;
+ void *rcv_buf;
+ bool hw_tso;
+};
+
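+/* The node number lives in bits [45:44] of the device's physical
+ * address (cf. NODE_ID_MASK/NODE_ID() in bgx.h).
+ */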
+static inline int node_id(void *addr)
+{
+ return ((uintptr_t)addr >> 44) & 0x3;
+}
+
+struct nicpf {
+ struct udevice *udev;
+ struct hw_info *hw;
+ u8 node;
+ unsigned int flags;
+ u16 total_vf_cnt; /* Total num of VF supported */
+ u16 num_vf_en; /* No of VF enabled */
+ void __iomem *reg_base; /* Register start address */
+ u16 rss_ind_tbl_size;
+ u8 num_sqs_en; /* Secondary qsets enabled */
+ u64 nicvf[MAX_NUM_VFS_SUPPORTED];
+ u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+ u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
+ bool sqs_used[MAX_NUM_VFS_SUPPORTED];
+ struct pkind_cfg pkind;
+ u8 bgx_cnt;
+ u8 rev_id;
+#define NIC_SET_VF_LMAC_MAP(bgx, lmac) ((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
+#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) (((map) >> 4) & 0xF)
+#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) ((map) & 0xF)
+ u8 vf_lmac_map[MAX_LMAC];
+ u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u64 mac[MAX_NUM_VFS_SUPPORTED];
+ bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
+ u8 link[MAX_LMAC];
+ u8 duplex[MAX_LMAC];
+ u32 speed[MAX_LMAC];
+ bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
+ u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
+ u8 lmac_cnt;
+};
+
+/* PF <--> VF mailbox communication
+ * Eight 64-bit registers are shared between PF and VF, a separate set
+ * for each VF. Writing '1' into the last register, mbx7, marks the end
+ * of a message. This driver uses only the first two registers (128 bits)
+ * of each set per message.
+ */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_PF_VF_MBX_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
+#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
+#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
+#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
+#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
+
+struct nic_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 node_id;
+ bool tns_mode:1;
+ bool sqs_mode:1;
+ bool loopback_supported:1;
+ u8 mac_addr[6];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ u8 msg;
+ u8 num;
+ u8 sqs_count;
+ u64 cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 rq_num;
+ u64 cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 sq_num;
+ bool sqs_mode;
+ u64 cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 mac_addr[6];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rq_cnt;
+ u8 cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 hash_bits;
+ u8 tbl_len;
+ u8 tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rx;
+ u8 idx;
+ u64 stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ u8 msg;
+ u8 link_up;
+ u8 duplex;
+ u32 speed;
+};
+
+#ifdef VNIC_MULTI_QSET_SUPPORT
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+ u8 msg;
+ u8 vf_id;
+ u8 qs_count;
+};
+
+struct nicvf_ptr {
+ u8 msg;
+ u8 vf_id;
+ bool sqs_mode;
+ u8 sqs_id;
+ u64 nicvf;
+};
+#endif
+
+/* Set interface in loopback mode */
+struct set_loopback {
+ u8 msg;
+ u8 vf_id;
+ bool enable;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+ struct { u8 msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_stats_msg bgx_stats;
+ struct bgx_link_status link_status;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ struct sqs_alloc sqs_alloc;
+ struct nicvf_ptr nicvf;
+#endif
+ struct set_loopback lbk;
+};
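+
+/* Example: the READY handshake only needs the msg field on the way out;
+ * the PF replies by filling in the full nic_cfg_msg through the same
+ * two registers:
+ *
+ * union nic_mbx mbx = {};
+ *
+ * mbx.msg.msg = NIC_MBOX_MSG_READY;
+ * nicvf_send_msg_to_pf(nic, &mbx);
+ */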
+
+int nicvf_set_real_num_queues(struct udevice *dev,
+ int tx_queues, int rx_queues);
+int nicvf_open(struct udevice *dev);
+void nicvf_stop(struct udevice *dev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_update_stats(struct nicvf *nic);
+
+void nic_handle_mbx_intr(struct nicpf *nic, int vf);
+
+int bgx_poll_for_link(int node, int bgx_idx, int lmacid);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable);
+
+static inline bool pass1_silicon(unsigned int revision, int model_id)
+{
+ return ((revision < 8) && (model_id == 0x88));
+}
+
+static inline bool pass2_silicon(unsigned int revision, int model_id)
+{
+ return ((revision >= 8) && (model_id == 0x88));
+}
+
+#endif /* NIC_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <net.h>
+#include <netdev.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <dm.h>
+#include <misc.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+
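+/* Round n down to the nearest power of two, e.g. 200 -> 128, 256 -> 256.
+ * The OR cascade smears the top set bit into every lower position,
+ * giving 2^k - 1; subtracting the halved value leaves only that top bit.
+ */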
+unsigned long rounddown_pow_of_two(unsigned long n)
+{
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ n |= n >> 32;
+
+ return n - (n >> 1);
+}
+
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg);
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+ struct sq_cfg_msg *sq);
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf);
+static int nic_rcv_queue_sw_sync(struct nicpf *nic);
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+ writeq(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+ return readq(nic->reg_base + offset);
+}
+
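+/* Mailbox register sets are 1 << NIC_VF_NUM_SHIFT (2 MB) apart per VF */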
+static u64 nic_get_mbx_addr(int vf)
+{
+ return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+ void __iomem *mbx_addr = (void *)(nic->reg_base + nic_get_mbx_addr(vf));
+ u64 *msg = (u64 *)mbx;
+
+ /* In first revision HW, mbox interrupt is triggered
+ * when PF writes to MBOX(1), in next revisions when
+ * PF writes to MBOX(0)
+ */
+ if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
+ /* Write the interrupt-raising mailbox register last so
+ * the VF never sees a partially written message
+ */
+ writeq(msg[0], mbx_addr);
+ writeq(msg[1], mbx_addr + 8);
+ } else {
+ writeq(msg[1], mbx_addr + 8);
+ writeq(msg[0], mbx_addr);
+ }
+}
+
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ int bgx_idx, lmac, timeout = 5, link = -1;
+ const u8 *mac;
+
+ mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+ mbx.nic_cfg.vf_id = vf;
+
+ if (nic->flags & NIC_TNS_ENABLED)
+ mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
+ else
+ mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+ if (vf < nic->num_vf_en) {
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ memcpy((u8 *)&mbx.nic_cfg.mac_addr, mac, 6);
+
+ while (timeout-- && (link <= 0)) {
+ link = bgx_poll_for_link(nic->node, bgx_idx, lmac);
+ debug("Link status: %d\n", link);
+ if (link <= 0)
+ mdelay(2000);
+ }
+ }
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
+#endif
+ mbx.nic_cfg.node_id = nic->node;
+
+ mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
+
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_ACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message to indicate that the PF
+ * is not able to complete the requested action
+ * @vf: VF to which NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_NACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+ int bgx_idx, lmac_idx;
+
+ if (lbk->vf_id >= nic->num_vf_en)
+ return -1;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+ lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+ bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+ return 0;
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ u64 reg_addr;
+ u64 cfg;
+ int bgx, lmac;
+ int i;
+ int ret = 0;
+
+ nic->mbx_lock[vf] = true;
+
+ mbx_addr = nic_get_mbx_addr(vf);
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nic_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ debug("%s: Mailbox msg %d from VF%d\n", __func__, mbx.msg.msg, vf);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic_mbx_send_ready(nic, vf);
+ if (vf < nic->num_vf_en) {
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ }
+ ret = 1;
+ break;
+ case NIC_MBOX_MSG_QS_CFG:
+ reg_addr = NIC_PF_QSET_0_127_CFG |
+ (mbx.qs.num << NIC_QS_ID_SHIFT);
+ cfg = mbx.qs.cfg;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ /* Check if its a secondary Qset */
+ if (vf >= nic->num_vf_en) {
+ cfg = cfg & (~0x7FULL);
+ /* Assign this Qset to primary Qset's VF */
+ cfg |= nic->pqs_vf[vf];
+ }
+#endif
+ nic_reg_write(nic, reg_addr, cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ /* Enable CQE_RX2_S extension in CQE_RX descriptor.
+ * It is appended by default on 81xx/83xx chips; for
+ * consistency, enable the same on 88xx pass2, where
+ * it was first introduced.
+ */
+ if (pass2_silicon(nic->rev_id, nic->hw->model_id))
+ nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+ break;
+ case NIC_MBOX_MSG_RQ_BP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_SW_SYNC:
+ ret = nic_rcv_queue_sw_sync(nic);
+ break;
+ case NIC_MBOX_MSG_RQ_DROP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_SQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+ (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+ nic_tx_channel_cfg(nic, mbx.qs.num,
+ (struct sq_cfg_msg *)&mbx.sq);
+ break;
+ case NIC_MBOX_MSG_SET_MAC:
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ if (vf >= nic->num_vf_en)
+ break;
+#endif
+ lmac = mbx.mac.vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+ break;
+ case NIC_MBOX_MSG_SET_MAX_FRS:
+ ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+ mbx.frs.vf_id);
+ break;
+ case NIC_MBOX_MSG_CPI_CFG:
+ nic_config_cpi(nic, &mbx.cpi_cfg);
+ break;
+#ifdef VNIC_RSS_SUPPORT
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic_send_rss_size(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_RSS_CFG:
+ case NIC_MBOX_MSG_RSS_CFG_CONT:
+ nic_config_rss(nic, &mbx.rss_cfg);
+ break;
+#endif
+ case NIC_MBOX_MSG_CFG_DONE:
+ /* Last message of VF config msg sequence */
+ nic->vf_enabled[vf] = true;
+ if (vf >= nic->lmac_cnt)
+ goto unlock;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
+ goto unlock;
+ case NIC_MBOX_MSG_SHUTDOWN:
+ /* First msg in VF teardown sequence */
+ nic->vf_enabled[vf] = false;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ if (vf >= nic->num_vf_en)
+ nic->sqs_used[vf - nic->num_vf_en] = false;
+ nic->pqs_vf[vf] = 0;
+#endif
+ if (vf >= nic->lmac_cnt)
+ break;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
+ break;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic_alloc_sqs(nic, &mbx.sqs_alloc);
+ goto unlock;
+ case NIC_MBOX_MSG_NICVF_PTR:
+ nic->nicvf[vf] = mbx.nicvf.nicvf;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ nic_send_pnicvf(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ nic_send_snicvf(nic, &mbx.nicvf);
+ goto unlock;
+#endif
+ case NIC_MBOX_MSG_LOOPBACK:
+ ret = nic_config_loopback(nic, &mbx.lbk);
+ break;
+ default:
+ printf("Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+ break;
+ }
+
+ if (!ret)
+ nic_mbx_send_ack(nic, vf);
+ else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ nic_mbx_send_nack(nic, vf);
+unlock:
+ nic->mbx_lock[vf] = false;
+}
+
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+ int timeout = 20;
+
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+ while (timeout) {
+ if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+ break;
+ udelay(2000);
+ timeout--;
+ }
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+ if (!timeout) {
+ printf("Recevie queue software sync failed");
+ return 1;
+ }
+ return 0;
+}
+
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+ u64 *pkind = (u64 *)&nic->pkind;
+
+ if (new_frs > NIC_HW_MAX_FRS || new_frs < NIC_HW_MIN_FRS) {
+ printf("Invalid MTU setting from VF%d rejected,", vf);
+ printf(" should be between %d and %d\n", NIC_HW_MIN_FRS,
+ NIC_HW_MAX_FRS);
+ return 1;
+ }
+ new_frs += ETH_HLEN;
+ if (new_frs <= nic->pkind.maxlen)
+ return 0;
+
+ nic->pkind.maxlen = new_frs;
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *pkind);
+ return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+ int lmac;
+ u64 lmac_cfg;
+ struct hw_info *hw = nic->hw;
+ int max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ /* Max value that can be set is 60; limit to 52 here */
+ if (size > 52)
+ size = 52;
+
+ /* CN81XX has RGX configured as FAKE BGX, adjust max_lmac accordingly */
+ if (hw->chans_per_rgx)
+ max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+
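+ /* Bits <5:2> of LMAC_CFG hold the minimum packet size in units
+ * of 4 bytes
+ */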
+ for (lmac = 0; lmac < max_lmac; lmac++) {
+ lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+ lmac_cfg &= ~(0xF << 2);
+ lmac_cfg |= ((size / 4) << 2);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+ }
+}
+
+/* Function to check number of LMACs present and set VF to LMAC mapping.
+ * Mapping will be used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+ int bgx, bgx_count, next_bgx_lmac = 0;
+ int lmac, lmac_cnt = 0;
+ u64 lmac_credit;
+
+ nic->num_vf_en = 0;
+ if (nic->flags & NIC_TNS_ENABLED) {
+ nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
+ return;
+ }
+
+ bgx_get_count(nic->node, &bgx_count);
+ debug("bgx_count: %d\n", bgx_count);
+
+ for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
+ if (!(bgx_count & (1 << bgx)))
+ continue;
+ nic->bgx_cnt++;
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ debug("lmac_cnt: %d for BGX%d\n", lmac_cnt, bgx);
+ for (lmac = 0; lmac < lmac_cnt; lmac++)
+ nic->vf_lmac_map[next_bgx_lmac++] =
+ NIC_SET_VF_LMAC_MAP(bgx, lmac);
+ nic->num_vf_en += lmac_cnt;
+
+ /* Program LMAC credits */
+ lmac_credit = (1ull << 1); /* channel credit enable */
+ lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+ lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+ NIC_HW_MAX_FRS) / 16) << 12);
+ lmac = bgx * MAX_LMAC_PER_BGX;
+ for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+ lmac_credit);
+ }
+}
+
+static void nic_get_hw_info(struct nicpf *nic)
+{
+ u16 sdevid;
+ struct hw_info *hw = nic->hw;
+
+ dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_NODE;
+ hw->chans_per_lmac = 16;
+ hw->chans_per_bgx = 128;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 4096;
+ hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 2;
+ hw->tl1_per_bgx = true;
+ hw->model_id = 0x88;
+ break;
+ case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_NODE;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_rgx = 8;
+ hw->chans_per_lbk = 24;
+ hw->cpi_cnt = 512;
+ hw->rssi_cnt = 256;
+ hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 64;
+ hw->tl2_cnt = 16;
+ hw->tl1_cnt = 10;
+ hw->tl1_per_bgx = false;
+ hw->model_id = 0x81;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_NODE;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_lbk = 64;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 1024;
+ hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 18;
+ hw->tl1_per_bgx = false;
+ hw->model_id = 0x83;
+ break;
+ }
+
+ hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->udev);
+}
+
+static void nic_init_hw(struct nicpf *nic)
+{
+ int i;
+ u64 reg;
+ u64 *pkind = (u64 *)&nic->pkind;
+
+ /* Get HW capability info */
+ nic_get_hw_info(nic);
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+ /* Enable backpressure */
+ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | 0x08);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+ (1ULL << 63) | 0x09);
+
+ for (i = 0; i < NIC_MAX_CHANS; i++)
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (i << 3), 1);
+
+ if (nic->flags & NIC_TNS_ENABLED) {
+ reg = NIC_TNS_MODE << 7;
+ reg |= 0x06;
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
+ reg &= ~0xFull;
+ reg |= 0x07;
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
+ } else {
+ /* Disable TNS mode on both interfaces */
+ reg = NIC_TNS_BYPASS_MODE << 7;
+ reg |= 0x08; /* Block identifier */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
+ reg &= ~0xFull;
+ reg |= 0x09;
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
+ }
+
+ /* PKIND configuration */
+ nic->pkind.minlen = 0;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.lenerr_en = 1;
+ nic->pkind.rx_hdr = 0;
+ nic->pkind.hdr_sl = 0;
+
+ for (i = 0; i < NIC_MAX_PKIND; i++)
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), *pkind);
+
+ nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+ /* Timer config */
+ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+ struct hw_info *hw = nic->hw;
+ u32 vnic, bgx, lmac, chan;
+ u32 padd, cpi_count = 0;
+ u64 cpi_base, cpi, rssi_base, rssi;
+ u8 qset, rq_idx = 0;
+
+ vnic = cfg->vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+ rssi_base = vnic * hw->rss_ind_tbl_size;
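+ /* Each VF owns a contiguous block of CPIs and RSS indices;
+ * e.g. with NIC_MAX_CPI_PER_LMAC = 64, VF2's CPIs start at 128
+ */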
+
+ /* Rx channel configuration */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+ (1ull << 63) | (vnic << 0));
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+ ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+ if (cfg->cpi_alg == CPI_ALG_NONE)
+ cpi_count = 1;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+ cpi_count = 8;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+ cpi_count = 16;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+ cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+ /* RSS Qset, Qidx mapping */
+ qset = cfg->vf_id;
+ rssi = rssi_base;
+ for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | rq_idx);
+ rq_idx++;
+ }
+
+ rssi = 0;
+ cpi = cpi_base;
+ for (; cpi < (cpi_base + cpi_count); cpi++) {
+ /* Determine port to channel adder */
+ if (cfg->cpi_alg != CPI_ALG_DIFF)
+ padd = cpi % cpi_count;
+ else
+ padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+ /* Leave RSS_SIZE as '0' to disable RSS */
+ if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) |
+ (rssi_base + rssi));
+ } else {
+ /* Set MPI_ALG to '0' to disable MCAM parsing */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (padd << 16));
+ /* MPI index is same as CPI if MPI_ALG is not enabled */
+ nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (rssi_base + rssi));
+ }
+
+ if ((rssi + 1) >= cfg->rq_cnt)
+ continue;
+
+ if (cfg->cpi_alg == CPI_ALG_VLAN)
+ rssi++;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+ rssi = ((cpi - cpi_base) & 0xe) >> 1;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF)
+ rssi = ((cpi - cpi_base) & 0x38) >> 3;
+ }
+ nic->cpi_base[cfg->vf_id] = cpi_base;
+ nic->rssi_base[cfg->vf_id] = rssi_base;
+}
+
+/* Transmit channel configuration (TL4 -> TL3 -> Chan)
+ * VNIC0-SQ0 -> TL4(0) -> TL4A(0) -> TL3[0] -> BGX0/LMAC0/Chan0
+ * VNIC1-SQ0 -> TL4(8) -> TL4A(2) -> TL3[2] -> BGX0/LMAC1/Chan0
+ * VNIC2-SQ0 -> TL4(16) -> TL4A(4) -> TL3[4] -> BGX0/LMAC2/Chan0
+ * VNIC3-SQ0 -> TL4(24) -> TL4A(6) -> TL3[6] -> BGX0/LMAC3/Chan0
+ * VNIC4-SQ0 -> TL4(512) -> TL4A(128) -> TL3[128] -> BGX1/LMAC0/Chan0
+ * VNIC5-SQ0 -> TL4(520) -> TL4A(130) -> TL3[130] -> BGX1/LMAC1/Chan0
+ * VNIC6-SQ0 -> TL4(528) -> TL4A(132) -> TL3[132] -> BGX1/LMAC2/Chan0
+ * VNIC7-SQ0 -> TL4(536) -> TL4A(134) -> TL3[134] -> BGX1/LMAC3/Chan0
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+ struct sq_cfg_msg *sq)
+{
+ struct hw_info *hw = nic->hw;
+ u32 bgx, lmac, chan;
+ u32 tl2, tl3, tl4;
+ u32 rr_quantum;
+ u8 sq_idx = sq->sq_num;
+ u8 pqs_vnic = vnic;
+ int svf;
+ u16 sdevid;
+
+ dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+
+ /* 24 bytes for FCS, IPG and preamble */
+ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+ /* On 88xx, TL4s 0-511 transmit via BGX0 and
+ * TL4s 512-1023 transmit via BGX1.
+ */
+ if (hw->tl1_per_bgx) {
+ tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+ if (!sq->sqs_mode) {
+ tl4 += (lmac * MAX_QUEUES_PER_QSET);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF_SINGLE_NODE; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+ tl4 += (lmac * MAX_QUEUES_PER_QSET *
+ MAX_SQS_PER_VF_SINGLE_NODE);
+ tl4 += (svf * MAX_QUEUES_PER_QSET);
+ }
+ } else {
+ tl4 = (vnic * MAX_QUEUES_PER_QSET);
+ }
+
+ tl4 += sq_idx;
+
+ tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
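+ /* Worked example for 88xx (tl4_cnt = 1024, tl3_cnt = 256,
+ * bgx_cnt = 2): VNIC1/SQ0 gives tl4 = 0 * 512 + 1 * 8 + 0 = 8
+ * and tl3 = 8 / (1024 / 256) = 2, matching the table above
+ */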
+ nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+ ((u64)vnic << NIC_QS_ID_SHIFT) |
+ ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+ nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+ ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+
+ /* On 88xx channels 0-127 are for BGX0 and
+ * channels 128-255 for BGX1.
+ *
+ * On 81xx/83xx the TL3_CHAN reg should be configured with the
+ * channel within the LMAC, i.e. 0-7, and not the actual channel
+ * number as on 88xx
+ */
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ if (hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ else
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
+
+ /* Enable backpressure on the channel */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+ tl2 = tl3 >> 2;
+ nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+ nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+ /* No priorities as of now */
+ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+ /* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
+ * on 81xx/83xx TL2 needs to be configured to transmit to one of the
+ * possible LMACs.
+ *
+ * This register doesn't exist on 88xx.
+ */
+ if (!hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+ lmac + (bgx * MAX_LMAC_PER_BGX));
+}
+
+int nic_initialize(struct udevice *dev)
+{
+ struct nicpf *nic = dev_get_priv(dev);
+
+ nic->udev = dev;
+ nic->hw = calloc(1, sizeof(struct hw_info));
+ if (!nic->hw)
+ return -ENOMEM;
+
+ /* MAP PF's configuration registers */
+ nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+ PCI_REGION_MEM);
+ if (!nic->reg_base) {
+ printf("Cannot map config register space, aborting\n");
+ goto exit;
+ }
+
+ nic->node = node_id(nic->reg_base);
+ dm_pci_read_config8(dev, PCI_REVISION_ID, &nic->rev_id);
+
+ /* By default set NIC in TNS bypass mode */
+ nic->flags &= ~NIC_TNS_ENABLED;
+
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
+ nic_set_lmac_vf_mapping(nic);
+
+ /* Set RSS TBL size for each VF */
+ nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+ nic->rss_ind_tbl_size = rounddown_pow_of_two(nic->rss_ind_tbl_size);
+
+ return 0;
+exit:
+ free(nic->hw);
+ return -ENODEV;
+}
+
+int octeontx_nic_probe(struct udevice *dev)
+{
+ int ret = 0;
+ struct nicpf *nicpf = dev_get_priv(dev);
+
+ nicpf->udev = dev;
+ ret = nic_initialize(dev);
+ if (ret < 0) {
+ printf("couldn't initialize NIC PF\n");
+ return ret;
+ }
+
+ ret = pci_sriov_init(dev, nicpf->num_vf_en);
+ if (ret < 0)
+ printf("enabling SRIOV failed for num VFs %d\n",
+ nicpf->num_vf_en);
+
+ return ret;
+}
+
+U_BOOT_DRIVER(octeontx_nic) = {
+ .name = "octeontx_nic",
+ .id = UCLASS_MISC,
+ .probe = octeontx_nic_probe,
+ .priv_auto_alloc_size = sizeof(struct nicpf),
+};
+
+static struct pci_device_id octeontx_nic_supported[] = {
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NIC) },
+ {}
+};
+
+U_BOOT_PCI_DEVICE(octeontx_nic, octeontx_nic_supported);
+
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define NIC_PF_REG_COUNT 29573
+#define NIC_VF_REG_COUNT 249
+
+/* Physical function register offsets */
+#define NIC_PF_CFG (0x0000)
+#define NIC_PF_STATUS (0x0010)
+
+#define NIC_PF_INTR_TIMER_CFG (0x0030)
+#define NIC_PF_BIST_STATUS (0x0040)
+#define NIC_PF_SOFT_RESET (0x0050)
+
+#define NIC_PF_TCP_TIMER (0x0060)
+#define NIC_PF_BP_CFG (0x0080)
+#define NIC_PF_RRM_CFG (0x0088)
+#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CNM_CF (0x00A8)
+#define NIC_PF_CNM_STATUS (0x00B0)
+#define NIC_PF_CQ_AVG_CFG (0x00C0)
+#define NIC_PF_RRM_AVG_CFG (0x00C8)
+
+#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
+#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
+#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
+#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
+#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
+
+#define NIC_PF_MAILBOX_INT (0x0410)
+#define NIC_PF_MAILBOX_INT_W1S (0x0430)
+#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
+#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
+
+#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_RX_CFG (0x05D0)
+#define NIC_PF_PKIND_0_15_CFG (0x0600)
+
+#define NIC_PF_ECC0_FLIP0 (0x1000)
+#define NIC_PF_ECC1_FLIP0 (0x1008)
+#define NIC_PF_ECC2_FLIP0 (0x1010)
+#define NIC_PF_ECC3_FLIP0 (0x1018)
+#define NIC_PF_ECC0_FLIP1 (0x1080)
+#define NIC_PF_ECC1_FLIP1 (0x1088)
+#define NIC_PF_ECC2_FLIP1 (0x1090)
+#define NIC_PF_ECC3_FLIP1 (0x1098)
+#define NIC_PF_ECC0_CDIS (0x1100)
+#define NIC_PF_ECC1_CDIS (0x1108)
+#define NIC_PF_ECC2_CDIS (0x1110)
+#define NIC_PF_ECC3_CDIS (0x1118)
+#define NIC_PF_BIST0_STATUS (0x1280)
+#define NIC_PF_BIST1_STATUS (0x1288)
+#define NIC_PF_BIST2_STATUS (0x1290)
+#define NIC_PF_BIST3_STATUS (0x1298)
+
+#define NIC_PF_ECC0_SBE_INT (0x2000)
+#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
+#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
+#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
+#define NIC_PF_ECC0_DBE_INT (0x2100)
+#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
+#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
+#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
+
+#define NIC_PF_ECC1_SBE_INT (0x2200)
+#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
+#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
+#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
+#define NIC_PF_ECC1_DBE_INT (0x2300)
+#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
+#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
+#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
+
+#define NIC_PF_ECC2_SBE_INT (0x2400)
+#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
+#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
+#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
+#define NIC_PF_ECC2_DBE_INT (0x2500)
+#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
+#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
+#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
+
+#define NIC_PF_ECC3_SBE_INT (0x2600)
+#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
+#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
+#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
+#define NIC_PF_ECC3_DBE_INT (0x2700)
+#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
+#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
+#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+
+#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_MPI_0_2047_CFG (0x210000)
+#define NIC_PF_RSSI_0_4097_RQ (0x220000)
+#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
+#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
+#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
+#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
+#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
+
+#define NIC_PF_SW_SYNC_RX (0x490000)
+
+#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
+#define NIC_PF_TL2_0_63_CFG (0x500000)
+#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_LMAC (0x540000)
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
+#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
+#define NIC_PF_TL3_0_255_CFG (0x600000)
+#define NIC_PF_TL3_0_255_CHAN (0x620000)
+#define NIC_PF_TL3_0_255_PIR (0x640000)
+#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
+#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
+#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
+#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
+#define NIC_PF_TL4_0_1023_CFG (0x800000)
+#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
+#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
+#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
+#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
+
+#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
+#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
+#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
+#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
+#define NIC_PF_QSET_0_127_CFG (0x20010000)
+#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
+#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
+#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
+#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
+#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
+
+#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
+#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
+#define NIC_PF_MSIX_PBA_0 (0x0F0000)
+
+/* Virtual function register offsets */
+#define NIC_VNIC_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_QSET_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
+#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
+
+#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
+#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
+#define NIC_VF_MSIX_PBA (0x0F0000)
+
+/* Offsets within registers */
+#define NIC_MSIX_VEC_SHIFT 4
+#define NIC_Q_NUM_SHIFT 18
+#define NIC_QS_ID_SHIFT 21
+#define NIC_VF_NUM_SHIFT 21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ uint64_t reserved_42_63:22;
+ uint64_t hdr_sl:5; /* Header skip length */
+ uint64_t rx_hdr:3; /* TNS Receive header present */
+ uint64_t lenerr_en:1; /* L2 length error check enable */
+ uint64_t reserved_32_32:1;
+ uint64_t maxlen:16; /* Max frame size */
+ uint64_t minlen:16; /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ uint64_t minlen:16;
+ uint64_t maxlen:16;
+ uint64_t reserved_32_32:1;
+ uint64_t lenerr_en:1;
+ uint64_t rx_hdr:3;
+ uint64_t hdr_sl:5;
+ uint64_t reserved_42_63:22;
+#endif
+};
+
+static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
+ __attribute__ ((pure, always_inline));
+static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
+{
+ assert(param1 <= 1);
+ return 0x87E0E0000000 + (param1 << 24);
+}
+
+#define BGXX_PF_BAR0_SIZE 0x400000
+#define NIC_PF_BAR0 0x843000000000
+#define NIC_PF_BAR0_SIZE 0x40000000
+
+static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
+ __attribute__ ((pure, always_inline));
+static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
+{
+ assert(param1 <= 127);
+
+ return 0x8430A0000000 + (param1 << 21);
+}
+
+#define NIC_VFX_BAR0_SIZE 0x200000
+
+#endif /* NIC_REG_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <phy.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+ writeq(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+ return readq(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val)
+{
+ void *addr = nic->reg_base + offset;
+
+ writeq(val, (void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+ void *addr = nic->reg_base + offset;
+
+ return readq((void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic);
+
+/* VF -> PF mailbox communication */
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+ u64 *msg = (u64 *)mbx;
+
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+ int timeout = NIC_PF_VF_MBX_TIMEOUT;
+ int sleep = 10;
+
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+
+ nicvf_write_to_mbx(nic, mbx);
+
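+ /* U-Boot runs without interrupts, so invoke the PF's mailbox
+ * handler directly instead of waiting for an IRQ
+ */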
+ nic_handle_mbx_intr(nic->nicpf, nic->vf_id);
+
+ /* Wait for the message to be acked, timeout 2 sec */
+ while (!nic->pf_acked) {
+ if (nic->pf_nacked)
+ return -1;
+ mdelay(sleep);
+ nicvf_handle_mbx_intr(nic);
+
+ if (nic->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ printf("PF didn't ack to mbox msg %d from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated to.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_READY;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ printf("PF didn't respond to READY msg\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ struct eth_pdata *pdata = dev_get_platdata(nic->dev);
+ u64 *mbx_data;
+ u64 mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ debug("Mbox message: msg: 0x%x\n", mbx.msg.msg);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->pf_acked = true;
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ if (!nic->set_mac_pending)
+ memcpy(pdata->enetaddr,
+ mbx.nic_cfg.mac_addr, 6);
+ nic->loopback_supported = mbx.nic_cfg.loopback_supported;
+ nic->link_up = false;
+ nic->duplex = 0;
+ nic->speed = 0;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->pf_acked = true;
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ if (nic->link_up) {
+ printf("%s: Link is Up %d Mbps %s\n",
+ nic->dev->name, nic->speed,
+ nic->duplex == 1 ?
+ "Full duplex" : "Half duplex");
+ } else {
+ printf("%s: Link is Down\n", nic->dev->name);
+ }
+ break;
+ default:
+ printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+ break;
+ }
+
+ nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev)
+{
+ union nic_mbx mbx = {};
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+
+ mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6);
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+ int err;
+
+ nic->num_qs = 1;
+
+ /* Enable Qset */
+ nicvf_qset_config(nic, true);
+
+ /* Initialize queues and HW for data transfer */
+ err = nicvf_config_data_transfer(nic, true);
+
+ if (err) {
+ printf("Failed to alloc/config VF's QSet resources\n");
+ return err;
+ }
+ return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct nicvf *nic,
+ struct cmp_queue *cq,
+ void *cq_desc, int cqe_type)
+{
+ struct cqe_send_t *cqe_tx;
+ struct snd_queue *sq;
+ struct sq_hdr_subdesc *hdr;
+
+ cqe_tx = (struct cqe_send_t *)cq_desc;
+ sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+ return;
+
+ nicvf_check_cqe_tx_errs(nic, cq, cq_desc);
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+}
+
+static int nicvf_rcv_pkt_handler(struct nicvf *nic,
+ struct cmp_queue *cq, void *cq_desc,
+ void **ppkt, int cqe_type)
+{
+ void *pkt;
+
+ size_t pkt_len;
+ struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc;
+ int err = 0;
+
+ /* Check for errors */
+ err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc);
+ if (err && !cqe_rx->rb_cnt)
+ return -1;
+
+ pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len);
+ if (!pkt) {
+ debug("Packet not received\n");
+ return -1;
+ }
+
+ *ppkt = pkt;
+
+ return pkt_len;
+}
+
+int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len)
+{
+ int cq_qnum = 0;
+ int processed_sq_cqe = 0;
+ int processed_rq_cqe = 0;
+ int processed_cqe = 0;
+
+ unsigned long cqe_count, cqe_head;
+ struct queue_set *qs = nic->qs;
+ struct cmp_queue *cq = &qs->cq[cq_qnum];
+ struct cqe_rx_t *cq_desc;
+
+ /* Get the number of valid CQ entries to process */
+ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum);
+ cqe_count &= 0xFFFF;
+ if (!cqe_count)
+ return 0;
+
+ /* Get head of the valid CQ entries */
+ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum);
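+ /* The head pointer field starts at bit 9 of the HEAD register */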
+ cqe_head >>= 9;
+ cqe_head &= 0xFFFF;
+
+ if (cqe_count) {
+ /* Get the CQ descriptor */
+ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+ cqe_head++;
+ cqe_head &= (cq->dmem.q_len - 1);
+ /* Initiate prefetch for next descriptor */
+ prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+ debug("%s: Got Rx CQE\n", nic->dev->name);
+ *pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
+ ppkt, CQE_TYPE_RX);
+ processed_rq_cqe++;
+ break;
+ case CQE_TYPE_SEND:
+ debug("%s: Got Tx CQE\n", nic->dev->name);
+ nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND);
+ processed_sq_cqe++;
+ break;
+ default:
+ debug("%s: Got CQ type %u\n", nic->dev->name,
+ cq_desc->cqe_type);
+ break;
+ }
+ processed_cqe++;
+ }
+
+ /* Dequeue CQE */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+ cq_qnum, processed_cqe);
+
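+ /* Ensure the doorbell write reaches HW before continuing */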
+ asm volatile ("dsb sy");
+
+ return (processed_sq_cqe | processed_rq_cqe);
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+void nicvf_handle_qs_err(struct nicvf *nic)
+{
+ struct queue_set *qs = nic->qs;
+ int qidx;
+ u64 status;
+
+ /* Check if it is CQ err */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+ qidx);
+ if (!(status & CQ_ERR_MASK))
+ continue;
+ /* Process already queued CQEs and reconfig CQ */
+ nicvf_sq_disable(nic, qidx);
+ nicvf_cmp_queue_config(nic, qs, qidx, true);
+ nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx);
+ nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+ }
+}
+
+static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
+{
+ struct nicvf *nic = dev_get_priv(dev);
+
+ if (pkt && pkt_len)
+ free(pkt);
+ nicvf_refill_rbdr(nic);
+ return 0;
+}
+
+static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len)
+{
+ struct nicvf *nic = dev_get_priv(dev);
+ int ret = 0;
+ int rcv_len = 0;
+ unsigned int timeout = 5000;
+ void *rpkt = NULL;
+
+ if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) {
+ printf("VF%d: TX ring full\n", nic->vf_id);
+ return -1;
+ }
+
+ /* check and update CQ for pkt sent */
+ while (!ret && timeout--) {
+ ret = nicvf_cq_handler(nic, &rpkt, &rcv_len);
+ if (!ret) {
+ debug("%s: %d, Not sent\n", __func__, __LINE__);
+ udelay(10);
+ }
+ }
+
+ return 0;
+}
+
+static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct nicvf *nic = dev_get_priv(dev);
+ void *pkt;
+ int pkt_len = 0;
+#ifdef DEBUG
+ u8 *dpkt;
+ int i, j;
+#endif
+
+ nicvf_cq_handler(nic, &pkt, &pkt_len);
+
+ if (pkt_len) {
+#ifdef DEBUG
+ dpkt = pkt;
+ printf("RX packet contents:\n");
+ for (i = 0; i < 8; i++) {
+ puts("\t");
+ for (j = 0; j < 10; j++)
+ printf("%02x ", dpkt[i * 10 + j]);
+ puts("\n");
+ }
+#endif
+ *packetp = pkt;
+ }
+
+ return pkt_len;
+}
+
+void nicvf_stop(struct udevice *dev)
+{
+ struct nicvf *nic = dev_get_priv(dev);
+
+ if (!nic->open)
+ return;
+
+ /* Free resources */
+ nicvf_config_data_transfer(nic, false);
+
+ /* Disable HW Qset */
+ nicvf_qset_config(nic, false);
+
+ nic->open = false;
+}
+
+int nicvf_open(struct udevice *dev)
+{
+ int err;
+ struct nicvf *nic = dev_get_priv(dev);
+
+ nicvf_hw_set_mac_addr(nic, dev);
+
+ /* Configure CPI algorithm */
+ nic->cpi_alg = CPI_ALG_NONE;
+ nicvf_config_cpi(nic);
+
+ /* Initialize the queues */
+ err = nicvf_init_resources(nic);
+ if (err)
+ return -1;
+
+ if (!nicvf_check_pf_ready(nic))
+ return -1;
+
+ nic->open = true;
+
+ /* Make sure queue initialization is written */
+ asm volatile("dsb sy");
+
+ return 0;
+}
+
+int nicvf_write_hwaddr(struct udevice *dev)
+{
+ unsigned char ethaddr[ARP_HLEN];
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ struct nicvf *nic = dev_get_priv(dev);
+
+ /* If lower level firmware fails to set proper MAC
+ * u-boot framework updates MAC to random address.
+ * Use this hook to update mac address in environment.
+ */
+ if (!eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr)) {
+ eth_env_set_enetaddr_by_index("eth", dev->seq, pdata->enetaddr);
+ debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+ }
+ eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr);
+ if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) {
+ debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+ nicvf_hw_set_mac_addr(nic, dev);
+ }
+ return 0;
+}
+
+static void nicvf_probe_mdio_devices(void)
+{
+ struct udevice *pdev;
+ int err;
+ static int probed;
+
+ if (probed)
+ return;
+
+ err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_SMI, 0,
+ &pdev);
+ if (err)
+ debug("%s couldn't find SMI device\n", __func__);
+ probed = 1;
+}
+
+int nicvf_initialize(struct udevice *dev)
+{
+ struct nicvf *nicvf = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ int ret = 0, bgx, lmac;
+ char name[16];
+ unsigned char ethaddr[ARP_HLEN];
+ struct udevice *pfdev;
+ struct nicpf *pf;
+ static int vfid;
+
+ if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) {
+ printf("%s NIC PF device not found..VF probe failed\n",
+ __func__);
+ return -1;
+ }
+ pf = dev_get_priv(pfdev);
+ nicvf->vf_id = vfid++;
+ nicvf->dev = dev;
+ nicvf->nicpf = pf;
+
+ nicvf_probe_mdio_devices();
+
+ /* Enable TSO support */
+ nicvf->hw_tso = true;
+
+ nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+ PCI_REGION_MEM);
+
+ debug("nicvf->reg_base: %p\n", nicvf->reg_base);
+
+ if (!nicvf->reg_base) {
+ printf("Cannot map config register space, aborting\n");
+ ret = -1;
+ goto fail;
+ }
+
+ ret = nicvf_set_qset_resources(nicvf);
+ if (ret)
+ return -1;
+
+ sprintf(name, "vnic%u", nicvf->vf_id);
+ debug("%s name %s\n", __func__, name);
+ device_set_name(dev, name);
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
+ debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac);
+ debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n",
+ __func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev,
+ pdata);
+
+ fdt_board_get_ethaddr(bgx, lmac, ethaddr);
+
+ debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr);
+
+ if (is_valid_ethaddr(ethaddr)) {
+ memcpy(pdata->enetaddr, ethaddr, ARP_HLEN);
+ eth_env_set_enetaddr_by_index("eth", dev->seq, ethaddr);
+ }
+ debug("%s enetaddr %pM ethaddr %pM\n", __func__,
+ pdata->enetaddr, ethaddr);
+
+fail:
+ return ret;
+}
+
+int octeontx_vnic_probe(struct udevice *dev)
+{
+ return nicvf_initialize(dev);
+}
+
+static const struct eth_ops octeontx_vnic_ops = {
+ .start = nicvf_open,
+ .stop = nicvf_stop,
+ .send = nicvf_xmit,
+ .recv = nicvf_recv,
+ .free_pkt = nicvf_free_pkt,
+ .write_hwaddr = nicvf_write_hwaddr,
+};
+
+U_BOOT_DRIVER(octeontx_vnic) = {
+ .name = "vnic",
+ .id = UCLASS_ETH,
+ .probe = octeontx_vnic_probe,
+ .ops = &octeontx_vnic_ops,
+ .priv_auto_alloc_size = sizeof(struct nicvf),
+ .platdata_auto_alloc_size = sizeof(struct eth_pdata),
+};
+
+static struct pci_device_id octeontx_vnic_supported[] = {
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) },
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) },
+ {}
+};
+
+U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <cpu_func.h>
+#include <dm/device.h>
+#include <malloc.h>
+#include <net.h>
+#include <phy.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+ u64 reg, int bit_pos, int bits, int val)
+{
+ u64 bit_mask;
+ u64 reg_val;
+ int timeout = 10;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return 0;
+ udelay(2000);
+ timeout--;
+ }
+ printf("Poll on reg 0x%llx failed\n", reg);
+ return 1;
+}
+
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+ int q_len, int desc_size, int align_bytes)
+{
+ dmem->q_len = q_len;
+ dmem->size = (desc_size * q_len) + align_bytes;
+ /* Save address, need it while freeing */
+ dmem->unalign_base = calloc(1, dmem->size);
+ if (!dmem->unalign_base)
+ return -1;
+
+ dmem->dma = (uintptr_t)dmem->unalign_base;
+
+ /* Align memory address for 'align_bytes' */
+ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+ dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+
+ return 0;
+}
+
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+ if (!dmem)
+ return;
+
+ free(dmem->unalign_base);
+
+ dmem->unalign_base = NULL;
+ dmem->base = NULL;
+}
+
+static void *nicvf_rb_ptr_to_pkt(struct nicvf *nic, uintptr_t rb_ptr)
+{
+ return (void *)rb_ptr;
+}
+
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+ int ring_len, int buf_size)
+{
+ int idx;
+ uintptr_t rbuf;
+ struct rbdr_entry_t *desc;
+
+ if (nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+ sizeof(struct rbdr_entry_t),
+ NICVF_RCV_BUF_ALIGN_BYTES)) {
+ printf("Unable to allocate memory for rcv buffer ring\n");
+ return -1;
+ }
+
+ rbdr->desc = rbdr->dmem.base;
+ /* Buffer size has to be in multiples of 128 bytes */
+ rbdr->dma_size = buf_size;
+ rbdr->enable = true;
+ rbdr->thresh = RBDR_THRESH;
+
+ debug("%s: %d: allocating %lld bytes for rcv buffers\n",
+ __func__, __LINE__,
+ ring_len * buf_size + NICVF_RCV_BUF_ALIGN_BYTES);
+ rbdr->buf_mem = (uintptr_t)calloc(1, ring_len * buf_size
+ + NICVF_RCV_BUF_ALIGN_BYTES);
+
+ if (!rbdr->buf_mem) {
+ printf("Unable to allocate memory for rcv buffers\n");
+ return -1;
+ }
+
+ rbdr->buffers = NICVF_ALIGNED_ADDR(rbdr->buf_mem,
+ NICVF_RCV_BUF_ALIGN_BYTES);
+
+ debug("%s: %d: rbdr->buf_mem: %lx, rbdr->buffers: %lx\n",
+ __func__, __LINE__, rbdr->buf_mem, rbdr->buffers);
+
+ for (idx = 0; idx < ring_len; idx++) {
+ rbuf = rbdr->buffers + DMA_BUFFER_LEN * idx;
+ desc = GET_RBDR_DESC(rbdr, idx);
+ desc->buf_addr = rbuf >> NICVF_RCV_BUF_ALIGN;
+ flush_dcache_range((uintptr_t)desc,
+ (uintptr_t)desc + sizeof(*desc));
+ }
+ return 0;
+}
+
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+ if (!rbdr)
+ return;
+
+ rbdr->enable = false;
+ if (!rbdr->dmem.base)
+ return;
+
+ debug("%s: %d: rbdr->buf_mem: %p\n", __func__,
+ __LINE__, (void *)rbdr->buf_mem);
+ free((void *)rbdr->buf_mem);
+
+ /* Free RBDR ring */
+ nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers */
+void nicvf_refill_rbdr(struct nicvf *nic)
+{
+ struct queue_set *qs = nic->qs;
+ int rbdr_idx = qs->rbdr_cnt;
+ unsigned long qcount, head, tail, rb_cnt;
+ struct rbdr *rbdr;
+
+ if (!rbdr_idx)
+ return;
+ rbdr_idx--;
+ rbdr = &qs->rbdr[rbdr_idx];
+ /* Check if it's enabled */
+ if (!rbdr->enable) {
+ printf("Receive queue %d is disabled\n", rbdr_idx);
+ return;
+ }
+
+ /* check if valid descs reached or crossed threshold level */
+ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+ head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, rbdr_idx);
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx);
+
+ qcount &= 0x7FFFF;
+
+ rb_cnt = qs->rbdr_len - qcount - 1;
+
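+ /* Receive buffers are allocated once at init and reused, so
+ * there is nothing to refill; simply hand the free descriptors
+ * back to HW
+ */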
+ debug("%s: %d: qcount: %lu, head: %lx, tail: %lx, rb_cnt: %lu\n",
+ __func__, __LINE__, qcount, head, tail, rb_cnt);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, rb_cnt);
+
+ asm volatile ("dsb sy");
+}
+
+/* TBD: how to handle full packets received in CQ
+ * i.e conversion of buffers into SKBs
+ */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+ struct cmp_queue *cq, int q_len)
+{
+ if (nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len,
+ CMP_QUEUE_DESC_SIZE,
+ NICVF_CQ_BASE_ALIGN_BYTES)) {
+ printf("Unable to allocate memory for completion queue\n");
+ return -1;
+ }
+ cq->desc = cq->dmem.base;
+ if (!pass1_silicon(nic->rev_id, nic->nicpf->hw->model_id))
+ cq->thresh = CMP_QUEUE_CQE_THRESH;
+ else
+ cq->thresh = 0;
+ cq->intr_timer_thresh = CMP_QUEUE_TIMER_THRESH;
+
+ return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+ if (!cq)
+ return;
+ if (!cq->dmem.base)
+ return;
+
+ nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+static int nicvf_init_snd_queue(struct nicvf *nic,
+ struct snd_queue *sq, int q_len)
+{
+ if (nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len,
+ SND_QUEUE_DESC_SIZE,
+ NICVF_SQ_BASE_ALIGN_BYTES)) {
+ printf("Unable to allocate memory for send queue\n");
+ return -1;
+ }
+
+ sq->desc = sq->dmem.base;
+ sq->skbuff = calloc(q_len, sizeof(u64));
+ sq->head = 0;
+ sq->tail = 0;
+ sq->free_cnt = q_len - 1;
+ sq->thresh = SND_QUEUE_THRESH;
+
+ return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ debug("%s: %d\n", __func__, __LINE__);
+ free(sq->skbuff);
+
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+ /* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+ * before reclaiming.
+ */
+ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ if (((fifo_state >> 62) & 0x03) == 0x3)
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+ while (1) {
+ tmp = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+ qidx);
+ if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+ break;
+ mdelay(2000);
+ timeout--;
+ if (!timeout) {
+ printf("Failed polling on prefetch status\n");
+ return;
+ }
+ }
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct rcv_queue *rq;
+ union {
+ struct rq_cfg s;
+ u64 u;
+ } rq_cfg;
+
+ rq = &qs->rq[qidx];
+ rq->enable = enable;
+
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+ if (!rq->enable) {
+ nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ return;
+ }
+
+ rq->cq_qs = qs->vnic_id;
+ rq->cq_idx = qidx;
+ rq->start_rbdr_qs = qs->vnic_id;
+ rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ rq->cont_rbdr_qs = qs->vnic_id;
+ rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ /* all writes of RBDR data to be loaded into L2 cache as well */
+ rq->caching = 1;
+
+ /* Send a mailbox msg to PF to config RQ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = qs->vnic_id;
+ mbx.rq.rq_num = qidx;
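+ /* Field layout below: caching[26], cq_qs[25:19], cq_idx[18:16],
+ * cont_rbdr_qs[15:9], cont_qs_rbdr_idx[8], start_rbdr_qs[7:1],
+ * start_qs_rbdr_idx[0]
+ */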
+ mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ (rq->cont_qs_rbdr_idx << 8) |
+ (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* RQ drop config
+ * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+ nicvf_send_msg_to_pf(nic, &mbx);
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
+
+ /* Enable Receive queue */
+ rq_cfg.s.ena = 1;
+ rq_cfg.s.tcp_ena = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.u);
+}
+
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct cmp_queue *cq;
+ union {
+ u64 u;
+ struct cq_cfg s;
+ } cq_cfg;
+
+ cq = &qs->cq[qidx];
+ cq->enable = enable;
+
+ if (!cq->enable) {
+ nicvf_reclaim_cmp_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+ qidx, (u64)(cq->dmem.phys_base));
+
+ /* Enable Completion queue */
+ cq_cfg.s.ena = 1;
+ cq_cfg.s.reset = 0;
+ cq_cfg.s.caching = 0;
+ cq_cfg.s.qsize = CMP_QSIZE;
+ cq_cfg.s.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.u);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
+ cq->intr_timer_thresh);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct snd_queue *sq;
+
+ union {
+ struct sq_cfg s;
+ u64 u;
+ } sq_cfg;
+
+ sq = &qs->sq[qidx];
+ sq->enable = enable;
+
+ if (!sq->enable) {
+ nicvf_reclaim_snd_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+ sq->cq_qs = qs->vnic_id;
+ sq->cq_idx = qidx;
+
+ /* Send a mailbox msg to PF to config SQ */
+ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = qs->vnic_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.sqs_mode = nic->sqs_mode;
+ mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+ qidx, (u64)(sq->dmem.phys_base));
+
+ /* Enable send queue & set queue size */
+ sq_cfg.s.ena = 1;
+ sq_cfg.s.reset = 0;
+ sq_cfg.s.ldwb = 0;
+ sq_cfg.s.qsize = SND_QSIZE;
+ sq_cfg.s.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.u);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct rbdr *rbdr;
+ union {
+ struct rbdr_cfg s;
+ u64 u;
+ } rbdr_cfg;
+
+ rbdr = &qs->rbdr[qidx];
+ nicvf_reclaim_rbdr(nic, rbdr, qidx);
+ if (!enable)
+ return;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+ qidx, (u64)(rbdr->dmem.phys_base));
+
+ /* Enable RBDR & set queue size */
+ /* Buffer size should be in multiples of 128 bytes */
+ rbdr_cfg.s.ena = 1;
+ rbdr_cfg.s.reset = 0;
+ rbdr_cfg.s.ldwb = 0;
+ rbdr_cfg.s.qsize = RBDR_SIZE;
+ rbdr_cfg.s.avg_con = 0;
+ rbdr_cfg.s.lines = rbdr->dma_size / 128;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, rbdr_cfg.u);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ qidx, qs->rbdr_len - 1);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+ qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct queue_set *qs = nic->qs;
+ struct qs_cfg *qs_cfg;
+
+ if (!qs) {
+ printf("Qset is still not allocated, don't init queues\n");
+ return;
+ }
+
+ qs->enable = enable;
+ qs->vnic_id = nic->vf_id;
+
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = qs->vnic_id;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+ mbx.qs.sqs_count = nic->sqs_count;
+#endif
+
+ mbx.qs.cfg = 0;
+ qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+ if (qs->enable) {
+ qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ qs_cfg->vnic = qs->vnic_id;
+ }
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Free receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+ /* Free completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+ /* Free send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Alloc receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+ DMA_BUFFER_LEN))
+ goto alloc_fail;
+ }
+
+ /* Alloc send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ goto alloc_fail;
+ }
+
+ /* Alloc completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+ goto alloc_fail;
+ }
+
+ return 0;
+alloc_fail:
+ nicvf_free_resources(nic);
+ return -1;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+ struct queue_set *qs;
+
+ qs = calloc(1, sizeof(struct queue_set));
+ if (!qs)
+ return -1;
+ nic->qs = qs;
+
+ /* Set count of each queue */
+ qs->rbdr_cnt = RBDR_CNT;
+ qs->rq_cnt = 1;
+ qs->sq_cnt = SND_QUEUE_CNT;
+ qs->cq_cnt = CMP_QUEUE_CNT;
+
+ /* Set queue lengths */
+ qs->rbdr_len = RCV_BUF_COUNT;
+ qs->sq_len = SND_QUEUE_LEN;
+ qs->cq_len = CMP_QUEUE_LEN;
+
+ nic->rx_queues = qs->rq_cnt;
+ nic->tx_queues = qs->sq_cnt;
+
+ return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+ bool disable = false;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+
+ if (!qs)
+ return 0;
+
+ if (enable) {
+ if (nicvf_alloc_resources(nic))
+ return -1;
+
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, enable);
+ } else {
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+ nicvf_free_resources(nic);
+ }
+
+ return 0;
+}
+
+/* Get a free descriptor from the SQ
+ * returns the descriptor index
+ */
+static int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ int qentry;
+
+ qentry = sq->tail;
+ sq->free_cnt -= desc_cnt;
+ sq->tail += desc_cnt;
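+ /* q_len is a power of 2, so masking wraps the tail around the ring */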
+ sq->tail &= (sq->dmem.q_len - 1);
+
+ return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ sq->free_cnt += desc_cnt;
+ sq->head += desc_cnt;
+ sq->head &= (sq->dmem.q_len - 1);
+}
+
+static int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+ qentry++;
+ qentry &= (sq->dmem.q_len - 1);
+ return qentry;
+}
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg |= NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg &= ~NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct udevice *dev, struct snd_queue *sq,
+ int qidx)
+{
+ u64 head;
+ struct nicvf *nic = dev_get_priv(dev);
+ struct sq_hdr_subdesc *hdr;
+
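+ /* HW HEAD is a byte offset; each subdescriptor is 16 bytes (hence >> 4) */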
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+
+ while (sq->head != head) {
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+ nicvf_put_sq_desc(sq, 1);
+ continue;
+ }
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ }
+}
+
+/* Get the number of SQ descriptors needed to xmit this packet */
+static int nicvf_sq_subdesc_required(struct nicvf *nic)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
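+ /* This driver posts one HEADER plus one GATHER subdescriptor per packet */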
+ return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
+ int subdesc_cnt, void *pkt, size_t pkt_len)
+{
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ sq->skbuff[qentry] = (uintptr_t)pkt;
+
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = pkt_len;
+
+ flush_dcache_range((uintptr_t)hdr,
+ (uintptr_t)hdr + sizeof(struct sq_hdr_subdesc));
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ size_t size, uintptr_t data)
+{
+ struct sq_gather_subdesc *gather;
+
+ qentry &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+ memset(gather, 0, SND_QUEUE_DESC_SIZE);
+ gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
+ gather->size = size;
+ gather->addr = data;
+
+ flush_dcache_range((uintptr_t)gather,
+ (uintptr_t)gather + sizeof(struct sq_gather_subdesc));
+}
+
+/* Append a packet buffer to the SQ for transmission. */
+int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_size)
+{
+ int subdesc_cnt;
+ int sq_num = 0, qentry;
+ struct queue_set *qs;
+ struct snd_queue *sq;
+
+ qs = nic->qs;
+ sq = &qs->sq[sq_num];
+
+ subdesc_cnt = nicvf_sq_subdesc_required(nic);
+ if (subdesc_cnt > sq->free_cnt)
+ goto append_fail;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ /* Add SQ header subdesc */
+ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+ pkt, pkt_size);
+
+ /* Add SQ gather subdescs */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, pkt_size, (uintptr_t)(pkt));
+
+ flush_dcache_range((uintptr_t)pkt,
+ (uintptr_t)pkt + pkt_size);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ asm volatile ("dsb sy");
+
+ /* Inform HW to xmit new packet */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, subdesc_cnt);
+ return 1;
+
+append_fail:
+ printf("Not enough SQ descriptors to xmit pkt\n");
+ return 0;
+}
+
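+/* rb_lens entries are packed four per 64-bit word in the CQE; on a
+ * big-endian host the u16s within each word are in reverse order,
+ * so remap the index accordingly.
+ */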
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len)
+{
+ int frag;
+ int payload_len = 0, tot_len;
+ void *pkt = NULL, *pkt_buf = NULL, *buffer;
+ struct cqe_rx_t *cqe_rx;
+ struct rbdr *rbdr;
+ struct rcv_queue *rq;
+ struct queue_set *qs = nic->qs;
+ u16 *rb_lens = NULL;
+ u64 *rb_ptrs = NULL;
+
+ cqe_rx = (struct cqe_rx_t *)cq_desc;
+
+ rq = &qs->rq[cqe_rx->rq_idx];
+ rbdr = &qs->rbdr[rq->start_qs_rbdr_idx];
+ rb_lens = cq_desc + (3 * sizeof(u64)); /* Use offsetof */
+ /* On all chips except 88xx pass1, CQE_RX2_S is appended to CQE_RX
+ * at word6, so the buffer pointers move down by one word.
+ *
+ * Use the existing 'hw_tso' flag, which is set for all chips except
+ * 88xx pass1, instead of an additional cache line access (or miss)
+ * from reading the PCI device's revision.
+ */
+ if (!nic->hw_tso)
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ else
+ rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
+
+ /*
+ * Figure out packet length to create packet buffer
+ */
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++)
+ payload_len += rb_lens[frag_num(frag)];
+ *pkt_len = payload_len;
+ /* align payload length down to 8 bytes and add 8 bytes of slack */
+ tot_len = (payload_len & (~0x7)) + 8;
+ buffer = calloc(1, tot_len);
+ if (!buffer) {
+ printf("%s - Failed to allocate packet buffer\n", __func__);
+ return NULL;
+ }
+ pkt_buf = buffer;
+ debug("total pkt buf %p len %ld tot_len %d\n", pkt_buf, *pkt_len,
+ tot_len);
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+ payload_len = rb_lens[frag_num(frag)];
+
+ invalidate_dcache_range((uintptr_t)(*rb_ptrs),
+ (uintptr_t)(*rb_ptrs) + rbdr->dma_size);
+
+ /* First fragment */
+ *rb_ptrs = *rb_ptrs - cqe_rx->align_pad;
+
+ pkt = nicvf_rb_ptr_to_pkt(nic, *rb_ptrs);
+
+ invalidate_dcache_range((uintptr_t)pkt,
+ (uintptr_t)pkt + payload_len);
+
+ if (cqe_rx->align_pad)
+ pkt += cqe_rx->align_pad;
+ debug("pkt_buf %p, pkt %p payload_len %d\n", pkt_buf, pkt,
+ payload_len);
+ memcpy(buffer, pkt, payload_len);
+ buffer += payload_len;
+ /* Next buffer pointer */
+ rb_ptrs++;
+ }
+ return pkt_buf;
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ printf("Failed to clear interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+ struct rcv_queue *rq;
+
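+/* Stat registers are 64 bits; (reg) << 3 turns the index into a byte offset */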
+#define GET_RQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+ (rq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3))
+
+ rq = &nic->qs->rq[rq_idx];
+ rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+ rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+ struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+ (sq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3))
+
+ sq = &nic->qs->sq[sq_idx];
+ sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+ sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive cmp.queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, void *cq_desc)
+{
+ struct cqe_rx_t *cqe_rx;
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ cqe_rx = (struct cqe_rx_t *)cq_desc;
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+ stats->rx.errop.good++;
+ return 0;
+ }
+
+ switch (cqe_rx->err_level) {
+ case CQ_ERRLVL_MAC:
+ stats->rx.errlvl.mac_errs++;
+ break;
+ case CQ_ERRLVL_L2:
+ stats->rx.errlvl.l2_errs++;
+ break;
+ case CQ_ERRLVL_L3:
+ stats->rx.errlvl.l3_errs++;
+ break;
+ case CQ_ERRLVL_L4:
+ stats->rx.errlvl.l4_errs++;
+ break;
+ }
+
+ switch (cqe_rx->err_opcode) {
+ case CQ_RX_ERROP_RE_PARTIAL:
+ stats->rx.errop.partial_pkts++;
+ break;
+ case CQ_RX_ERROP_RE_JABBER:
+ stats->rx.errop.jabber_errs++;
+ break;
+ case CQ_RX_ERROP_RE_FCS:
+ stats->rx.errop.fcs_errs++;
+ break;
+ case CQ_RX_ERROP_RE_TERMINATE:
+ stats->rx.errop.terminate_errs++;
+ break;
+ case CQ_RX_ERROP_RE_RX_CTL:
+ stats->rx.errop.bgx_rx_errs++;
+ break;
+ case CQ_RX_ERROP_PREL2_ERR:
+ stats->rx.errop.prel2_errs++;
+ break;
+ case CQ_RX_ERROP_L2_FRAGMENT:
+ stats->rx.errop.l2_frags++;
+ break;
+ case CQ_RX_ERROP_L2_OVERRUN:
+ stats->rx.errop.l2_overruns++;
+ break;
+ case CQ_RX_ERROP_L2_PFCS:
+ stats->rx.errop.l2_pfcs++;
+ break;
+ case CQ_RX_ERROP_L2_PUNY:
+ stats->rx.errop.l2_puny++;
+ break;
+ case CQ_RX_ERROP_L2_MAL:
+ stats->rx.errop.l2_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_L2_OVERSIZE:
+ stats->rx.errop.l2_oversize++;
+ break;
+ case CQ_RX_ERROP_L2_UNDERSIZE:
+ stats->rx.errop.l2_undersize++;
+ break;
+ case CQ_RX_ERROP_L2_LENMISM:
+ stats->rx.errop.l2_len_mismatch++;
+ break;
+ case CQ_RX_ERROP_L2_PCLP:
+ stats->rx.errop.l2_pclp++;
+ break;
+ case CQ_RX_ERROP_IP_NOT:
+ stats->rx.errop.non_ip++;
+ break;
+ case CQ_RX_ERROP_IP_CSUM_ERR:
+ stats->rx.errop.ip_csum_err++;
+ break;
+ case CQ_RX_ERROP_IP_MAL:
+ stats->rx.errop.ip_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_MALD:
+ stats->rx.errop.ip_payload_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_HOP:
+ stats->rx.errop.ip_hop_errs++;
+ break;
+ case CQ_RX_ERROP_L3_ICRC:
+ stats->rx.errop.l3_icrc_errs++;
+ break;
+ case CQ_RX_ERROP_L3_PCLP:
+ stats->rx.errop.l3_pclp++;
+ break;
+ case CQ_RX_ERROP_L4_MAL:
+ stats->rx.errop.l4_malformed++;
+ break;
+ case CQ_RX_ERROP_L4_CHK:
+ stats->rx.errop.l4_csum_errs++;
+ break;
+ case CQ_RX_ERROP_UDP_LEN:
+ stats->rx.errop.udp_len_err++;
+ break;
+ case CQ_RX_ERROP_L4_PORT:
+ stats->rx.errop.bad_l4_port++;
+ break;
+ case CQ_RX_ERROP_TCP_FLAG:
+ stats->rx.errop.bad_tcp_flag++;
+ break;
+ case CQ_RX_ERROP_TCP_OFFSET:
+ stats->rx.errop.tcp_offset_errs++;
+ break;
+ case CQ_RX_ERROP_L4_PCLP:
+ stats->rx.errop.l4_pclp++;
+ break;
+ case CQ_RX_ERROP_RBDR_TRUNC:
+ stats->rx.errop.pkt_truncated++;
+ break;
+ }
+
+ return 1;
+}
+
+/* Check for errors in the send cmp.queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, void *cq_desc)
+{
+ struct cqe_send_t *cqe_tx;
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ cqe_tx = (struct cqe_send_t *)cq_desc;
+ switch (cqe_tx->send_status) {
+ case CQ_TX_ERROP_GOOD:
+ stats->tx.good++;
+ return 0;
+ case CQ_TX_ERROP_DESC_FAULT:
+ stats->tx.desc_fault++;
+ break;
+ case CQ_TX_ERROP_HDR_CONS_ERR:
+ stats->tx.hdr_cons_err++;
+ break;
+ case CQ_TX_ERROP_SUBDC_ERR:
+ stats->tx.subdesc_err++;
+ break;
+ case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+ stats->tx.imm_size_oflow++;
+ break;
+ case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+ stats->tx.data_seq_err++;
+ break;
+ case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+ stats->tx.mem_seq_err++;
+ break;
+ case CQ_TX_ERROP_LOCK_VIOL:
+ stats->tx.lock_viol++;
+ break;
+ case CQ_TX_ERROP_DATA_FAULT:
+ stats->tx.data_fault++;
+ break;
+ case CQ_TX_ERROP_TSTMP_CONFLICT:
+ stats->tx.tstmp_conflict++;
+ break;
+ case CQ_TX_ERROP_TSTMP_TIMEOUT:
+ stats->tx.tstmp_timeout++;
+ break;
+ case CQ_TX_ERROP_MEM_FAULT:
+ stats->tx.mem_fault++;
+ break;
+ case CQ_TX_ERROP_CK_OVERLAP:
+ stats->tx.csum_overlap++;
+ break;
+ case CQ_TX_ERROP_CK_OFLOW:
+ stats->tx.csum_overflow++;
+ break;
+ }
+
+ return 1;
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+/* VF's queue interrupt ranges */
+#define NICVF_INTR_ID_CQ 0
+#define NICVF_INTR_ID_SQ 8
+#define NICVF_INTR_ID_RBDR 16
+#define NICVF_INTR_ID_MISC 18
+#define NICVF_INTR_ID_QS_ERR 19
+
+#define RBDR_SIZE0 0ULL /* 8K entries */
+#define RBDR_SIZE1 1ULL /* 16K entries */
+#define RBDR_SIZE2 2ULL /* 32K entries */
+#define RBDR_SIZE3 3ULL /* 64K entries */
+#define RBDR_SIZE4 4ULL /* 128K entries */
+#define RBDR_SIZE5 5ULL /* 256K entries */
+#define RBDR_SIZE6 6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
+
+/* Default queue count per QS, its lengths and threshold values */
+#define RBDR_CNT 1
+#define RCV_QUEUE_CNT 1
+#define SND_QUEUE_CNT 1
+#define CMP_QUEUE_CNT 1 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE SND_QUEUE_SIZE0
+#define SND_QUEUE_LEN BIT_ULL((SND_QSIZE + 10))
+#define SND_QUEUE_THRESH 2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT 2
+#define MAX_CQE_PER_PKT_XMIT 2
+
+#define CMP_QSIZE CMP_QUEUE_SIZE0
+#define CMP_QUEUE_LEN BIT_ULL((CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH 0
+#define CMP_QUEUE_TIMER_THRESH 1 /* 1 ms */
+
+#define RBDR_SIZE RBDR_SIZE0
+#define RCV_BUF_COUNT BIT_ULL((RBDR_SIZE + 13))
+#define RBDR_THRESH (RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
+#define RCV_FRAG_LEN DMA_BUFFER_LEN
+
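+/* Reserve CQ space for TX completions; the RQ drop level is expressed
+ * in units of 256 CQEs
+ */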
+#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) *\
+ MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
+
+/* Descriptor size */
+#define SND_QUEUE_DESC_SIZE 16 /* 128 bits */
+#define CMP_QUEUE_DESC_SIZE 512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN 7
+#define NICVF_RCV_BUF_ALIGN_BYTES BIT_ULL(NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET BIT_ULL(41)
+#define NICVF_SQ_RESET BIT_ULL(17)
+#define NICVF_RBDR_RESET BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+ CQ_RX_ERROP_RE_NONE = 0x0,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+ CQ_TX_ERROP_GOOD = 0x0,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+struct cmp_queue_stats {
+ struct rx_stats {
+ struct {
+ u64 mac_errs;
+ u64 l2_errs;
+ u64 l3_errs;
+ u64 l4_errs;
+ } errlvl;
+ struct {
+ u64 good;
+ u64 partial_pkts;
+ u64 jabber_errs;
+ u64 fcs_errs;
+ u64 terminate_errs;
+ u64 bgx_rx_errs;
+ u64 prel2_errs;
+ u64 l2_frags;
+ u64 l2_overruns;
+ u64 l2_pfcs;
+ u64 l2_puny;
+ u64 l2_hdr_malformed;
+ u64 l2_oversize;
+ u64 l2_undersize;
+ u64 l2_len_mismatch;
+ u64 l2_pclp;
+ u64 non_ip;
+ u64 ip_csum_err;
+ u64 ip_hdr_malformed;
+ u64 ip_payload_malformed;
+ u64 ip_hop_errs;
+ u64 l3_icrc_errs;
+ u64 l3_pclp;
+ u64 l4_malformed;
+ u64 l4_csum_errs;
+ u64 udp_len_err;
+ u64 bad_l4_port;
+ u64 bad_tcp_flag;
+ u64 tcp_offset_errs;
+ u64 l4_pclp;
+ u64 pkt_truncated;
+ } errop;
+ } rx;
+ struct tx_stats {
+ u64 good;
+ u64 desc_fault;
+ u64 hdr_cons_err;
+ u64 subdesc_err;
+ u64 imm_size_oflow;
+ u64 data_seq_err;
+ u64 mem_seq_err;
+ u64 lock_viol;
+ u64 data_fault;
+ u64 tstmp_conflict;
+ u64 tstmp_timeout;
+ u64 mem_fault;
+ u64 csum_overlap;
+ u64 csum_overflow;
+ } tx;
+};
+
+enum RQ_SQ_STATS {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+ u64 bytes;
+ u64 pkts;
+};
+
+struct q_desc_mem {
+ uintptr_t dma;
+ u64 size;
+ u16 q_len;
+ uintptr_t phys_base;
+ void *base;
+ void *unalign_base;
+ bool allocated;
+};
+
+struct rbdr {
+ bool enable;
+ u32 dma_size;
+ u32 thresh; /* Threshold level for interrupt */
+ void *desc;
+ u32 head;
+ u32 tail;
+ struct q_desc_mem dmem;
+ uintptr_t buf_mem;
+ uintptr_t buffers;
+};
+
+struct rcv_queue {
+ bool enable;
+ struct rbdr *rbdr_start;
+ struct rbdr *rbdr_cont;
+ bool en_tcp_reassembly;
+ u8 cq_qs; /* CQ's QS to which this RQ is assigned */
+ u8 cq_idx; /* CQ index (0 to 7) in the QS */
+ u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
+ u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
+ u8 start_rbdr_qs; /* First buffer ptrs - QS num */
+ u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
+ u8 caching;
+ struct rx_tx_queue_stats stats;
+};
+
+struct cmp_queue {
+ bool enable;
+ u16 intr_timer_thresh;
+ u16 thresh;
+ void *desc;
+ struct q_desc_mem dmem;
+ struct cmp_queue_stats stats;
+};
+
+struct snd_queue {
+ bool enable;
+ u8 cq_qs; /* CQ's QS to which this SQ is pointing */
+ u8 cq_idx; /* CQ index (0 to 7) in the above QS */
+ u16 thresh;
+ u32 free_cnt;
+ u32 head;
+ u32 tail;
+ u64 *skbuff;
+ void *desc;
+ struct q_desc_mem dmem;
+ struct rx_tx_queue_stats stats;
+};
+
+struct queue_set {
+ bool enable;
+ bool be_en;
+ u8 vnic_id;
+ u8 rq_cnt;
+ u8 cq_cnt;
+ u64 cq_len;
+ u8 sq_cnt;
+ u64 sq_len;
+ u8 rbdr_cnt;
+ u64 rbdr_len;
+ struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
+ struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
+ struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
+ struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+};
+
+#define GET_RBDR_DESC(RING, idx)\
+ (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+ (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+ (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define CQ_WR_FULL BIT(26)
+#define CQ_WR_DISABLE BIT(25)
+#define CQ_WR_FAULT BIT(24)
+#define CQ_CQE_COUNT (0xFFFF << 0)
+
+#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct udevice *dev,
+ struct snd_queue *sq, int qidx);
+int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_len);
+
+void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len);
+void nicvf_refill_rbdr(struct nicvf *nic);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, void *cq_desc);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, void *cq_desc);
+#endif /* NICVF_QUEUES_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD = 0x0,
+ NIC_SEND_LD_TYPE_E_LDT = 0x1,
+ NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE = 0x0,
+ ETYPE_ALG_SKIP = 0x1,
+ ETYPE_ALG_ENDPARSE = 0x2,
+ ETYPE_ALG_VLAN = 0x3,
+ ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+ L3TYPE_NONE = 0x00,
+ L3TYPE_GRH = 0x01,
+ L3TYPE_IPV4 = 0x04,
+ L3TYPE_IPV4_OPTIONS = 0x05,
+ L3TYPE_IPV6 = 0x06,
+ L3TYPE_IPV6_OPTIONS = 0x07,
+ L3TYPE_ET_STOP = 0x0D,
+ L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+ L4TYPE_NONE = 0x00,
+ L4TYPE_IPSEC_ESP = 0x01,
+ L4TYPE_IPFRAG = 0x02,
+ L4TYPE_IPCOMP = 0x03,
+ L4TYPE_TCP = 0x04,
+ L4TYPE_UDP = 0x05,
+ L4TYPE_SCTP = 0x06,
+ L4TYPE_GRE = 0x07,
+ L4TYPE_ROCE_BTH = 0x08,
+ L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE = 0x0,
+ CPI_ALG_VLAN = 0x1,
+ CPI_ALG_VLAN16 = 0x2,
+ CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE = 0x00,
+ RSS_ALG_PORT = 0x01,
+ RSS_ALG_IP = 0x02,
+ RSS_ALG_TCP_IP = 0x03,
+ RSS_ALG_UDP_IP = 0x04,
+ RSS_ALG_SCTP_IP = 0x05,
+ RSS_ALG_GRE_IP = 0x06,
+ RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC = 0x00,
+ RSS_HASH_IP = 0x01,
+ RSS_HASH_TCP = 0x02,
+ RSS_TCP_SYN_DIS = 0x03,
+ RSS_HASH_UDP = 0x04,
+ RSS_HASH_L4ETC = 0x05,
+ RSS_HASH_ROCE = 0x06,
+ RSS_L3_BIDI = 0x07,
+ RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID = 0x0,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD = 0x00,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+ CQE_RX_TCP_END_INVALID_FLAG = 1,
+ CQE_RX_TCP_END_TIMEOUT = 2,
+ CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+ CQE_RX_TCP_END_PKT_ERR = 4,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE = 0x0,
+ CQE_RX_ERRLVL_L2 = 0x1,
+ CQE_RX_ERRLVL_L3 = 0x2,
+ CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE = 0x0,
+ CQE_RX_ERR_RE_PARTIAL = 0x1,
+ CQE_RX_ERR_RE_JABBER = 0x2,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 stdn_fault:1;
+ u64 rsvd0:1;
+ u64 rq_qs:7;
+ u64 rq_idx:3;
+ u64 rsvd1:12;
+ u64 rss_alg:4;
+ u64 rsvd2:4;
+ u64 rb_cnt:4;
+ u64 vlan_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan2_stripped:1;
+ u64 l4_type:4;
+ u64 l3_type:4;
+ u64 l2_present:1;
+ u64 err_level:3;
+ u64 err_opcode:8;
+
+ u64 pkt_len:16; /* W1 */
+ u64 l2_ptr:8;
+ u64 l3_ptr:8;
+ u64 l4_ptr:8;
+ u64 cq_pkt_len:8;
+ u64 align_pad:3;
+ u64 rsvd3:1;
+ u64 chan:12;
+
+ u64 rss_tag:32; /* W2 */
+ u64 vlan_tci:16;
+ u64 vlan_ptr:8;
+ u64 vlan2_ptr:8;
+
+ u64 rb3_sz:16; /* W3 */
+ u64 rb2_sz:16;
+ u64 rb1_sz:16;
+ u64 rb0_sz:16;
+
+ u64 rb7_sz:16; /* W4 */
+ u64 rb6_sz:16;
+ u64 rb5_sz:16;
+ u64 rb4_sz:16;
+
+ u64 rb11_sz:16; /* W5 */
+ u64 rb10_sz:16;
+ u64 rb9_sz:16;
+ u64 rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 err_opcode:8;
+ u64 err_level:3;
+ u64 l2_present:1;
+ u64 l3_type:4;
+ u64 l4_type:4;
+ u64 vlan2_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan_found:1;
+ u64 rb_cnt:4;
+ u64 rsvd2:4;
+ u64 rss_alg:4;
+ u64 rsvd1:12;
+ u64 rq_idx:3;
+ u64 rq_qs:7;
+ u64 rsvd0:1;
+ u64 stdn_fault:1;
+ u64 cqe_type:4; /* W0 */
+ u64 chan:12;
+ u64 rsvd3:1;
+ u64 align_pad:3;
+ u64 cq_pkt_len:8;
+ u64 l4_ptr:8;
+ u64 l3_ptr:8;
+ u64 l2_ptr:8;
+ u64 pkt_len:16; /* W1 */
+ u64 vlan2_ptr:8;
+ u64 vlan_ptr:8;
+ u64 vlan_tci:16;
+ u64 rss_tag:32; /* W2 */
+ u64 rb0_sz:16;
+ u64 rb1_sz:16;
+ u64 rb2_sz:16;
+ u64 rb3_sz:16; /* W3 */
+ u64 rb4_sz:16;
+ u64 rb5_sz:16;
+ u64 rb6_sz:16;
+ u64 rb7_sz:16; /* W4 */
+ u64 rb8_sz:16;
+ u64 rb9_sz:16;
+ u64 rb10_sz:16;
+ u64 rb11_sz:16; /* W5 */
+#endif
+ u64 rb0_ptr:64;
+ u64 rb1_ptr:64;
+ u64 rb2_ptr:64;
+ u64 rb3_ptr:64;
+ u64 rb4_ptr:64;
+ u64 rb5_ptr:64;
+ u64 rb6_ptr:64;
+ u64 rb7_ptr:64;
+ u64 rb8_ptr:64;
+ u64 rb9_ptr:64;
+ u64 rb10_ptr:64;
+ u64 rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:60;
+
+ u64 rsvd1:4; /* W1 */
+ u64 partial_first:1;
+ u64 rsvd2:27;
+ u64 rbdr_bytes:8;
+ u64 rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 rsvd0:60;
+ u64 cqe_type:4;
+
+ u64 rsvd3:24;
+ u64 rbdr_bytes:8;
+ u64 rsvd2:27;
+ u64 partial_first:1;
+ u64 rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:52;
+ u64 cq_tcp_status:8;
+
+ u64 rsvd1:32; /* W1 */
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd2:8;
+ u64 tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cq_tcp_status:8;
+ u64 rsvd0:52;
+ u64 cqe_type:4; /* W0 */
+
+ u64 tcp_err_bytes:16;
+ u64 rsvd2:8;
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:4;
+ u64 sqe_ptr:16;
+ u64 rsvd1:4;
+ u64 rsvd2:10;
+ u64 sq_qs:7;
+ u64 sq_idx:3;
+ u64 rsvd3:8;
+ u64 send_status:8;
+
+ u64 ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 send_status:8;
+ u64 rsvd3:8;
+ u64 sq_idx:3;
+ u64 sq_qs:7;
+ u64 rsvd2:10;
+ u64 rsvd1:4;
+ u64 sqe_ptr:16;
+ u64 rsvd0:4;
+ u64 cqe_type:4; /* W0 */
+
+ u64 ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+ u64 u[64];
+ struct cqe_send_t snd_hdr;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd0:15;
+ u64 buf_addr:42;
+ u64 cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cache_align:7;
+ u64 buf_addr:42;
+ u64 rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 tcp_pkt_cnt:12;
+ u64 rsvd1:4;
+ u64 align_hdr_bytes:4;
+ u64 align_ptr_bytes:4;
+ u64 ptr_bytes:16;
+ u64 rsvd2:24;
+ u64 cqe_type:4;
+ u64 rsvd0:54;
+ u64 tcp_end_reason:2;
+ u64 tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_status:4;
+ u64 tcp_end_reason:2;
+ u64 rsvd0:54;
+ u64 cqe_type:4;
+ u64 rsvd2:24;
+ u64 ptr_bytes:16;
+ u64 align_ptr_bytes:4;
+ u64 align_hdr_bytes:4;
+ u64 rsvd1:4;
+ u64 tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ u64 opaque:32;
+ u64 rss_flow:8;
+ u64 skip_length:6;
+ u64 disable_rss:1;
+ u64 disable_tcp_reassembly:1;
+ u64 nodrop:1;
+ u64 dest_alg:2;
+ u64 rsvd0:2;
+ u64 dest_rq:11;
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE = 0x00,
+ SEND_L4_CSUM_UDP = 0x01,
+ SEND_L4_CSUM_TCP = 0x02,
+ SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32 = 0x00,
+ SEND_CRCALG_CRC32C = 0x01,
+ SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD = 0x00,
+ SEND_LD_TYPE_LDT = 0x01,
+ SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET = 0x00,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64 = 0x00,
+ SEND_MEMDSZ_B32 = 0x01,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID = 0x00,
+ SQ_DESC_TYPE_HEADER = 0x01,
+ SQ_DESC_TYPE_CRC = 0x02,
+ SQ_DESC_TYPE_IMMEDIATE = 0x03,
+ SQ_DESC_TYPE_GATHER = 0x04,
+ SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd1:32;
+ u64 crc_ival:32;
+ u64 subdesc_type:4;
+ u64 crc_alg:2;
+ u64 rsvd0:10;
+ u64 crc_insert_pos:16;
+ u64 hdr_start:16;
+ u64 crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 crc_len:16;
+ u64 hdr_start:16;
+ u64 crc_insert_pos:16;
+ u64 rsvd0:10;
+ u64 crc_alg:2;
+ u64 subdesc_type:4;
+ u64 crc_ival:32;
+ u64 rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 ld_type:2;
+ u64 rsvd0:42;
+ u64 size:16;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 size:16;
+ u64 rsvd0:42;
+ u64 ld_type:2;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 rsvd0:46;
+ u64 len:14;
+
+ u64 data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 len:14;
+ u64 rsvd0:46;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 mem_alg:4;
+ u64 mem_dsz:2;
+ u64 wmem:1;
+ u64 rsvd0:21;
+ u64 offset:32;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 offset:32;
+ u64 rsvd0:21;
+ u64 wmem:1;
+ u64 mem_dsz:2;
+ u64 mem_alg:4;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4;
+ u64 tso:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 dont_send:1;
+ u64 tstmp:1;
+ u64 subdesc_cnt:8;
+ u64 csum_l4:2;
+ u64 csum_l3:1;
+ u64 rsvd0:5;
+ u64 l4_offset:8;
+ u64 l3_offset:8;
+ u64 rsvd1:4;
+ u64 tot_len:20; /* W0 */
+
+ u64 tso_sdc_cont:8;
+ u64 tso_sdc_first:8;
+ u64 tso_l4_offset:8;
+ u64 tso_flags_last:12;
+ u64 tso_flags_first:12;
+ u64 rsvd2:2;
+ u64 tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tot_len:20;
+ u64 rsvd1:4;
+ u64 l3_offset:8;
+ u64 l4_offset:8;
+ u64 rsvd0:5;
+ u64 csum_l3:1;
+ u64 csum_l4:2;
+ u64 subdesc_cnt:8;
+ u64 tstmp:1;
+ u64 dont_send:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 tso:1;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 tso_max_paysize:14;
+ u64 rsvd2:2;
+ u64 tso_flags_first:12;
+ u64 tso_flags_last:12;
+ u64 tso_l4_offset:8;
+ u64 tso_sdc_first:8;
+ u64 tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63:62;
+ u64 ena:1;
+ u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_ena:1;
+ u64 ena:1;
+ u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_43_63:21;
+ u64 ena:1;
+ u64 reset:1;
+ u64 caching:1;
+ u64 reserved_35_39:5;
+ u64 qsize:3;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_15:16;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:3;
+ u64 reserved_35_39:5;
+ u64 caching:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63:44;
+ u64 ena:1;
+ u64 reserved_18_18:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_11_15:5;
+ u64 qsize:3;
+ u64 reserved_3_7:5;
+ u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tstmp_bgx_intf:3;
+ u64 reserved_3_7:5;
+ u64 qsize:3;
+ u64 reserved_11_15:5;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 reserved_18_18:1;
+ u64 ena:1;
+ u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63:19;
+ u64 ena:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_36_41:6;
+ u64 qsize:4;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_12_15:4;
+ u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 lines:12;
+ u64 reserved_12_15:4;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:4;
+ u64 reserved_36_41:6;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 ena:1;
+ u64 reserved_27_30:4;
+ u64 sq_ins_ena:1;
+ u64 sq_ins_pos:6;
+ u64 lock_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 send_tstmp_ena:1;
+ u64 be:1;
+ u64 reserved_7_15:9;
+ u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 vnic:7;
+ u64 reserved_7_15:9;
+ u64 be:1;
+ u64 send_tstmp_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 lock_ena:1;
+ u64 sq_ins_pos:6;
+ u64 sq_ins_ena:1;
+ u64 reserved_27_30:4;
+ u64 ena:1;
+ u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <phy.h>
+#include <asm/io.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+
+#define PCI_DEVICE_ID_OCTEONTX_SMI 0xA02B
+
+DECLARE_GLOBAL_DATA_PTR;
+
+enum octeontx_smi_mode {
+ CLAUSE22 = 0,
+ CLAUSE45 = 1,
+};
+
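+/* MDIO opcodes as encoded in the SMI_X_CMD[PHY_OP] field */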
+enum {
+ SMI_OP_C22_WRITE = 0,
+ SMI_OP_C22_READ = 1,
+
+ SMI_OP_C45_ADDR = 0,
+ SMI_OP_C45_WRITE = 1,
+ SMI_OP_C45_PRIA = 2,
+ SMI_OP_C45_READ = 3,
+};
+
+union smi_x_clk {
+ u64 u;
+ struct smi_x_clk_s {
+ int phase:8;
+ int sample:4;
+ int preamble:1;
+ int clk_idle:1;
+ int reserved_14_14:1;
+ int sample_mode:1;
+ int sample_hi:5;
+ int reserved_21_23:3;
+ int mode:1;
+ } s;
+};
+
+union smi_x_cmd {
+ u64 u;
+ struct smi_x_cmd_s {
+ int reg_adr:5;
+ int reserved_5_7:3;
+ int phy_adr:5;
+ int reserved_13_15:3;
+ int phy_op:2;
+ } s;
+};
+
+union smi_x_wr_dat {
+ u64 u;
+ struct smi_x_wr_dat_s {
+ unsigned int dat:16;
+ int val:1;
+ int pending:1;
+ } s;
+};
+
+union smi_x_rd_dat {
+ u64 u;
+ struct smi_x_rd_dat_s {
+ unsigned int dat:16;
+ int val:1;
+ int pending:1;
+ } s;
+};
+
+union smi_x_en {
+ u64 u;
+ struct smi_x_en_s {
+ int en:1;
+ } s;
+};
+
+#define SMI_X_RD_DAT 0x10ull
+#define SMI_X_WR_DAT 0x08ull
+#define SMI_X_CMD 0x00ull
+#define SMI_X_CLK 0x18ull
+#define SMI_X_EN 0x20ull
+
+struct octeontx_smi_priv {
+ void __iomem *baseaddr;
+ enum octeontx_smi_mode mode;
+};
+
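+/* Completion poll budget, in udelay-paced loop iterations */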
+#define MDIO_TIMEOUT 10000
+
+void octeontx_smi_setmode(struct mii_dev *bus, enum octeontx_smi_mode mode)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+ union smi_x_clk smix_clk;
+
+ smix_clk.u = readq(priv->baseaddr + SMI_X_CLK);
+ smix_clk.s.mode = mode;
+ smix_clk.s.preamble = mode == CLAUSE45;
+ writeq(smix_clk.u, priv->baseaddr + SMI_X_CLK);
+
+ priv->mode = mode;
+}
+
+int octeontx_c45_addr(struct mii_dev *bus, int addr, int devad, int regnum)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+
+ union smi_x_cmd smix_cmd;
+ union smi_x_wr_dat smix_wr_dat;
+ unsigned long timeout = MDIO_TIMEOUT;
+
+ smix_wr_dat.u = 0;
+ smix_wr_dat.s.dat = regnum;
+
+ writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
+
+ smix_cmd.u = 0;
+ smix_cmd.s.phy_op = SMI_OP_C45_ADDR;
+ smix_cmd.s.phy_adr = addr;
+ smix_cmd.s.reg_adr = devad;
+
+ writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+ do {
+ smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
+ udelay(100);
+ timeout--;
+ } while (smix_wr_dat.s.pending && timeout);
+
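+ /* Nonzero return means the address cycle timed out */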
+ return timeout == 0;
+}
+
+int octeontx_phy_read(struct mii_dev *bus, int addr, int devad, int regnum)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+ union smi_x_cmd smix_cmd;
+ union smi_x_rd_dat smix_rd_dat;
+ unsigned long timeout = MDIO_TIMEOUT;
+ int ret;
+
+ enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
+
+ debug("RD: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
+ mode, priv->baseaddr, addr, devad, regnum);
+
+ octeontx_smi_setmode(bus, mode);
+
+ if (mode == CLAUSE45) {
+ ret = octeontx_c45_addr(bus, addr, devad, regnum);
+
+ debug("RD: ret: %u\n", ret);
+
+ if (ret)
+ return 0;
+ }
+
+ smix_cmd.u = 0;
+ smix_cmd.s.phy_adr = addr;
+
+ if (mode == CLAUSE45) {
+ smix_cmd.s.reg_adr = devad;
+ smix_cmd.s.phy_op = SMI_OP_C45_READ;
+ } else {
+ smix_cmd.s.reg_adr = regnum;
+ smix_cmd.s.phy_op = SMI_OP_C22_READ;
+ }
+
+ writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+ do {
+ smix_rd_dat.u = readq(priv->baseaddr + SMI_X_RD_DAT);
+ udelay(10);
+ timeout--;
+ } while (smix_rd_dat.s.pending && timeout);
+
+ debug("SMIX_RD_DAT: %lx\n", (unsigned long)smix_rd_dat.u);
+
+ return smix_rd_dat.s.dat;
+}
+
+int octeontx_phy_write(struct mii_dev *bus, int addr, int devad, int regnum,
+ u16 value)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+ union smi_x_cmd smix_cmd;
+ union smi_x_wr_dat smix_wr_dat;
+ unsigned long timeout = MDIO_TIMEOUT;
+ int ret;
+
+ enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
+
+ debug("WR: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
+ mode, priv->baseaddr, addr, devad, regnum);
+
+ if (mode == CLAUSE45) {
+ ret = octeontx_c45_addr(bus, addr, devad, regnum);
+
+ debug("WR: ret: %u\n", ret);
+
+ if (ret)
+ return ret;
+ }
+
+ smix_wr_dat.u = 0;
+ smix_wr_dat.s.dat = value;
+
+ writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
+
+ smix_cmd.u = 0;
+ smix_cmd.s.phy_adr = addr;
+
+ if (mode == CLAUSE45) {
+ smix_cmd.s.reg_adr = devad;
+ smix_cmd.s.phy_op = SMI_OP_C45_WRITE;
+ } else {
+ smix_cmd.s.reg_adr = regnum;
+ smix_cmd.s.phy_op = SMI_OP_C22_WRITE;
+ }
+
+ writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+ do {
+ smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
+ udelay(10);
+ timeout--;
+ } while (smix_wr_dat.s.pending && timeout);
+
+ debug("SMIX_WR_DAT: %lx\n", (unsigned long)smix_wr_dat.u);
+
+ return timeout == 0;
+}
+
+int octeontx_smi_reset(struct mii_dev *bus)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+
+ union smi_x_en smi_en;
+
+ smi_en.s.en = 0;
+ writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
+
+ smi_en.s.en = 1;
+ writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
+
+ octeontx_smi_setmode(bus, CLAUSE22);
+
+ return 0;
+}
+
+/* PHY XS initialization, primarily for RXAUI */
+int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr)
+{
+ int reg;
+ ulong start_time;
+ int phy_id1, phy_id2;
+ int oui, model_number;
+
+ phy_id1 = octeontx_phy_read(bus, phy_addr, 1, 0x2);
+ phy_id2 = octeontx_phy_read(bus, phy_addr, 1, 0x3);
+ model_number = (phy_id2 >> 4) & 0x3F;
+ debug("%s model %x\n", __func__, model_number);
+ oui = phy_id1;
+ oui <<= 6;
+ oui |= (phy_id2 >> 10) & 0x3F;
+ debug("%s oui %x\n", __func__, oui);
+ switch (oui) {
+ case 0x5016:
+ if (model_number == 9) {
+ debug("%s +\n", __func__);
+ /* Perform hardware reset in XGXS control */
+ reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
+ if (reg < 0)
+ goto read_error;
+ reg |= 0x8000;
+ octeontx_phy_write(bus, phy_addr, 4, 0x0, reg);
+
+ start_time = get_timer(0);
+ do {
+ reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
+ if (reg < 0)
+ goto read_error;
+ } while ((reg & 0x8000) && get_timer(start_time) < 500);
+ if (reg & 0x8000) {
+ printf("HW reset for M88X3120 PHY failed");
+ printf("MII_BMCR: 0x%x\n", reg);
+ return -1;
+ }
+ /* program 4.49155 with 0x5 */
+ octeontx_phy_write(bus, phy_addr, 4, 0xc003, 0x5);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+
+read_error:
+ debug("M88X3120 PHY config read failed\n");
+ return -1;
+}
+
+int octeontx_smi_probe(struct udevice *dev)
+{
+ int ret, subnode, cnt = 0, node = dev->node.of_offset;
+ struct mii_dev *bus;
+ struct octeontx_smi_priv *priv;
+ pci_dev_t bdf = dm_pci_get_bdf(dev);
+
+ debug("SMI PCI device: %x\n", bdf);
+ dev->req_seq = PCI_FUNC(bdf);
+ if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM)) {
+ printf("Failed to map PCI region for bdf %x\n", bdf);
+ return -1;
+ }
+
+ fdt_for_each_subnode(subnode, gd->fdt_blob, node) {
+ ret = fdt_node_check_compatible(gd->fdt_blob, subnode,
+ "cavium,thunder-8890-mdio");
+ if (ret)
+ continue;
+
+ bus = mdio_alloc();
+ priv = malloc(sizeof(*priv));
+ if (!bus || !priv) {
+ printf("Failed to allocate OcteonTX MDIO bus # %u\n",
+ dev->seq);
+ return -1;
+ }
+
+ bus->read = octeontx_phy_read;
+ bus->write = octeontx_phy_write;
+ bus->reset = octeontx_smi_reset;
+ bus->priv = priv;
+
+ priv->mode = CLAUSE22;
+ priv->baseaddr = (void __iomem *)fdtdec_get_addr(gd->fdt_blob,
+ subnode,
+ "reg");
+ debug("mdio base addr %p\n", priv->baseaddr);
+
+ /* use given name or generate its own unique name */
+ snprintf(bus->name, MDIO_NAME_LEN, "smi%d", cnt++);
+
+ ret = mdio_register(bus);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct udevice_id octeontx_smi_ids[] = {
+ { .compatible = "cavium,thunder-8890-mdio-nexus" },
+ {}
+};
+
+U_BOOT_DRIVER(octeontx_smi) = {
+ .name = "octeontx_smi",
+ .id = UCLASS_MISC,
+ .probe = octeontx_smi_probe,
+ .of_match = octeontx_smi_ids,
+};
+
+static struct pci_device_id octeontx_smi_supported[] = {
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_OCTEONTX_SMI) },
+ {}
+};
+
+U_BOOT_PCI_DEVICE(octeontx_smi, octeontx_smi_supported);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <pci.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <net.h>
+#include <netdev.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+
+#include <asm/arch/csrs/csrs-xcv.h>
+
+#define XCVX_BASE 0x87E0DB000000ULL
+
+/* Initialize XCV block */
+void xcv_init_hw(void)
+{
+ union xcvx_reset reset;
+ union xcvx_dll_ctl xcv_dll_ctl;
+
+ /* Take the DLL out of reset */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.dllrst = 0;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ /* Take the clock tree out of reset */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.clkrst = 0;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ /* Once the 125MHz ref clock is stable, wait 10us for DLL to lock */
+ udelay(10);
+
+ /* Optionally, bypass the DLL setting */
+ xcv_dll_ctl.u = readq(XCVX_BASE + XCVX_DLL_CTL(0));
+ xcv_dll_ctl.s.clkrx_set = 0;
+ xcv_dll_ctl.s.clkrx_byp = 1;
+ xcv_dll_ctl.s.clktx_byp = 0;
+ writeq(xcv_dll_ctl.u, XCVX_BASE + XCVX_DLL_CTL(0));
+
+ /* Enable the compensation controller */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.comp = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+
+ /* Wait 1040 reference clock cycles for the compensation state
+ * machine to lock.
+ */
+ udelay(100);
+
+ /* Enable the XCV block */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.enable = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ /* set XCV(0)_RESET[CLKRST] to 1 */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.clkrst = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+}
+
+/*
+ * Configure XCV link based on the speed
+ * link_up : Set to 1 when link is up otherwise 0
+ * link_speed: The speed of the link.
+ */
+void xcv_setup_link(bool link_up, int link_speed)
+{
+ union xcvx_ctl xcv_ctl;
+ union xcvx_reset reset;
+ union xcvx_batch_crd_ret xcv_crd_ret;
+ int speed = 2;
+
+ /* Map RGMII link speed to XCV encoding: 0 = 10M, 1 = 100M, 2 = 1G */
+ if (link_speed == 100)
+ speed = 1;
+ else if (link_speed == 10)
+ speed = 0;
+
+ if (link_up) {
+ /* Set operating speed */
+ xcv_ctl.u = readq(XCVX_BASE + XCVX_CTL(0));
+ xcv_ctl.s.speed = speed;
+ writeq(xcv_ctl.u, XCVX_BASE + XCVX_CTL(0));
+
+ /* Bring the datapaths out of reset
+ * - The datapath resets will disengage BGX from the
+ * RGMII interface
+ * - XCV will continue to return TX credits for each tick
+ * that is sent on the TX data path
+ */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.tx_dat_rst_n = 1;
+ reset.s.rx_dat_rst_n = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ /* Enable packet flow */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.tx_pkt_rst_n = 1;
+ reset.s.rx_pkt_rst_n = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ xcv_crd_ret.u = readq(XCVX_BASE + XCVX_BATCH_CRD_RET(0));
+ xcv_crd_ret.s.crd_ret = 1;
+ writeq(xcv_crd_ret.u, XCVX_BASE + XCVX_BATCH_CRD_RET(0));
+ } else {
+ /* Disable packet flow */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.tx_pkt_rst_n = 0;
+ reset.s.rx_pkt_rst_n = 0;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ }
+}