amd-xgbe: add support for rx-adaptation
The existing implementation for the non-autonegotiation 10G speed modes does not enable RX adaptation in the driver and firmware. The RX equalization settings (AFE settings alone) are configured manually, and the driver's link-up sequence does not perform the RX adaptation process described in the Synopsys databook.

There is a customer requirement for 10G backplane mode without auto-negotiation, and for longer DAC cables that likewise operate without auto-negotiation. These modes require the PHY to perform RX adaptation.

Add the necessary changes for Yellow Carp devices to ensure seamless RX adaptation for 10G-SFI (long DAC) and 10G-KR without AN (CL72 not present). The RX adaptation core algorithm is executed by the firmware; to trigger it, the driver must send a new mailbox sub-command.

Co-developed-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
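For orientation, here is a condensed, illustrative sketch of the driver-side sequence this patch implements. All identifiers are taken from the diff below; the wrapper name rx_adaptation_sketch() is hypothetical, the ordering is simplified, and retries, the SFI variant and error handling are omitted.

/* Condensed sketch (not the actual patch code) of RX adaptation for
 * 10G-KR without AN. The hypothetical helper rx_adaptation_sketch()
 * only illustrates the flow the real functions below implement.
 */
static void rx_adaptation_sketch(struct xgbe_prv_data *pdata)
{
    unsigned int reg;

    /* Ask firmware to run its part of the adaptation via the new
     * mailbox sub-command (done by xgbe_phy_kr_mode()/xgbe_phy_sfi_mode()
     * when enable_rx_adap() allows it).
     */
    xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
                                XGBE_MB_SUBCMD_RX_ADAP);

    /* Force the PCS to send an RX_ADAPT request to the PHY, wait,
     * then poll the coefficient-update status and drop the request.
     */
    XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
                     XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
    msleep(200);
    reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
    XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
                     XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);

    /* Adaptation is considered done once the coefficients were updated
     * and block lock (PCS link status) is observed.
     */
    if ((reg & XGBE_PMA_CFF_UPDT_MASK) == XGBE_PMA_CFF_UPDT_MASK &&
        (XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1) & MDIO_STAT1_LSTATUS))
        pdata->rx_adapt_done = true;
}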
parent 3ee217c47b
commit 4f3b20bfbb
3 changed files with 211 additions and 4 deletions
@@ -1285,6 +1285,22 @@
#define MDIO_PMA_RX_CTRL1 0x8051
#endif

#ifndef MDIO_PMA_RX_LSTS
#define MDIO_PMA_RX_LSTS 0x018020
#endif

#ifndef MDIO_PMA_RX_EQ_CTRL4
#define MDIO_PMA_RX_EQ_CTRL4 0x0001805C
#endif

#ifndef MDIO_PMA_MP_MISC_STS
#define MDIO_PMA_MP_MISC_STS 0x0078
#endif

#ifndef MDIO_PMA_PHY_RX_EQ_CEU
#define MDIO_PMA_PHY_RX_EQ_CEU 0x1800E
#endif

#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
@@ -1395,6 +1411,28 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00

#define XGBE_PMA_RX_SIG_DET_0_MASK BIT(4)
#define XGBE_PMA_RX_SIG_DET_0_ENABLE BIT(4)
#define XGBE_PMA_RX_SIG_DET_0_DISABLE 0x0000

#define XGBE_PMA_RX_VALID_0_MASK BIT(12)
#define XGBE_PMA_RX_VALID_0_ENABLE BIT(12)
#define XGBE_PMA_RX_VALID_0_DISABLE 0x0000

#define XGBE_PMA_RX_AD_REQ_MASK BIT(12)
#define XGBE_PMA_RX_AD_REQ_ENABLE BIT(12)
#define XGBE_PMA_RX_AD_REQ_DISABLE 0x0000

#define XGBE_PMA_RX_ADPT_ACK_MASK BIT(12)
#define XGBE_PMA_RX_ADPT_ACK BIT(12)

#define XGBE_PMA_CFF_UPDTM1_VLD BIT(8)
#define XGBE_PMA_CFF_UPDT0_VLD BIT(9)
#define XGBE_PMA_CFF_UPDT1_VLD BIT(10)
#define XGBE_PMA_CFF_UPDT_MASK (XGBE_PMA_CFF_UPDTM1_VLD | \
                                XGBE_PMA_CFF_UPDT0_VLD | \
                                XGBE_PMA_CFF_UPDT1_VLD)

#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
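As a brief illustration (not a new hunk from this patch), these mask/value pairs are consumed by the driver's existing XMDIO read-modify-write helpers later in the diff, for example when asserting and de-asserting the adaptation request bit:

/* Illustrative usage of the new definitions with the existing helpers */
XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
                 XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
                 XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);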
@@ -388,6 +388,9 @@ struct xgbe_phy_data {
static DEFINE_MUTEX(xgbe_phy_comm_lock);

static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
                                        unsigned int cmd, unsigned int sub_cmd);

static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
                             struct xgbe_i2c_op *i2c_op)
@@ -2038,6 +2041,93 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
    xgbe_phy_put_comm_ownership(pdata);
}

#define MAX_RX_ADAPT_RETRIES 1
#define XGBE_PMA_RX_VAL_SIG_MASK (XGBE_PMA_RX_SIG_DET_0_MASK | \
                                  XGBE_PMA_RX_VALID_0_MASK)

static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
                                  enum xgbe_mode mode)
{
    if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
        pdata->rx_adapt_retries = 0;
        return;
    }

    xgbe_phy_perform_ratechange(pdata,
                                mode == XGBE_MODE_KR ?
                                XGBE_MB_CMD_SET_10G_KR :
                                XGBE_MB_CMD_SET_10G_SFI,
                                XGBE_MB_SUBCMD_RX_ADAP);
}

static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;
    unsigned int reg;

    /* step 2: force PCS to send RX_ADAPT Req to PHY */
    XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
                     XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);

    /* Step 3: Wait for RX_ADAPT ACK from the PHY */
    msleep(200);

    /* Software polls for coefficient update command (given by local PHY) */
    reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);

    /* Clear the RX_AD_REQ bit */
    XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
                     XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);

    /* Check if coefficient update command is set */
    if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
        goto set_mode;

    /* Step 4: Check for Block lock */

    /* Link status is latched low, so read once to clear
     * and then read again to get current state
     */
    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
    if (reg & MDIO_STAT1_LSTATUS) {
        /* If the block lock is found, update the helpers
         * and declare the link up
         */
        netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
        pdata->rx_adapt_done = true;
        pdata->mode_set = false;
        return;
    }

set_mode:
    xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
}

static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
{
    unsigned int reg;

rx_adapt_reinit:
    reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS,
                          XGBE_PMA_RX_VAL_SIG_MASK);

    /* step 1: Check for RX_VALID && LF_SIGDET */
    if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
        netif_dbg(pdata, link, pdata->netdev,
                  "RX_VALID or LF_SIGDET is unset, issue rrc");
        xgbe_phy_rrc(pdata);
        if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
            pdata->rx_adapt_retries = 0;
            return;
        }
        goto rx_adapt_reinit;
    }

    /* perform rx adaptation */
    xgbe_rx_adaptation(pdata);
}

static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
    int reg;

@@ -2103,7 +2193,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
    wait = XGBE_RATECHANGE_COUNT;
    while (wait--) {
        if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
            goto reenable_pll;
            goto do_rx_adaptation;

        usleep_range(1000, 2000);
    }

@@ -2113,6 +2203,20 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,

    /* Reset on error */
    xgbe_phy_rx_reset(pdata);
    goto reenable_pll;

do_rx_adaptation:
    if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
        (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
        netif_dbg(pdata, link, pdata->netdev,
                  "Enabling RX adaptation\n");
        pdata->mode_set = true;
        xgbe_phy_rx_adaptation(pdata);
        /* return from here to avoid enabling PLL ctrl
         * during adaptation phase
         */
        return;
    }

reenable_pll:
    /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */

@@ -2141,6 +2245,31 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
    netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
}

static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;
    unsigned int ver;

    /* Rx-Adaptation is not supported on older platforms (< 0x30H) */
    ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
    if (ver < 0x30)
        return false;

    /* Re-driver models 4223 && 4227 do not support Rx-Adaptation */
    if (phy_data->redrv &&
        (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
         phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
        return false;

    /* 10G KR mode with AN does not support Rx-Adaptation */
    if (mode == XGBE_MODE_KR &&
        phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
        return false;

    pdata->en_rx_adap = 1;
    return true;
}

static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;

@@ -2149,7 +2278,12 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)

    /* 10G/SFI */
    if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
        pdata->en_rx_adap = 0;
        xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
    } else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
               (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
        xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
                                    XGBE_MB_SUBCMD_RX_ADAP);
    } else {
        if (phy_data->sfp_cable_len <= 1)
            xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
@@ -2230,7 +2364,12 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
    xgbe_phy_set_redrv_mode(pdata);

    /* 10G/KR */
    xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
    if (enable_rx_adap(pdata, XGBE_MODE_KR))
        xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
                                    XGBE_MB_SUBCMD_RX_ADAP);
    else
        xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
                                    XGBE_MB_SUBCMD_NONE);

    phy_data->cur_mode = XGBE_MODE_KR;

@@ -2743,8 +2882,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
            return 0;
        }

        if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
        if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
            if (pdata->en_rx_adap)
                pdata->rx_adapt_done = false;
            return 0;
        }
    }

    if (phy_data->phydev) {

@@ -2766,7 +2908,29 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
     */
    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
    reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
    if (reg & MDIO_STAT1_LSTATUS)

    if (pdata->en_rx_adap) {
        /* if the link is available and adaptation is done,
         * declare link up
         */
        if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
            return 1;
        /* If either link is not available or adaptation is not done,
         * retrigger the adaptation logic. (if the mode is not set,
         * then issue mailbox command first)
         */
        if (pdata->mode_set) {
            xgbe_phy_rx_adaptation(pdata);
        } else {
            pdata->rx_adapt_done = false;
            xgbe_phy_set_mode(pdata, phy_data->cur_mode);
        }

        /* check again for the link and adaptation status */
        reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
        if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
            return 1;
    } else if (reg & MDIO_STAT1_LSTATUS)
        return 1;

    if (pdata->phy.autoneg == AUTONEG_ENABLE &&

@@ -625,6 +625,7 @@ enum xgbe_mb_cmd {

enum xgbe_mb_subcmd {
    XGBE_MB_SUBCMD_NONE = 0,
    XGBE_MB_SUBCMD_RX_ADAP,

    /* 10GbE SFP subcommands */
    XGBE_MB_SUBCMD_ACTIVE = 0,

@@ -1316,6 +1317,10 @@ struct xgbe_prv_data {

    bool debugfs_an_cdr_workaround;
    bool debugfs_an_cdr_track_early;
    bool en_rx_adap;
    int rx_adapt_retries;
    bool rx_adapt_done;
    bool mode_set;
};

/* Function prototypes*/