Configure the RMMI and M-PHY registers for HS mode, which is required to select bit rate series A or B. If the part is not calibrated, switch back to SLOWAUTO_MODE and skip all of these configurations.

Implement the below sequence as per the DWC RMMI databook:
1. Override RMMI CBRATESEL with the desired rate.
2. Set TX_CFGUPDT_0 to 1'b1 for one TX_CFGCLK_0 cycle.
3. Override PHY rx_req to 1, then poll the PHY rx_ack register until it reads 1 (both lanes).
4. Override PHY rx_req to 0, then poll the PHY rx_ack register until it reads 0 (both lanes).
5. Remove the PHY rx_req override (both lanes).
6. Start the LS PMC.
Signed-off-by: Venkatesh Yadav Abbarapu <venkatesh.abbar...@amd.com>
---
 drivers/ufs/ufs-amd-versal2.c | 112 ++++++++++++++++++++++++++++++++++
 drivers/ufs/ufs.c             |  15 +++++
 drivers/ufs/ufs.h             |   3 +
 drivers/ufs/ufshcd-dwc.h      |   3 +
 4 files changed, 133 insertions(+)

diff --git a/drivers/ufs/ufs-amd-versal2.c b/drivers/ufs/ufs-amd-versal2.c
index 1c5ed538370..896dda2de4e 100644
--- a/drivers/ufs/ufs-amd-versal2.c
+++ b/drivers/ufs/ufs-amd-versal2.c
@@ -26,6 +26,10 @@
 #define MPHY_FAST_RX_AFE_CAL	BIT(2)
 #define MPHY_FW_CALIB_CFG_VAL	BIT(8)
 
+#define MPHY_RX_OVRD_EN		BIT(3)
+#define MPHY_RX_OVRD_VAL	BIT(2)
+#define MPHY_RX_ACK_MASK	BIT(0)
+
 #define TX_RX_CFG_RDY_MASK	GENMASK(3, 0)
 
 #define TIMEOUT_MICROSEC	1000000L
@@ -422,10 +426,118 @@ static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
 	return ret;
 }
 
+static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
+{
+	u32 time_left, reg, lane;
+	int ret;
+
+	for (lane = 0; lane < activelanes; lane++) {
+		time_left = TIMEOUT_MICROSEC;
+		ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+		if (ret)
+			return ret;
+
+		reg |= MPHY_RX_OVRD_EN;
+		if (rx_req)
+			reg |= MPHY_RX_OVRD_VAL;
+		else
+			reg &= ~MPHY_RX_OVRD_VAL;
+
+		ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+		if (ret)
+			return ret;
+
+		do {
+			ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
+			if (ret)
+				return ret;
+
+			reg &= MPHY_RX_ACK_MASK;
+			if (reg == rx_req)
+				break;
+
+			time_left--;
+			mdelay(5);
+		} while (time_left);
+
+		if (!time_left) {
+			dev_err(hba->dev, "Invalid Rx Ack value.\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int ufs_get_max_pwr_mode(struct ufs_hba *hba,
+				struct ufs_pwr_mode_info *max_pwr_info)
+{
+	struct ufs_versal2_priv *priv = dev_get_priv(hba->dev);
+	u32 lane, reg, rate = 0;
+	int ret = 0;
+
+	/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
+	if (!priv->attcompval0 && !priv->attcompval1 &&
+	    !priv->ctlecompval0 && !priv->ctlecompval1) {
+		max_pwr_info->info.pwr_rx = SLOWAUTO_MODE;
+		max_pwr_info->info.pwr_tx = SLOWAUTO_MODE;
+		max_pwr_info->info.gear_rx = UFS_PWM_G1;
+		max_pwr_info->info.gear_tx = UFS_PWM_G1;
+		max_pwr_info->info.lane_tx = 1;
+		max_pwr_info->info.lane_rx = 1;
+		max_pwr_info->info.hs_rate = 0;
+		return 0;
+	}
+
+	if (max_pwr_info->info.pwr_rx == SLOWAUTO_MODE ||
+	    max_pwr_info->info.pwr_tx == SLOWAUTO_MODE)
+		return 0;
+
+	if (max_pwr_info->info.hs_rate == PA_HS_MODE_B)
+		rate = 1;
+
+	/* Select the rate */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+	if (ret)
+		return ret;
+
+	ret = ufs_versal2_phy_ratesel(hba, max_pwr_info->info.lane_tx, 1);
+	if (ret)
+		return ret;
+
+	ret = ufs_versal2_phy_ratesel(hba, max_pwr_info->info.lane_tx, 0);
+	if (ret)
+		return ret;
+
+	/* Remove rx_req override */
+	for (lane = 0; lane < max_pwr_info->info.lane_tx; lane++) {
+		ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+		if (ret)
+			return ret;
+
+		reg &= ~MPHY_RX_OVRD_EN;
+		ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+		if (ret)
+			return ret;
+	}
+
+	if (max_pwr_info->info.lane_tx == UFS_LANE_2 &&
+	    max_pwr_info->info.lane_rx == UFS_LANE_2)
+		ret = ufshcd_dme_configure_adapt(hba, max_pwr_info->info.gear_tx,
+						 PA_INITIAL_ADAPT);
+
+	return 0;
+}
+
 static struct ufs_hba_ops ufs_versal2_hba_ops = {
 	.init = ufs_versal2_init,
 	.link_startup_notify = ufs_versal2_link_startup_notify,
 	.hce_enable_notify = ufs_versal2_hce_enable_notify,
+	.get_max_pwr_mode = ufs_get_max_pwr_mode,
 };
 
 static int ufs_versal2_probe(struct udevice *dev)
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
index 91f6ad3bfef..57e6e8c013b 100644
--- a/drivers/ufs/ufs.c
+++ b/drivers/ufs/ufs.c
@@ -226,6 +226,21 @@ static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	return 0;
 }
 
+int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+			       int agreed_gear,
+			       int adapt_val)
+{
+	int ret;
+
+	if (agreed_gear < UFS_HS_G4)
+		adapt_val = PA_NO_ADAPT;
+
+	ret = ufshcd_dme_set(hba,
+			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+			     adapt_val);
+	return ret;
+}
+
 /**
  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  *
diff --git a/drivers/ufs/ufs.h b/drivers/ufs/ufs.h
index 53137fae3a8..0337ac5996b 100644
--- a/drivers/ufs/ufs.h
+++ b/drivers/ufs/ufs.h
@@ -428,6 +428,9 @@ enum uic_link_state {
 #define ATTR_SET_NOR	0	/* NORMAL */
 #define ATTR_SET_ST	1	/* STATIC */
 
+int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+			       int agreed_gear,
+			       int adapt_val);
 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
 			u32 mib_val, u8 peer);
 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
diff --git a/drivers/ufs/ufshcd-dwc.h b/drivers/ufs/ufshcd-dwc.h
index fc1bcca8ccb..f7d27736f44 100644
--- a/drivers/ufs/ufshcd-dwc.h
+++ b/drivers/ufs/ufshcd-dwc.h
@@ -17,6 +17,7 @@
 #define CBREFCLKCTRL2		0x8132
 #define CBCRCTRL		0x811F
 #define CBC10DIRECTCONF2	0x810E
+#define CBRATESEL		0x8114
 #define CBCREGADDRLSB		0x8116
 #define CBCREGADDRMSB		0x8117
 #define CBCREGWRLSB		0x8118
@@ -32,6 +33,8 @@
 #define MRX_FSM_STATE		0xC1
 
 /* M-PHY registers */
+#define RX_OVRD_IN_1(n)		(0x3006 + ((n) * 0x100))
+#define RX_PCS_OUT(n)		(0x300F + ((n) * 0x100))
 #define FAST_FLAGS(n)		(0x401C + ((n) * 0x100))
 #define RX_AFE_ATT_IDAC(n)	(0x4000 + ((n) * 0x100))
 #define RX_AFE_CTLE_IDAC(n)	(0x4001 + ((n) * 0x100))
-- 
2.25.1