On 2025/8/16 18:28, Chenghai Huang wrote:
> When the device resumes from a suspended state, it will revert to its
> initial state and requires re-enabling. Currently, the address prefetch
> function is not re-enabled after device resuming. Move the address prefetch
> enable to the initialization process. In this way, the address prefetch
> can be enabled when the device resumes by calling the initialization
> process.
>
> Fixes: 607c191b371d ("crypto: hisilicon - support runtime PM for accelerator device")
> Signed-off-by: Chenghai Huang <huangchenghai2@xxxxxxxxxx>
> ---
>  drivers/crypto/hisilicon/hpre/hpre_main.c | 86 +++++++++++------------
>  drivers/crypto/hisilicon/qm.c             |  3 -
>  drivers/crypto/hisilicon/sec2/sec_main.c  | 80 ++++++++++-----------
>  drivers/crypto/hisilicon/zip/zip_main.c   |  2 +-
>  4 files changed, 84 insertions(+), 87 deletions(-)
>
> diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
> index f5b47e5ff48a..dbe8f62f556b 100644
> --- a/drivers/crypto/hisilicon/hpre/hpre_main.c
> +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
> @@ -466,6 +466,47 @@ struct hisi_qp *hpre_create_qp(u8 type)
>  	return NULL;
>  }
>
> +static void hpre_close_sva_prefetch(struct hisi_qm *qm)
> +{
> +	u32 val;
> +	int ret;
> +
> +	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> +		return;
> +
> +	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
> +	val |= HPRE_PREFETCH_DISABLE;
> +	writel(val, qm->io_base + HPRE_PREFETCH_CFG);
> +
> +	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
> +					 val, !(val & HPRE_SVA_DISABLE_READY),
> +					 HPRE_REG_RD_INTVRL_US,
> +					 HPRE_REG_RD_TMOUT_US);
> +	if (ret)
> +		pci_err(qm->pdev, "failed to close sva prefetch\n");
> +}
> +
> +static void hpre_open_sva_prefetch(struct hisi_qm *qm)
> +{
> +	u32 val;
> +	int ret;
> +
> +	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> +		return;
> +
> +	/* Enable prefetch */
> +	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
> +	val &= HPRE_PREFETCH_ENABLE;
> +	writel(val, qm->io_base + HPRE_PREFETCH_CFG);
> +
> +	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
> +					 val, !(val & HPRE_PREFETCH_DISABLE),
> +					 HPRE_REG_RD_INTVRL_US,
> +					 HPRE_REG_RD_TMOUT_US);
> +	if (ret)
> +		pci_err(qm->pdev, "failed to open sva prefetch\n");
> +}
> +
>  static void hpre_config_pasid(struct hisi_qm *qm)
>  {
>  	u32 val1, val2;
> @@ -484,6 +525,8 @@ static void hpre_config_pasid(struct hisi_qm *qm)
>  	}
>  	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
>  	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
> +
> +	hpre_open_sva_prefetch(qm);

For compatibility considerations, address prefetch enablement relies on the
device's capability configuration rather than the chip version. The call
should therefore be made before the version check in hpre_config_pasid(),
otherwise it is skipped when that check returns early.
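Something like the sketch below is what I have in mind. This is only a rough
illustration written against my recollection of the current
hpre_config_pasid() body (the PASID bit handling may differ in detail), not a
tested change:

static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	/*
	 * Suggested placement: hpre_open_sva_prefetch() already checks
	 * QM_SUPPORT_SVA_PREFETCH itself, so call it unconditionally here,
	 * before the chip-version early return below.
	 */
	hpre_open_sva_prefetch(qm);

	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}

With that placement the prefetch enable still goes through the normal init
path on resume, but no longer depends on the chip-version early return.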
Thanks,
Weili

>  }
>
>  static int hpre_cfg_by_dsm(struct hisi_qm *qm)
> @@ -563,47 +606,6 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
>  	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
>  }
>
> -static void hpre_open_sva_prefetch(struct hisi_qm *qm)
> -{
> -	u32 val;
> -	int ret;
> -
> -	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> -		return;
> -
> -	/* Enable prefetch */
> -	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
> -	val &= HPRE_PREFETCH_ENABLE;
> -	writel(val, qm->io_base + HPRE_PREFETCH_CFG);
> -
> -	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
> -					 val, !(val & HPRE_PREFETCH_DISABLE),
> -					 HPRE_REG_RD_INTVRL_US,
> -					 HPRE_REG_RD_TMOUT_US);
> -	if (ret)
> -		pci_err(qm->pdev, "failed to open sva prefetch\n");
> -}
> -
> -static void hpre_close_sva_prefetch(struct hisi_qm *qm)
> -{
> -	u32 val;
> -	int ret;
> -
> -	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> -		return;
> -
> -	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
> -	val |= HPRE_PREFETCH_DISABLE;
> -	writel(val, qm->io_base + HPRE_PREFETCH_CFG);
> -
> -	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
> -					 val, !(val & HPRE_SVA_DISABLE_READY),
> -					 HPRE_REG_RD_INTVRL_US,
> -					 HPRE_REG_RD_TMOUT_US);
> -	if (ret)
> -		pci_err(qm->pdev, "failed to close sva prefetch\n");
> -}
> -
>  static void hpre_enable_clock_gate(struct hisi_qm *qm)
>  {
>  	unsigned long offset;
> @@ -1450,8 +1452,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
>  	if (ret)
>  		return ret;
>
> -	hpre_open_sva_prefetch(qm);
> -
>  	hisi_qm_dev_err_init(qm);
>  	ret = hpre_show_last_regs_init(qm);
>  	if (ret)
> diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
> index 2e4ee7ecfdfb..a5cc0ccd94f1 100644
> --- a/drivers/crypto/hisilicon/qm.c
> +++ b/drivers/crypto/hisilicon/qm.c
> @@ -4447,9 +4447,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
>  {
>  	u32 value;
>
> -	if (qm->err_ini->open_sva_prefetch)
> -		qm->err_ini->open_sva_prefetch(qm);
> -
>  	if (qm->ver >= QM_HW_V3)
>  		return;
>
> diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
> index 72cf48d1f3ab..ddb20f380b54 100644
> --- a/drivers/crypto/hisilicon/sec2/sec_main.c
> +++ b/drivers/crypto/hisilicon/sec2/sec_main.c
> @@ -464,6 +464,45 @@ static void sec_set_endian(struct hisi_qm *qm)
>  	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
>  }
>
> +static void sec_close_sva_prefetch(struct hisi_qm *qm)
> +{
> +	u32 val;
> +	int ret;
> +
> +	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> +		return;
> +
> +	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
> +	val |= SEC_PREFETCH_DISABLE;
> +	writel(val, qm->io_base + SEC_PREFETCH_CFG);
> +
> +	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
> +					 val, !(val & SEC_SVA_DISABLE_READY),
> +					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
> +	if (ret)
> +		pci_err(qm->pdev, "failed to close sva prefetch\n");
> +}
> +
> +static void sec_open_sva_prefetch(struct hisi_qm *qm)
> +{
> +	u32 val;
> +	int ret;
> +
> +	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> +		return;
> +
> +	/* Enable prefetch */
> +	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
> +	val &= SEC_PREFETCH_ENABLE;
> +	writel(val, qm->io_base + SEC_PREFETCH_CFG);
> +
> +	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
> +					 val, !(val & SEC_PREFETCH_DISABLE),
> +					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
> +	if (ret)
> +		pci_err(qm->pdev, "failed to open sva prefetch\n");
> +}
> +
>  static void sec_engine_sva_config(struct hisi_qm *qm)
>  {
>  	u32 reg;
> @@ -497,45 +536,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
>  		writel_relaxed(reg, qm->io_base +
>  				SEC_INTERFACE_USER_CTRL1_REG);
>  	}
> -}
> -
> -static void sec_open_sva_prefetch(struct hisi_qm *qm)
> -{
> -	u32 val;
> -	int ret;
> -
> -	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> -		return;
> -
> -	/* Enable prefetch */
> -	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
> -	val &= SEC_PREFETCH_ENABLE;
> -	writel(val, qm->io_base + SEC_PREFETCH_CFG);
> -
> -	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
> -					 val, !(val & SEC_PREFETCH_DISABLE),
> -					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
> -	if (ret)
> -		pci_err(qm->pdev, "failed to open sva prefetch\n");
> -}
> -
> -static void sec_close_sva_prefetch(struct hisi_qm *qm)
> -{
> -	u32 val;
> -	int ret;
> -
> -	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
> -		return;
> -
> -	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
> -	val |= SEC_PREFETCH_DISABLE;
> -	writel(val, qm->io_base + SEC_PREFETCH_CFG);
> -
> -	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
> -					 val, !(val & SEC_SVA_DISABLE_READY),
> -					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
> -	if (ret)
> -		pci_err(qm->pdev, "failed to close sva prefetch\n");
> +	sec_open_sva_prefetch(qm);
>  }
>
>  static void sec_enable_clock_gate(struct hisi_qm *qm)
> @@ -1152,7 +1153,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
>  	if (ret)
>  		return ret;
>
> -	sec_open_sva_prefetch(qm);
>  	hisi_qm_dev_err_init(qm);
>  	sec_debug_regs_clear(qm);
>  	ret = sec_show_last_regs_init(qm);
> diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
> index d8ba23b7cc7d..96687c78a8dc 100644
> --- a/drivers/crypto/hisilicon/zip/zip_main.c
> +++ b/drivers/crypto/hisilicon/zip/zip_main.c
> @@ -565,6 +565,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
>  		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
>  		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
>  	}
> +	hisi_zip_open_sva_prefetch(qm);
>
>  	/* let's open all compression/decompression cores */
>
> @@ -1255,7 +1256,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
>  	if (ret)
>  		return ret;
>
> -	hisi_zip_open_sva_prefetch(qm);
>  	hisi_qm_dev_err_init(qm);
>  	hisi_zip_debug_regs_clear(qm);
>
>