Hi Tanmay,

On Fri, May 10, 2024 at 05:51:25PM -0700, Tanmay Shah wrote:
> It is possible that the remote processor is already running before
> Linux boots or before the remoteproc platform driver probes. Implement
> the required remoteproc framework ops to provide the resource table
> address and to connect to or disconnect from the remote processor in
> such a case.
> 
> Signed-off-by: Tanmay Shah <tanmay.s...@amd.com>
> ---
> 
> Changes in v2:
>   - Fix following sparse warnings
> 
> drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse:    expected struct rsc_tbl_data *rsc_data_va
> drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse:    expected struct resource_table *rsc_addr
> drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse:    expected void volatile [noderef] __iomem *addr
> 
>  drivers/remoteproc/xlnx_r5_remoteproc.c | 164 +++++++++++++++++++++++-
>  1 file changed, 160 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
> index 84243d1dff9f..039370cffa32 100644
> --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> @@ -25,6 +25,10 @@
>  /* RX mailbox client buffer max length */
>  #define MBOX_CLIENT_BUF_MAX  (IPI_BUF_LEN_MAX + \
>                                sizeof(struct zynqmp_ipi_message))
> +
> +#define RSC_TBL_XLNX_MAGIC   ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
> +                              (uint32_t)'m' << 8 | (uint32_t)'p')
> +
>  /*
>   * settings for RPU cluster mode which
>   * reflects possible values of xlnx,cluster-mode dt-property
> @@ -73,6 +77,15 @@ struct mbox_info {
>       struct mbox_chan *rx_chan;
>  };
>  
> +/* Xilinx Platform specific data structure */
> +struct rsc_tbl_data {
> +     const int version;
> +     const u32 magic_num;
> +     const u32 comp_magic_num;

Why is a complement magic number needed?

> +     const u32 rsc_tbl_size;
> +     const uintptr_t rsc_tbl;
> +} __packed;
> +
>  /*
>   * Hardcoded TCM bank values. This will stay in driver to maintain backward
>   * compatibility with device-tree that does not have TCM information.
> @@ -95,20 +108,24 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
>  /**
>   * struct zynqmp_r5_core
>   *
> + * @rsc_tbl_va: resource table virtual address
>   * @dev: device of RPU instance
>   * @np: device node of RPU instance
>   * @tcm_bank_count: number TCM banks accessible to this RPU
>   * @tcm_banks: array of each TCM bank data
>   * @rproc: rproc handle
> + * @rsc_tbl_size: resource table size retrieved from remote
>   * @pm_domain_id: RPU CPU power domain id
>   * @ipi: pointer to mailbox information
>   */
>  struct zynqmp_r5_core {
> +     struct resource_table *rsc_tbl_va;

Shouldn't this be of type "void __iomem *"?  Did sparse give you trouble on that
one?
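
If sparse complains at the boundary with the remoteproc core, something
along these lines (untested) keeps the __iomem annotation on the stored
pointer and only drops it explicitly where the framework wants a plain
pointer:

        /* in struct zynqmp_r5_core -- keep the __iomem annotation */
        void __iomem *rsc_tbl_va;

        /* in zynqmp_r5_get_loaded_rsc_table() -- drop it explicitly */
        return (__force struct resource_table *)r5_core->rsc_tbl_va;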

>       struct device *dev;
>       struct device_node *np;
>       int tcm_bank_count;
>       struct mem_bank_data **tcm_banks;
>       struct rproc *rproc;
> +     u32 rsc_tbl_size;
>       u32 pm_domain_id;
>       struct mbox_info *ipi;
>  };
> @@ -621,10 +638,19 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
>  {
>       int ret;
>  
> -     ret = add_tcm_banks(rproc);
> -     if (ret) {
> -             dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
> -             return ret;
> +     /**

Using "/**" is for comments that will endup in the documentation, which I don't
think is needed here.  Please correct throughout the patch.
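
I.e. plain multi-line comments should look like this:

        /*
         * For attach/detach use case, firmware is already loaded so
         * TCM isn't really needed at all.
         */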

> +      * For the attach/detach use case, firmware is already loaded so
> +      * TCM isn't really needed at all. Also, for security, TCM can be
> +      * locked in such a case and Linux may not have access at all.
> +      * So avoid adding TCM banks; TCM power domains are requested during
> +      * the attach callback.
> +      */
> +     if (rproc->state != RPROC_DETACHED) {
> +             ret = add_tcm_banks(rproc);
> +             if (ret) {
> +                     dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
> +                     return ret;
> +             }
>       }
>  
>       ret = add_mem_regions_carveout(rproc);
> @@ -662,6 +688,123 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
>       return 0;
>  }
>  
> +static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
> +                                                          size_t *size)
> +{
> +     struct zynqmp_r5_core *r5_core;
> +
> +     r5_core = rproc->priv;
> +
> +     *size = r5_core->rsc_tbl_size;
> +
> +     return r5_core->rsc_tbl_va;
> +}
> +
> +static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
> +{
> +     struct device *dev = r5_core->dev;
> +     struct rsc_tbl_data *rsc_data_va;
> +     struct resource_table *rsc_addr;
> +     struct resource res_mem;
> +     struct device_node *np;
> +     int ret;
> +
> +     /**
> +      * The remote processor firmware is expected to provide the resource
> +      * table address via the struct rsc_tbl_data data structure.
> +      * The start address of the first entry under the "memory-region"
> +      * property list contains that data structure, which holds the resource
> +      * table address, size, and a magic number to validate the entry.
> +      */
> +     np = of_parse_phandle(r5_core->np, "memory-region", 0);
> +     if (!np) {
> +             dev_err(dev, "failed to get memory region dev node\n");
> +             return -EINVAL;
> +     }
> +
> +     ret = of_address_to_resource(np, 0, &res_mem);
> +     if (ret) {
> +             dev_err(dev, "failed to get memory-region resource addr\n");
> +             return -EINVAL;
> +     }
> +
> +     rsc_data_va = (struct rsc_tbl_data *)devm_ioremap_wc(dev, res_mem.start,
> +                                                          sizeof(struct rsc_tbl_data));

There is no point in holding the mapping until the driver is unloaded.  Please
use plain ioremap_wc() and iounmap() at the end of the function.
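
Something along these lines (untested sketch, with rsc_data_va declared
as void __iomem *):

        rsc_data_va = ioremap_wc(res_mem.start, sizeof(struct rsc_tbl_data));
        if (!rsc_data_va) {
                dev_err(dev, "failed to map resource table data address\n");
                return -EIO;
        }

        /* ... validate the magic numbers and map the resource table ... */

        /* the bootstrap data is no longer needed once validated */
        iounmap(rsc_data_va);

        return 0;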

> +     if (!rsc_data_va) {
> +             dev_err(dev, "failed to map resource table data address\n");
> +             return -EIO;
> +     }
> +
> +     /**
> +      * If the RSC_TBL_XLNX_MAGIC number and its complement aren't found,
> +      * then do not consider the resource table address valid and don't
> +      * attach.
> +      */
> +     if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
> +         rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
> +             dev_dbg(dev, "invalid magic number, won't attach\n");
> +             return -EINVAL;
> +     }
> +
> +     rsc_addr = (struct resource_table *)ioremap_wc(rsc_data_va->rsc_tbl,
> +                                                    rsc_data_va->rsc_tbl_size);
> +     if (!rsc_addr) {
> +             dev_err(dev, "failed to get rsc_addr\n");
> +             return -EINVAL;
> +     }
> +
> +     /**
> +      * As of now, resource table version 1 is expected. Don't fail to
> +      * attach, but warn users about it.
> +      */
> +     if (rsc_addr->ver != 1)
> +             dev_warn(dev, "unexpected resource table version %d\n",
> +                      rsc_addr->ver);
> +
> +     r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
> +     r5_core->rsc_tbl_va = rsc_addr;
> +
> +     return 0;
> +}
> +
> +static int zynqmp_r5_attach(struct rproc *rproc)
> +{
> +     struct zynqmp_r5_core *r5_core = rproc->priv;
> +     int i, pm_domain_id, ret;
> +
> +     /*
> +      * Firmware is loaded in TCM. Request the TCM power domains to notify
> +      * the platform management controller that TCM is in use. These will
> +      * be released during the unprepare callback.
> +      */
> +     for (i = 0; i < r5_core->tcm_bank_count; i++) {
> +             pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> +             ret = zynqmp_pm_request_node(pm_domain_id,
> +                                          ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> +                                          ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> +             if (ret < 0)
> +                     pr_warn("TCM %d can't be requested\n", i);
> +     }
> +
> +     return 0;
> +}
> +
> +static int zynqmp_r5_detach(struct rproc *rproc)
> +{
> +     struct zynqmp_r5_core *r5_core = rproc->priv;
> +
> +     /*
> +      * Generate a last notification to the remote after clearing the virtio
> +      * flag. The remote can avoid polling on the virtio reset flag if the
> +      * host generates a kick during detach, and instead check the flag in
> +      * the kick interrupt handler.
> +      */
> +     zynqmp_r5_rproc_kick(rproc, 0);
> +
> +     iounmap((void __iomem *)r5_core->rsc_tbl_va);
> +     r5_core->rsc_tbl_va = NULL;

This is puzzling...  What happens to ->rsc_tbl_va when the remote processor is
re-attached?

I will not look at the SRAM part.  Please re-submit when we are done with the
attach/detach feature.

Thanks,
Mathieu

> +
> +     return 0;
> +}
> +
>  static const struct rproc_ops zynqmp_r5_rproc_ops = {
>       .prepare        = zynqmp_r5_rproc_prepare,
>       .unprepare      = zynqmp_r5_rproc_unprepare,
> @@ -673,6 +816,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
>       .sanity_check   = rproc_elf_sanity_check,
>       .get_boot_addr  = rproc_elf_get_boot_addr,
>       .kick           = zynqmp_r5_rproc_kick,
> +     .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
> +     .attach         = zynqmp_r5_attach,
> +     .detach         = zynqmp_r5_detach,
>  };
>  
>  /**
> @@ -723,6 +869,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
>               goto free_rproc;
>       }
>  
> +     /*
> +      * Move the rproc state to DETACHED to give a one-time opportunity to
> +      * attach if firmware is already available in memory. This can happen
> +      * if firmware is loaded via a debugger or by any other agent in the
> +      * system. If firmware isn't available in memory and the resource
> +      * table isn't found, then the rproc state stays OFFLINE.
> +      */
> +     if (!zynqmp_r5_get_rsc_table_va(r5_core))
> +             r5_rproc->state = RPROC_DETACHED;
> +
>       r5_core->rproc = r5_rproc;
>       return r5_core;
>  
> -- 
> 2.25.1
> 
