Re: [PATCH v6 3/4] remoteproc: zynqmp: add pm domains support

2023-10-30 Thread Tanmay Shah
Hi Mathieu,

I agree with all the comments; I will address them in the next revision.

Thanks,

Tanmay

On 10/18/23 12:38 PM, Mathieu Poirier wrote:
> Good morning,
>
> On Thu, Oct 12, 2023 at 09:22:28PM -0700, Tanmay Shah wrote:
> > Use TCM pm domains extracted from device-tree
> > to power on/off TCM using general pm domain framework.
> > 
> > Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
> > ---
> > 
> > Changes in v6:
> >   - Remove spurious change
> >   - Handle errors in add_pm_domains function
> >   - Remove redundant code to handle errors from remove_pm_domains
> > 
> >  drivers/remoteproc/xlnx_r5_remoteproc.c | 262 ++--
> >  1 file changed, 243 insertions(+), 19 deletions(-)
> > 
> > diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c 
> > b/drivers/remoteproc/xlnx_r5_remoteproc.c
> > index 4395edea9a64..04e95d880184 100644
> > --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> > +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> > @@ -16,6 +16,7 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include <linux/pm_domain.h>
> >  
> >  #include "remoteproc_internal.h"
> >  
> > @@ -102,6 +103,12 @@ static const struct mem_bank_data 
> > zynqmp_tcm_banks_lockstep[] = {
> >   * @rproc: rproc handle
> >   * @pm_domain_id: RPU CPU power domain id
> >   * @ipi: pointer to mailbox information
> > + * @num_pm_dev: number of tcm pm domain devices for this core
> > + * @pm_dev1: pm domain virtual devices for power domain framework
> > + * @pm_dev_link1: pm domain device links after registration
> > + * @pm_dev2: used only in lockstep mode. second core's pm domain virtual 
> > devices
> > + * @pm_dev_link2: used only in lockstep mode. second core's pm device 
> > links after
> > + * registration
> >   */
> >  struct zynqmp_r5_core {
> > struct device *dev;
> > @@ -111,6 +118,11 @@ struct zynqmp_r5_core {
> > struct rproc *rproc;
> > u32 pm_domain_id;
> > struct mbox_info *ipi;
> > +   int num_pm_dev;
> > +   struct device **pm_dev1;
>
> s/pm_dev1/pm_dev_core0
>
> > +   struct device_link **pm_dev_link1;
>
> s/pm_dev_link1/pm_dev_core0_link;
>
> > +   struct device **pm_dev2;
>
> s/pm_dev2/pm_dev_core1
>
> > +   struct device_link **pm_dev_link2;
>
> s/pm_dev_link2/pm_dev_core1_link;
>
> >  };
> >  
> >  /**
> > @@ -575,12 +587,21 @@ static int add_tcm_carveout_split_mode(struct rproc 
> > *rproc)
> > bank_size = r5_core->tcm_banks[i]->size;
> > pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> >  
> > -   ret = zynqmp_pm_request_node(pm_domain_id,
> > -ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > -ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > -   if (ret < 0) {
> > -   dev_err(dev, "failed to turn on TCM 0x%x", 
> > pm_domain_id);
> > -   goto release_tcm_split;
> > +   /*
> > +* If TCM information is available in device-tree then
> > +* in that case, pm domain framework will power on/off TCM.
> > +* In that case pm_domain_id is set to 0. If hardcode
> > +* bindings from driver is used, then only this driver will
> > +* use pm_domain_id.
> > +*/
> > +   if (pm_domain_id) {
> > +   ret = zynqmp_pm_request_node(pm_domain_id,
> > +
> > ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > +
> > ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > +   if (ret < 0) {
> > +   dev_err(dev, "failed to turn on TCM 0x%x", 
> > pm_domain_id);
> > +   goto release_tcm_split;
> > +   }
>
> This should go in the next patch.
>
> > }
> >  
> > dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, 
> > size=0x%lx",
> > @@ -646,13 +667,16 @@ static int add_tcm_carveout_lockstep_mode(struct 
> > rproc *rproc)
> > for (i = 0; i < num_banks; i++) {
> > pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> >  
> > -   /* Turn on each TCM bank individually */
> > -   ret = zynqmp_pm_request_node(pm_domain_id,
> > -ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > -ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > -   if (ret < 0) {
> > -   dev_err(dev, "failed to turn on TCM 0x%x", 
> > pm_domain_id);
> > -   goto release_tcm_lockstep;
> > +   if (pm_domain_id) {
> > +   /* Turn on each TCM bank individually */
> > +   ret = zynqmp_pm_request_node(pm_domain_id,
> > +
> > ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> > +
> > ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> > +   if (ret < 0) {
> > +   dev_err(dev, "failed to turn on TCM 0x%x",
> > +   

Re: [PATCH v6 3/4] remoteproc: zynqmp: add pm domains support

2023-10-18 Thread Mathieu Poirier
Good morning,

On Thu, Oct 12, 2023 at 09:22:28PM -0700, Tanmay Shah wrote:
> Use TCM pm domains extracted from device-tree
> to power on/off TCM using general pm domain framework.
> 
> Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
> ---
> 
> Changes in v6:
>   - Remove spurious change
>   - Handle errors in add_pm_domains function
>   - Remove redundant code to handle errors from remove_pm_domains
> 
>  drivers/remoteproc/xlnx_r5_remoteproc.c | 262 ++--
>  1 file changed, 243 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c 
> b/drivers/remoteproc/xlnx_r5_remoteproc.c
> index 4395edea9a64..04e95d880184 100644
> --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> @@ -16,6 +16,7 @@
>  #include 
>  #include 
>  #include 
> +#include <linux/pm_domain.h>
>  
>  #include "remoteproc_internal.h"
>  
> @@ -102,6 +103,12 @@ static const struct mem_bank_data 
> zynqmp_tcm_banks_lockstep[] = {
>   * @rproc: rproc handle
>   * @pm_domain_id: RPU CPU power domain id
>   * @ipi: pointer to mailbox information
> + * @num_pm_dev: number of tcm pm domain devices for this core
> + * @pm_dev1: pm domain virtual devices for power domain framework
> + * @pm_dev_link1: pm domain device links after registration
> + * @pm_dev2: used only in lockstep mode. second core's pm domain virtual 
> devices
> + * @pm_dev_link2: used only in lockstep mode. second core's pm device links 
> after
> + * registration
>   */
>  struct zynqmp_r5_core {
>   struct device *dev;
> @@ -111,6 +118,11 @@ struct zynqmp_r5_core {
>   struct rproc *rproc;
>   u32 pm_domain_id;
>   struct mbox_info *ipi;
> + int num_pm_dev;
> + struct device **pm_dev1;

s/pm_dev1/pm_dev_core0

> + struct device_link **pm_dev_link1;

s/pm_dev_link1/pm_dev_core0_link;

> + struct device **pm_dev2;

s/pm_dev2/pm_dev_core1

> + struct device_link **pm_dev_link2;

s/pm_dev_link2/pm_dev_core1_link;

>  };
>  
>  /**
> @@ -575,12 +587,21 @@ static int add_tcm_carveout_split_mode(struct rproc 
> *rproc)
>   bank_size = r5_core->tcm_banks[i]->size;
>   pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
>  
> - ret = zynqmp_pm_request_node(pm_domain_id,
> -  ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> -  ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> - if (ret < 0) {
> - dev_err(dev, "failed to turn on TCM 0x%x", 
> pm_domain_id);
> - goto release_tcm_split;
> + /*
> +  * If TCM information is available in device-tree then
> +  * in that case, pm domain framework will power on/off TCM.
> +  * In that case pm_domain_id is set to 0. If hardcode
> +  * bindings from driver is used, then only this driver will
> +  * use pm_domain_id.
> +  */
> + if (pm_domain_id) {
> + ret = zynqmp_pm_request_node(pm_domain_id,
> +  
> ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> +  
> ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> + if (ret < 0) {
> + dev_err(dev, "failed to turn on TCM 0x%x", 
> pm_domain_id);
> + goto release_tcm_split;
> + }

This should go in the next patch.

>   }
>  
>   dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, 
> size=0x%lx",
> @@ -646,13 +667,16 @@ static int add_tcm_carveout_lockstep_mode(struct rproc 
> *rproc)
>   for (i = 0; i < num_banks; i++) {
>   pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
>  
> - /* Turn on each TCM bank individually */
> - ret = zynqmp_pm_request_node(pm_domain_id,
> -  ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> -  ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> - if (ret < 0) {
> - dev_err(dev, "failed to turn on TCM 0x%x", 
> pm_domain_id);
> - goto release_tcm_lockstep;
> + if (pm_domain_id) {
> + /* Turn on each TCM bank individually */
> + ret = zynqmp_pm_request_node(pm_domain_id,
> +  
> ZYNQMP_PM_CAPABILITY_ACCESS, 0,
> +  
> ZYNQMP_PM_REQUEST_ACK_BLOCKING);
> + if (ret < 0) {
> + dev_err(dev, "failed to turn on TCM 0x%x",
> + pm_domain_id);
> + goto release_tcm_lockstep;
> + }

Same

>   }
>  
>   bank_size = r5_core->tcm_banks[i]->size;
> @@ -687,7 +711,8 @@ static int add_tcm_carveout_lockstep_mode(struct rproc 
> 

[PATCH v6 3/4] remoteproc: zynqmp: add pm domains support

2023-10-12 Thread Tanmay Shah
Use TCM pm domains extracted from device-tree
to power on/off TCM using general pm domain framework.

Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
---

Changes in v6:
  - Remove spurious change
  - Handle errors in add_pm_domains function
  - Remove redundant code to handle errors from remove_pm_domains

 drivers/remoteproc/xlnx_r5_remoteproc.c | 262 ++--
 1 file changed, 243 insertions(+), 19 deletions(-)

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c 
b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 4395edea9a64..04e95d880184 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -16,6 +16,7 @@
 #include 
 #include 
 #include 
+#include <linux/pm_domain.h>
 
 #include "remoteproc_internal.h"
 
@@ -102,6 +103,12 @@ static const struct mem_bank_data 
zynqmp_tcm_banks_lockstep[] = {
  * @rproc: rproc handle
  * @pm_domain_id: RPU CPU power domain id
  * @ipi: pointer to mailbox information
+ * @num_pm_dev: number of tcm pm domain devices for this core
+ * @pm_dev1: pm domain virtual devices for power domain framework
+ * @pm_dev_link1: pm domain device links after registration
+ * @pm_dev2: used only in lockstep mode. second core's pm domain virtual 
devices
+ * @pm_dev_link2: used only in lockstep mode. second core's pm device links 
after
+ * registration
  */
 struct zynqmp_r5_core {
struct device *dev;
@@ -111,6 +118,11 @@ struct zynqmp_r5_core {
struct rproc *rproc;
u32 pm_domain_id;
struct mbox_info *ipi;
+   int num_pm_dev;
+   struct device **pm_dev1;
+   struct device_link **pm_dev_link1;
+   struct device **pm_dev2;
+   struct device_link **pm_dev_link2;
 };
 
 /**
@@ -575,12 +587,21 @@ static int add_tcm_carveout_split_mode(struct rproc 
*rproc)
bank_size = r5_core->tcm_banks[i]->size;
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
 
-   ret = zynqmp_pm_request_node(pm_domain_id,
-ZYNQMP_PM_CAPABILITY_ACCESS, 0,
-ZYNQMP_PM_REQUEST_ACK_BLOCKING);
-   if (ret < 0) {
-   dev_err(dev, "failed to turn on TCM 0x%x", 
pm_domain_id);
-   goto release_tcm_split;
+   /*
+* If TCM information is available in device-tree then
+* in that case, pm domain framework will power on/off TCM.
+* In that case pm_domain_id is set to 0. If hardcode
+* bindings from driver is used, then only this driver will
+* use pm_domain_id.
+*/
+   if (pm_domain_id) {
+   ret = zynqmp_pm_request_node(pm_domain_id,
+
ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+   if (ret < 0) {
+   dev_err(dev, "failed to turn on TCM 0x%x", 
pm_domain_id);
+   goto release_tcm_split;
+   }
}
 
dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, 
size=0x%lx",
@@ -646,13 +667,16 @@ static int add_tcm_carveout_lockstep_mode(struct rproc 
*rproc)
for (i = 0; i < num_banks; i++) {
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
 
-   /* Turn on each TCM bank individually */
-   ret = zynqmp_pm_request_node(pm_domain_id,
-ZYNQMP_PM_CAPABILITY_ACCESS, 0,
-ZYNQMP_PM_REQUEST_ACK_BLOCKING);
-   if (ret < 0) {
-   dev_err(dev, "failed to turn on TCM 0x%x", 
pm_domain_id);
-   goto release_tcm_lockstep;
+   if (pm_domain_id) {
+   /* Turn on each TCM bank individually */
+   ret = zynqmp_pm_request_node(pm_domain_id,
+
ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+   if (ret < 0) {
+   dev_err(dev, "failed to turn on TCM 0x%x",
+   pm_domain_id);
+   goto release_tcm_lockstep;
+   }
}
 
bank_size = r5_core->tcm_banks[i]->size;
@@ -687,7 +711,8 @@ static int add_tcm_carveout_lockstep_mode(struct rproc 
*rproc)
/* If failed, Turn off all TCM banks turned on before */
for (i--; i >= 0; i--) {
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
-   zynqmp_pm_release_node(pm_domain_id);
+   if (pm_domain_id)
+   zynqmp_pm_release_node(pm_domain_id);
}
return ret;
 }
@@ -758,6