>-----Original Message-----
>From: dev <dev-boun...@dpdk.org> On Behalf Of Andrzej Ostruszka
>Sent: Thursday, March 19, 2020 3:07 PM
>To: dev@dpdk.org
>Subject: Re: [dpdk-dev] [PATCH 1/2] mempool/octeontx2: add devargs to lock ctx in cache
>
>On 3/6/20 5:35 PM, pbhagavat...@marvell.com wrote:
>> From: Pavan Nikhilesh <pbhagavat...@marvell.com>
>>
>> Add device arguments to lock NPA aura and pool contexts in NDC cache.
>> The device args take hexadecimal bitmask where each bit represent the
>> corresponding aura/pool id.
>> Example:
>>      -w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
>[...]
>> +- ``Lock NPA contexts in NDC``
>> +
>> +   Lock NPA aura and pool contexts in NDC cache.
>> +   The device args take hexadecimal bitmask where each bit represent the
>> +   corresponding aura/pool id.
>> +
>> +   For example::
>> +      -w 0002:0e:00.0,npa_lock_mask=0xf
>
>I think you need to add a paragraph break (empty line) after "::" in
>order to have this example treated as a "literal block" (same as max_pool
>above - not visible in the diff).  At least that is how it looks when I
>build the doc with "ninja doc" and check the result in a browser.

Will fix in v2.
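
For v2 the hunk would presumably become (with the empty line after "::"
so Sphinx treats the example as a literal block):

   For example::

      -w 0002:0e:00.0,npa_lock_mask=0xf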

>
>> diff --git a/doc/guides/mempool/octeontx2.rst b/doc/guides/mempool/octeontx2.rst
>> index 2c9a0953b..c594934d8 100644
>> --- a/doc/guides/mempool/octeontx2.rst
>> +++ b/doc/guides/mempool/octeontx2.rst
>> @@ -61,6 +61,15 @@ Runtime Config Options
>>     provide ``max_pools`` parameter to the first PCIe device probed by the given
>>     application.
>>
>> +- ``Lock NPA contexts in NDC``
>> +
>> +   Lock NPA aura and pool contexts in NDC cache.
>> +   The device args take hexadecimal bitmask where each bit represent the
>> +   corresponding aura/pool id.
>> +
>> +   For example::
>> +      -w 0002:02:00.0,npa_lock_mask=0xf
>
>Ditto.
>
>> diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
>> index 60187ec72..819d09e11 100644
>> --- a/doc/guides/nics/octeontx2.rst
>> +++ b/doc/guides/nics/octeontx2.rst
>> @@ -213,6 +213,15 @@ Runtime Config Options
>>     parameters to all the PCIe devices if application requires to configure on
>>     all the ethdev ports.
>>
>> +- ``Lock NPA contexts in NDC``
>> +
>> +   Lock NPA aura and pool contexts in NDC cache.
>> +   The device args take hexadecimal bitmask where each bit represent the
>> +   corresponding aura/pool id.
>> +
>> +   For example::
>> +      -w 0002:02:00.0,npa_lock_mask=0xf
>
>Ditto - take that as a general comment (you might also want to fix other
>places, not only the ones introduced here).
>
>[...]
>> diff --git a/drivers/common/octeontx2/otx2_common.c b/drivers/common/octeontx2/otx2_common.c
>> index 1a257cf07..684bb3a0f 100644
>> --- a/drivers/common/octeontx2/otx2_common.c
>> +++ b/drivers/common/octeontx2/otx2_common.c
>> @@ -169,6 +169,41 @@ int otx2_npa_lf_obj_ref(void)
>>      return cnt ? 0 : -EINVAL;
>>  }
>>
>> +static int
>> +parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
>> +{
>> +    RTE_SET_USED(key);
>> +    uint64_t val;
>> +
>> +    val = strtoull(value, NULL, 16);
>> +
>> +    *(uint64_t *)extra_args = val;
>> +
>> +    return 0;
>> +}
>> +
>> +#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
>> +/*
>> + * @internal
>> + * Parse common device arguments
>> + */
>> +void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
>> +{
>> +
>> +    struct otx2_idev_cfg *idev;
>> +    uint64_t npa_lock_mask;
>
>Missing initialization of 'npa_lock_mask' - when the user does not supply
>this devarg, no callback is called and you copy an uninitialized value to
>idev (below).

Will fix in v2.
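
For reference, the fix is presumably just a zero default, so the mask
stays empty when the devarg is absent - roughly:

    void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
    {
        struct otx2_idev_cfg *idev;
        uint64_t npa_lock_mask = 0; /* default: lock no aura/pool ctx */

        idev = otx2_intra_dev_get_cfg();
        if (idev == NULL)
            return;

        /* Callback only runs when the devarg is actually present. */
        rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
                           &parse_npa_lock_mask, &npa_lock_mask);

        idev->npa_lock_mask = npa_lock_mask;
    }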

>
>> +
>> +    idev = otx2_intra_dev_get_cfg();
>> +
>> +    if (idev == NULL)
>> +            return;
>> +
>> +    rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
>> +                    &parse_npa_lock_mask, &npa_lock_mask);
>> +
>> +    idev->npa_lock_mask = npa_lock_mask;
>> +}
>[...]
>> diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c
>> index ac2d61861..5075b027a 100644
>> --- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
>> +++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
>> @@ -348,6 +348,7 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
>>      struct npa_aq_enq_req *aura_init_req, *pool_init_req;
>>      struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
>>      struct otx2_mbox_dev *mdev = &mbox->dev[0];
>> +    struct otx2_idev_cfg *idev;
>>      int rc, off;
>>
>>      aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> @@ -379,6 +380,46 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
>>              return 0;
>>      else
>>              return NPA_LF_ERR_AURA_POOL_INIT;
>> +
>> +    idev = otx2_intra_dev_get_cfg();
>> +    if (idev == NULL)
>> +            return 0;
>
>Is this not an error?

I think that condition can never be true, as this code runs as part of
device probe and we would have exited the application there.

I will move the check above, before sending the mbox message, just to be safe.
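
Roughly (a sketch of the intended v2 ordering, reusing the identifiers
from the patch):

    /* Bail out before touching the mbox if locking is not requested. */
    idev = otx2_intra_dev_get_cfg();
    if (idev == NULL)
        return 0;

    if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
        return 0;

    aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
    aura_init_req->aura_id = aura_id;
    aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
    aura_init_req->op = NPA_AQ_INSTOP_LOCK;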

>
>> +
>> +    if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
>> +            return 0;
>> +
>> +    aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> +    aura_init_req->aura_id = aura_id;
>> +    aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
>> +    aura_init_req->op = NPA_AQ_INSTOP_LOCK;
>> +
>> +    pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> +    if (!pool_init_req) {
>> +            /* The shared memory buffer can be full.
>> +             * Flush it and retry
>> +             */
>> +            otx2_mbox_msg_send(mbox, 0);
>> +            rc = otx2_mbox_wait_for_rsp(mbox, 0);
>> +            if (rc < 0) {
>> +                    otx2_err("Failed to LOCK AURA context");
>> +                    return 0;
>
>Same here and below - if these are not errors then maybe do not log them
>as such.  If they are errors then we should probably signal them via
>return value ("return rc;").

These are not catastrophic errors, since locking is first come, first
served, and the pool can still function without locking.

I have logged them as errors for debuggability, since the application has
explicitly requested locking through devargs.

>
>> +            }
>> +
>> +            pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> +            if (!pool_init_req) {
>> +                    otx2_err("Failed to LOCK POOL context");
>> +                    return 0;
>
>See above.
>
>> +            }
>> +    }
>> +    pool_init_req->aura_id = aura_id;
>> +    pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
>> +    pool_init_req->op = NPA_AQ_INSTOP_LOCK;
>> +
>> +    rc = otx2_mbox_process(mbox);
>> +    if (rc < 0)
>> +            otx2_err("Failed to lock POOL ctx to NDC");
>
>See above.
>
>> +
>> +    return 0;
>>  }
>>
>>  static int
>> @@ -390,6 +431,7 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
>>      struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
>>      struct otx2_mbox_dev *mdev = &mbox->dev[0];
>>      struct ndc_sync_op *ndc_req;
>> +    struct otx2_idev_cfg *idev;
>>      int rc, off;
>>
>>      /* Procedure for disabling an aura/pool */
>> @@ -434,6 +476,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
>>              otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
>>              return NPA_LF_ERR_AURA_POOL_FINI;
>>      }
>> +
>> +    idev = otx2_intra_dev_get_cfg();
>> +    if (idev == NULL)
>> +            return 0;
>> +
>> +    if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
>> +            return 0;
>
>Same comments here and below as for *pool_init above.
>
>> +
>> +    aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> +    aura_req->aura_id = aura_id;
>> +    aura_req->ctype = NPA_AQ_CTYPE_AURA;
>> +    aura_req->op = NPA_AQ_INSTOP_UNLOCK;
>> +
>> +    rc = otx2_mbox_process(mbox);
>> +    if (rc < 0)
>> +            otx2_err("Failed to unlock AURA ctx to NDC");
>> +
>> +    pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> +    pool_req->aura_id = aura_id;
>> +    pool_req->ctype = NPA_AQ_CTYPE_POOL;
>> +    pool_req->op = NPA_AQ_INSTOP_UNLOCK;
>> +
>> +    rc = otx2_mbox_process(mbox);
>> +    if (rc < 0)
>> +            otx2_err("Failed to unlock POOL ctx to NDC");
>> +
>>      return 0;
>>  }
>With regards
>Andrzej Ostruszka
