On 26/07/2023 at 17:02, Herve Codina wrote:
> QMC channels support runtime timeslots changes but nothing is done at
> the QMC HDLC driver to handle these changes.
> 
> Use existing IFACE ioctl in order to configure the timeslots to use.
> 
> Signed-off-by: Herve Codina <herve.cod...@bootlin.com>

Reviewed-by: Christophe Leroy <christophe.le...@csgroup.eu>
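
For anyone who wants to exercise this from user space: the slot map goes
through the regular generic HDLC SIOCWANDEV path that this patch hooks up.
A minimal sketch of the set side, assuming an interface named "hdlc0" and an
example slot map value (needs CAP_NET_ADMIN, and the interface has to be down
or the handler below returns -EBUSY):

/* gcc -o qmc_hdlc_slotmap qmc_hdlc_slotmap.c */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>           /* struct ifreq, struct if_settings */
#include <linux/hdlc/ioctl.h>   /* te1_settings, IF_IFACE_E1 */
#include <linux/sockios.h>      /* SIOCWANDEV */

int main(void)
{
	/* example value: request logical timeslots 1..15 of the channel */
	te1_settings te1 = { .slot_map = 0x0000fffe };
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);	/* example netdev name */
	ifr.ifr_settings.type = IF_IFACE_E1;		/* handled by qmc_hdlc_ioctl() */
	ifr.ifr_settings.size = sizeof(te1);
	ifr.ifr_settings.ifs_ifsu.te1 = &te1;

	ret = ioctl(fd, SIOCWANDEV, &ifr);
	if (ret)
		perror("SIOCWANDEV");

	close(fd);
	return ret ? 1 : 0;
}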

> ---
>   drivers/net/wan/fsl_qmc_hdlc.c | 169 ++++++++++++++++++++++++++++++++-
>   1 file changed, 168 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
> index b4ebae963d39..c449edf0a35e 100644
> --- a/drivers/net/wan/fsl_qmc_hdlc.c
> +++ b/drivers/net/wan/fsl_qmc_hdlc.c
> @@ -32,6 +32,7 @@ struct qmc_hdlc {
>       struct qmc_hdlc_desc tx_descs[8];
>       unsigned int tx_out;
>       struct qmc_hdlc_desc rx_descs[4];
> +     u32 slot_map;
>   };
>   
>   static inline struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
> @@ -202,6 +203,162 @@ static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
>       return NETDEV_TX_OK;
>   }
>   
> +static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
> +                                u32 slot_map, struct qmc_chan_ts_info *ts_info)
> +{
> +     u64 ts_mask_avail;
> +     unsigned int bit;
> +     unsigned int i;
> +     u64 ts_mask;
> +     u64 map = 0;
> +
> +     /* Tx and Rx masks must be identical */
> +     if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
> +             dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
> +                     ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
> +             return -EINVAL;
> +     }
> +
> +     ts_mask_avail = ts_info->rx_ts_mask_avail;
> +     ts_mask = 0;
> +     map = slot_map;
> +     bit = 0;
> +     for (i = 0; i < 64; i++) {
> +             if (ts_mask_avail & BIT_ULL(i)) {
> +                     if (map & BIT_ULL(bit))
> +                             ts_mask |= BIT_ULL(i);
> +                     bit++;
> +             }
> +     }
> +
> +     if (hweight64(ts_mask) != hweight64(map)) {
> +             dev_err(qmc_hdlc->dev, "Cannot translate timeslots 0x%llx -> (0x%llx,0x%llx)\n",
> +                     map, ts_mask_avail, ts_mask);
> +             return -EINVAL;
> +     }
> +
> +     ts_info->tx_ts_mask = ts_mask;
> +     ts_info->rx_ts_mask = ts_mask;
> +     return 0;
> +}
> +
> +static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
> +                               const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
> +{
> +     u64 ts_mask_avail;
> +     unsigned int bit;
> +     unsigned int i;
> +     u64 ts_mask;
> +     u64 map = 0;
> +
> +     /* Tx and Rx masks must be identical */
> +     if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
> +             dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
> +                     ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
> +             return -EINVAL;
> +     }
> +     if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
> +             dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
> +                     ts_info->rx_ts_mask, ts_info->tx_ts_mask);
> +             return -EINVAL;
> +     }
> +
> +     ts_mask_avail = ts_info->rx_ts_mask_avail;
> +     ts_mask = ts_info->rx_ts_mask;
> +     map = 0;
> +     bit = 0;
> +     for (i = 0; i < 64; i++) {
> +             if (ts_mask_avail & BIT_ULL(i)) {
> +                     if (ts_mask & BIT_ULL(i))
> +                             map |= BIT_ULL(bit);
> +                     bit++;
> +             }
> +     }
> +
> +     if (hweight64(ts_mask) != hweight64(map)) {
> +             dev_err(qmc_hdlc->dev, "Cannot translate timeslots (0x%llx,0x%llx) -> 0x%llx\n",
> +                     ts_mask_avail, ts_mask, map);
> +             return -EINVAL;
> +     }
> +
> +     if (map >= BIT_ULL(32)) {
> +             dev_err(qmc_hdlc->dev, "Slot map out of 32bit (0x%llx,0x%llx) -> 0x%llx\n",
> +                     ts_mask_avail, ts_mask, map);
> +             return -EINVAL;
> +     }
> +
> +     *slot_map = map;
> +     return 0;
> +}
> +
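
If I read the two translation helpers right, logical bit N of the ioctl
slot_map selects the N-th *available* timeslot of the channel, and
qmc_hdlc_xlate_ts_info() is the exact inverse. A worked example with made-up
masks:

  ts_mask_avail = 0x00ff00ff  (timeslots 0-7 and 16-23 usable on the channel)
  slot_map      = 0x0101      (logical bits 0 and 8 requested)
  ts_mask       = 0x00010001  (physical TS0 and TS16 selected)
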
> +static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
> +{
> +     struct qmc_chan_ts_info ts_info;
> +     int ret;
> +
> +     ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
> +     if (ret) {
> +             dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
> +             return ret;
> +     }
> +     ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
> +     if (ret)
> +             return ret;
> +
> +     ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
> +     if (ret) {
> +             dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
> +             return ret;
> +     }
> +
> +     qmc_hdlc->slot_map = te1->slot_map;
> +
> +     return 0;
> +}
> +
> +static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
> +{
> +     struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
> +     te1_settings te1;
> +
> +     switch (ifs->type) {
> +     case IF_GET_IFACE:
> +             ifs->type = IF_IFACE_E1;
> +             if (ifs->size < sizeof(te1)) {
> +                     if (!ifs->size)
> +                             return 0; /* only type requested */
> +
> +                     ifs->size = sizeof(te1); /* data size wanted */
> +                     return -ENOBUFS;
> +             }
> +
> +             memset(&te1, 0, sizeof(te1));
> +
> +             /* Update slot_map */
> +             te1.slot_map = qmc_hdlc->slot_map;
> +
> +             if (copy_to_user(ifs->ifs_ifsu.te1, &te1,  sizeof(te1)))
> +                     return -EFAULT;
> +             return 0;
> +
> +     case IF_IFACE_E1:
> +     case IF_IFACE_T1:
> +             if (!capable(CAP_NET_ADMIN))
> +                     return -EPERM;
> +
> +             if (netdev->flags & IFF_UP)
> +                     return -EBUSY;
> +
> +             if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
> +                     return -EFAULT;
> +
> +             return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);
> +
> +     default:
> +             return hdlc_ioctl(netdev, ifs);
> +     }
> +}
> +
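
And the read-back side, continuing from the set sketch above the diff (same
fd/ifr, names still only examples): a zero ifs->size only reports the type, a
too-small buffer gets -ENOBUFS with ifs->size updated, so passing
sizeof(te1_settings) up front is the simple route:

	memset(&te1, 0, sizeof(te1));
	ifr.ifr_settings.type = IF_GET_IFACE;
	ifr.ifr_settings.size = sizeof(te1);
	ifr.ifr_settings.ifs_ifsu.te1 = &te1;
	if (ioctl(fd, SIOCWANDEV, &ifr) == 0)
		printf("iface type %u, slot_map 0x%x\n",
		       ifr.ifr_settings.type, te1.slot_map);
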
>   static int qmc_hdlc_open(struct net_device *netdev)
>   {
>       struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
> @@ -328,13 +485,14 @@ static const struct net_device_ops qmc_hdlc_netdev_ops = {
>       .ndo_open       = qmc_hdlc_open,
>       .ndo_stop       = qmc_hdlc_close,
>       .ndo_start_xmit = hdlc_start_xmit,
> -     .ndo_siocwandev = hdlc_ioctl,
> +     .ndo_siocwandev = qmc_hdlc_ioctl,
>   };
>   
>   static int qmc_hdlc_probe(struct platform_device *pdev)
>   {
>       struct device_node *np = pdev->dev.of_node;
>       struct qmc_hdlc *qmc_hdlc;
> +     struct qmc_chan_ts_info ts_info;
>       struct qmc_chan_info info;
>       hdlc_device *hdlc;
>       int ret;
> @@ -364,6 +522,15 @@ static int qmc_hdlc_probe(struct platform_device *pdev)
>               return -EINVAL;
>       }
>   
> +     ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
> +     if (ret) {
> +             dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
> +             return ret;
> +     }
> +     ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
> +     if (ret)
> +             return ret;
> +
>       qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
>       if (!qmc_hdlc->netdev) {
>               dev_err(qmc_hdlc->dev, "failed to alloc hdlc dev\n");
