Reviewed-by: Steve Wahl <[email protected]>

On Sun, Jan 23, 2022 at 10:38:46AM -0800, Yury Norov wrote:
> In some cases, arch/x86 code calls cpumask_weight() to check if any bit of
> a given cpumask is set. We can do it more efficiently with cpumask_empty()
> because cpumask_empty() stops traversing the cpumask as soon as it finds
> first set bit, while cpumask_weight() counts all bits unconditionally.
> 
> Signed-off-by: Yury Norov <[email protected]>
> ---
>  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 14 +++++++-------
>  arch/x86/mm/mmio-mod.c                 |  2 +-
>  arch/x86/platform/uv/uv_nmi.c          |  2 +-
>  3 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index b57b3db9a6a7..e23ff03290b8 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -341,14 +341,14 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
>  
>       /* Check whether cpus belong to parent ctrl group */
>       cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
> -     if (cpumask_weight(tmpmask)) {
> +     if (!cpumask_empty(tmpmask)) {
>               rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
>               return -EINVAL;
>       }
>  
>       /* Check whether cpus are dropped from this group */
>       cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
> -     if (cpumask_weight(tmpmask)) {
> +     if (!cpumask_empty(tmpmask)) {
>               /* Give any dropped cpus to parent rdtgroup */
>               cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
>               update_closid_rmid(tmpmask, prgrp);
> @@ -359,7 +359,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
>        * and update per-cpu rmid
>        */
>       cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
> -     if (cpumask_weight(tmpmask)) {
> +     if (!cpumask_empty(tmpmask)) {
>               head = &prgrp->mon.crdtgrp_list;
>               list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
>                       if (crgrp == rdtgrp)
> @@ -394,7 +394,7 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
>  
>       /* Check whether cpus are dropped from this group */
>       cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
> -     if (cpumask_weight(tmpmask)) {
> +     if (!cpumask_empty(tmpmask)) {
>               /* Can't drop from default group */
>               if (rdtgrp == &rdtgroup_default) {
>                       rdt_last_cmd_puts("Can't drop CPUs from default group\n");
> @@ -413,12 +413,12 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
>        * and update per-cpu closid/rmid.
>        */
>       cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
> -     if (cpumask_weight(tmpmask)) {
> +     if (!cpumask_empty(tmpmask)) {
>               list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
>                       if (r == rdtgrp)
>                               continue;
>                       cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
> -                     if (cpumask_weight(tmpmask1))
> +                     if (!cpumask_empty(tmpmask1))
>                               cpumask_rdtgrp_clear(r, tmpmask1);
>               }
>               update_closid_rmid(tmpmask, rdtgrp);
> @@ -488,7 +488,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
>  
>       /* check that user didn't specify any offline cpus */
>       cpumask_andnot(tmpmask, newmask, cpu_online_mask);
> -     if (cpumask_weight(tmpmask)) {
> +     if (!cpumask_empty(tmpmask)) {
>               ret = -EINVAL;
>               rdt_last_cmd_puts("Can only assign online CPUs\n");
>               goto unlock;
> diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
> index 933a2ebad471..c3317f0650d8 100644
> --- a/arch/x86/mm/mmio-mod.c
> +++ b/arch/x86/mm/mmio-mod.c
> @@ -400,7 +400,7 @@ static void leave_uniprocessor(void)
>       int cpu;
>       int err;
>  
> -     if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
> +     if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
>               return;
>       pr_notice("Re-enabling CPUs...\n");
>       for_each_cpu(cpu, downed_cpus) {
> diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
> index 1e9ff28bc2e0..ea277fc08357 100644
> --- a/arch/x86/platform/uv/uv_nmi.c
> +++ b/arch/x86/platform/uv/uv_nmi.c
> @@ -985,7 +985,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
>  
>       /* Clear global flags */
>       if (master) {
> -             if (cpumask_weight(uv_nmi_cpu_mask))
> +             if (!cpumask_empty(uv_nmi_cpu_mask))
>                       uv_nmi_cleanup_mask();
>               atomic_set(&uv_nmi_cpus_in_nmi, -1);
>               atomic_set(&uv_nmi_cpu, -1);
> -- 
> 2.30.2
> 

-- 
Steve Wahl, Hewlett Packard Enterprise

Reply via email to