On Sat, May 14, 2022 at 05:26:21PM +0300, Vasily Averin wrote:
> Fixes sparse warnings:
> ./include/trace/events/fs_dax.h:10:1: sparse:
>     got restricted vm_fault_t
> ./include/trace/events/fs_dax.h:153:1: sparse:
>     got restricted vm_fault_t
> fs/dax.c:563:39: sparse:    got restricted vm_fault_t
> fs/dax.c:565:39: sparse:    got restricted vm_fault_t
> fs/dax.c:569:31: sparse:    got restricted vm_fault_t
> fs/dax.c:1055:41: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1461:46: sparse:    got restricted vm_fault_t [usertype] ret
> fs/dax.c:1477:21: sparse:
>     expected restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1518:51: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1599:21: sparse:
>     expected restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1633:62: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1696:55: sparse:    got restricted vm_fault_t
> fs/dax.c:1711:58: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> 
> vm_fault_t type is bitwise and requires __force attribute for any casts.

Well, this patch is all kinds of messy.  I would rather we had better
abstractions.  For example ...

> @@ -560,13 +560,13 @@ static void *grab_mapping_entry(struct xa_state *xas,
>       if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
>               goto retry;
>       if (xas->xa_node == XA_ERROR(-ENOMEM))
> -             return xa_mk_internal(VM_FAULT_OOM);
> +             return xa_mk_internal((__force unsigned long)VM_FAULT_OOM);
>       if (xas_error(xas))
> -             return xa_mk_internal(VM_FAULT_SIGBUS);
> +             return xa_mk_internal((__force unsigned long)VM_FAULT_SIGBUS);
>       return entry;
>  fallback:
>       xas_unlock_irq(xas);
> -     return xa_mk_internal(VM_FAULT_FALLBACK);
> +     return xa_mk_internal((__force unsigned long)VM_FAULT_FALLBACK);
>  }

        return vm_fault_encode(VM_FAULT_xxx);

>  /**
> @@ -1052,7 +1052,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
>                       DAX_ZERO_PAGE, false);
>  
>       ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
> -     trace_dax_load_hole(inode, vmf, ret);
> +     trace_dax_load_hole(inode, vmf, (__force int)ret);

It seems like trace_dax_load_hole() should take a vm_fault_t directly, rather than a raw int that forces the cast at every call site?

> -     trace_dax_pte_fault(iter.inode, vmf, ret);
> +     trace_dax_pte_fault(iter.inode, vmf, (__force int)ret);

Ditto.

> @@ -1474,7 +1474,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault 
> *vmf, pfn_t *pfnp,
>  
>       entry = grab_mapping_entry(&xas, mapping, 0);
>       if (xa_is_internal(entry)) {
> -             ret = xa_to_internal(entry);
> +             ret = (__force vm_fault_t)xa_to_internal(entry);

vm_fault_decode(entry)?

... the others seem like more of the same.  So I'm in favour of what
you're doing, but I would rather it were done differently.  Generally,
seeing __force casts in the body of a function is a sign that something
is wrong; it's better to hide them inside small, well-named abstractions
(e.g. vm_fault_encode()/vm_fault_decode()) so each cast appears exactly once.

Reply via email to the thread.