CC: [email protected]
CC: [email protected]
TO: Helge Deller <[email protected]>
CC: John David Anglin <[email protected]>

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   368094df48e680fa51cedb68537408cfa64b788e
commit: b7795074a04669d0a023babf786d29bf67c68783 parisc: Optimize per-pagetable spinlocks
date:   4 months ago
:::::: branch date: 11 hours ago
:::::: commit date: 4 months ago
config: parisc-randconfig-s032-20210607 (attached as .config)
compiler: hppa64-linux-gcc (GCC) 9.3.0
reproduce:
        wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.3-341-g8af24329-dirty
        # 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b7795074a04669d0a023babf786d29bf67c68783
        git remote add linus 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
        git fetch --no-tags linus master
        git checkout b7795074a04669d0a023babf786d29bf67c68783
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1 
CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' W=1 ARCH=parisc 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <[email protected]>


sparse warnings: (new ones prefixed by >>)
>> mm/mprotect.c:140:48: sparse: sparse: context imbalance in 
>> 'change_pte_range' - different lock contexts for basic block
--
   mm/memory.c:698:1: sparse: sparse: context imbalance in 
'copy_nonpresent_pte' - different lock contexts for basic block
   mm/memory.c:900:9: sparse: sparse: context imbalance in 'copy_pte_range' - 
different lock contexts for basic block
   mm/memory.c: note: in included file (through include/linux/pgtable.h, 
arch/parisc/include/asm/io.h, include/linux/io.h, ...):
   arch/parisc/include/asm/pgtable.h:451:9: sparse: sparse: context imbalance 
in 'zap_pte_range' - different lock contexts for basic block
   mm/memory.c:1623:16: sparse: sparse: context imbalance in '__get_locked_pte' 
- different lock contexts for basic block
   mm/memory.c:1644:9: sparse: sparse: context imbalance in 
'insert_page_into_pte_locked' - different lock contexts for basic block
   mm/memory.c:1672:9: sparse: sparse: context imbalance in 'insert_page' - 
different lock contexts for basic block
   mm/memory.c:1964:9: sparse: sparse: context imbalance in 'insert_pfn' - 
different lock contexts for basic block
   mm/memory.c:2183:17: sparse: sparse: context imbalance in 'remap_pte_range' 
- different lock contexts for basic block
   mm/memory.c:2419:17: sparse: sparse: context imbalance in 
'apply_to_pte_range' - unexpected unlock
   mm/memory.c:2912:17: sparse: sparse: context imbalance in 'wp_page_copy' - 
different lock contexts for basic block
   mm/memory.c:3022:17: sparse: sparse: context imbalance in 'wp_pfn_shared' - 
unexpected unlock
   mm/memory.c:3085:19: sparse: sparse: context imbalance in 'do_wp_page' - 
different lock contexts for basic block
   mm/memory.c:3580:9: sparse: sparse: context imbalance in 'do_anonymous_page' 
- different lock contexts for basic block
   mm/memory.c:3657:19: sparse: sparse: context imbalance in 
'pte_alloc_one_map' - different lock contexts for basic block
   mm/memory.c:3842:9: sparse: sparse: context imbalance in 'alloc_set_pte' - 
different lock contexts for basic block
   mm/memory.c:3884:17: sparse: sparse: context imbalance in 'finish_fault' - 
unexpected unlock
   mm/memory.c:3993:9: sparse: sparse: context imbalance in 'do_fault_around' - 
unexpected unlock
>> mm/memory.c:4209:32: sparse: sparse: context imbalance in 'do_numa_page' - 
>> different lock contexts for basic block
   mm/memory.c:4426:9: sparse: sparse: context imbalance in 'handle_pte_fault' 
- different lock contexts for basic block
   mm/memory.c:4712:5: sparse: sparse: context imbalance in 'follow_pte' - 
different lock contexts for basic block
   mm/memory.c:4802:9: sparse: sparse: context imbalance in 'follow_pfn' - 
unexpected unlock

vim +/change_pte_range +140 mm/mprotect.c

36f881883c5794 Kirill A. Shutemov 2015-06-24   37  
4b10e7d562c90d Mel Gorman         2012-10-25   38  static unsigned long 
change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
c1e6098b23bb46 Peter Zijlstra     2006-09-25   39               unsigned long 
addr, unsigned long end, pgprot_t newprot,
58705444c45b3c Peter Xu           2020-04-06   40               unsigned long 
cp_flags)
^1da177e4c3f41 Linus Torvalds     2005-04-16   41  {
0697212a411c1d Christoph Lameter  2006-06-23   42       pte_t *pte, oldpte;
705e87c0c3c384 Hugh Dickins       2005-10-29   43       spinlock_t *ptl;
7da4d641c58d20 Peter Zijlstra     2012-11-19   44       unsigned long pages = 0;
3e32158767b04d Andi Kleen         2016-12-12   45       int target_node = 
NUMA_NO_NODE;
58705444c45b3c Peter Xu           2020-04-06   46       bool dirty_accountable 
= cp_flags & MM_CP_DIRTY_ACCT;
58705444c45b3c Peter Xu           2020-04-06   47       bool prot_numa = 
cp_flags & MM_CP_PROT_NUMA;
292924b2602474 Peter Xu           2020-04-06   48       bool uffd_wp = cp_flags 
& MM_CP_UFFD_WP;
292924b2602474 Peter Xu           2020-04-06   49       bool uffd_wp_resolve = 
cp_flags & MM_CP_UFFD_WP_RESOLVE;
^1da177e4c3f41 Linus Torvalds     2005-04-16   50  
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   51       /*
c1e8d7c6a7a682 Michel Lespinasse  2020-06-08   52        * Can be called with 
only the mmap_lock for reading by
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   53        * prot_numa so we must 
check the pmd isn't constantly
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   54        * changing from under 
us from pmd_none to pmd_trans_huge
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   55        * and/or the other way 
around.
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   56        */
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   57       if 
(pmd_trans_unstable(pmd))
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   58               return 0;
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   59  
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   60       /*
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   61        * The pmd points to a 
regular pte so the pmd can't change
c1e8d7c6a7a682 Michel Lespinasse  2020-06-08   62        * from under us even 
if the mmap_lock is only hold for
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   63        * reading.
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   64        */
175ad4f1e7a29c Andrea Arcangeli   2017-02-22   65       pte = 
pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1ad9f620c3a22f Mel Gorman         2014-04-07   66  
3e32158767b04d Andi Kleen         2016-12-12   67       /* Get target node for 
single threaded private VMAs */
3e32158767b04d Andi Kleen         2016-12-12   68       if (prot_numa && 
!(vma->vm_flags & VM_SHARED) &&
3e32158767b04d Andi Kleen         2016-12-12   69           
atomic_read(&vma->vm_mm->mm_users) == 1)
3e32158767b04d Andi Kleen         2016-12-12   70               target_node = 
numa_node_id();
3e32158767b04d Andi Kleen         2016-12-12   71  
3ea277194daaea Mel Gorman         2017-08-02   72       
flush_tlb_batched_pending(vma->vm_mm);
6606c3e0da5360 Zachary Amsden     2006-09-30   73       
arch_enter_lazy_mmu_mode();
^1da177e4c3f41 Linus Torvalds     2005-04-16   74       do {
0697212a411c1d Christoph Lameter  2006-06-23   75               oldpte = *pte;
0697212a411c1d Christoph Lameter  2006-06-23   76               if 
(pte_present(oldpte)) {
^1da177e4c3f41 Linus Torvalds     2005-04-16   77                       pte_t 
ptent;
b191f9b106ea1a Mel Gorman         2015-03-25   78                       bool 
preserve_write = prot_numa && pte_write(oldpte);
^1da177e4c3f41 Linus Torvalds     2005-04-16   79  
e944fd67b625c0 Mel Gorman         2015-02-12   80                       /*
e944fd67b625c0 Mel Gorman         2015-02-12   81                        * 
Avoid trapping faults against the zero or KSM
e944fd67b625c0 Mel Gorman         2015-02-12   82                        * 
pages. See similar comment in change_huge_pmd.
e944fd67b625c0 Mel Gorman         2015-02-12   83                        */
e944fd67b625c0 Mel Gorman         2015-02-12   84                       if 
(prot_numa) {
e944fd67b625c0 Mel Gorman         2015-02-12   85                               
struct page *page;
e944fd67b625c0 Mel Gorman         2015-02-12   86  
a818f5363a0eba Huang Ying         2019-11-30   87                               
/* Avoid TLB flush if possible */
a818f5363a0eba Huang Ying         2019-11-30   88                               
if (pte_protnone(oldpte))
a818f5363a0eba Huang Ying         2019-11-30   89                               
        continue;
a818f5363a0eba Huang Ying         2019-11-30   90  
e944fd67b625c0 Mel Gorman         2015-02-12   91                               
page = vm_normal_page(vma, addr, oldpte);
e944fd67b625c0 Mel Gorman         2015-02-12   92                               
if (!page || PageKsm(page))
e944fd67b625c0 Mel Gorman         2015-02-12   93                               
        continue;
10c1045f28e86a Mel Gorman         2015-02-12   94  
859d4adc3415a6 Henry Willard      2018-01-31   95                               
/* Also skip shared copy-on-write pages */
859d4adc3415a6 Henry Willard      2018-01-31   96                               
if (is_cow_mapping(vma->vm_flags) &&
859d4adc3415a6 Henry Willard      2018-01-31   97                               
    page_mapcount(page) != 1)
859d4adc3415a6 Henry Willard      2018-01-31   98                               
        continue;
859d4adc3415a6 Henry Willard      2018-01-31   99  
09a913a7a947fb Mel Gorman         2018-04-10  100                               
/*
09a913a7a947fb Mel Gorman         2018-04-10  101                               
 * While migration can move some dirty pages,
09a913a7a947fb Mel Gorman         2018-04-10  102                               
 * it cannot move them all from MIGRATE_ASYNC
09a913a7a947fb Mel Gorman         2018-04-10  103                               
 * context.
09a913a7a947fb Mel Gorman         2018-04-10  104                               
 */
9de4f22a60f731 Huang Ying         2020-04-06  105                               
if (page_is_file_lru(page) && PageDirty(page))
09a913a7a947fb Mel Gorman         2018-04-10  106                               
        continue;
09a913a7a947fb Mel Gorman         2018-04-10  107  
3e32158767b04d Andi Kleen         2016-12-12  108                               
/*
3e32158767b04d Andi Kleen         2016-12-12  109                               
 * Don't mess with PTEs if page is already on the node
3e32158767b04d Andi Kleen         2016-12-12  110                               
 * a single-threaded process is running on.
3e32158767b04d Andi Kleen         2016-12-12  111                               
 */
3e32158767b04d Andi Kleen         2016-12-12  112                               
if (target_node == page_to_nid(page))
3e32158767b04d Andi Kleen         2016-12-12  113                               
        continue;
e944fd67b625c0 Mel Gorman         2015-02-12  114                       }
e944fd67b625c0 Mel Gorman         2015-02-12  115  
04a8645304500b Aneesh Kumar K.V   2019-03-05  116                       oldpte 
= ptep_modify_prot_start(vma, addr, pte);
04a8645304500b Aneesh Kumar K.V   2019-03-05  117                       ptent = 
pte_modify(oldpte, newprot);
b191f9b106ea1a Mel Gorman         2015-03-25  118                       if 
(preserve_write)
288bc54949fc26 Aneesh Kumar K.V   2017-02-24  119                               
ptent = pte_mk_savedwrite(ptent);
8a0516ed8b90c9 Mel Gorman         2015-02-12  120  
292924b2602474 Peter Xu           2020-04-06  121                       if 
(uffd_wp) {
292924b2602474 Peter Xu           2020-04-06  122                               
ptent = pte_wrprotect(ptent);
292924b2602474 Peter Xu           2020-04-06  123                               
ptent = pte_mkuffd_wp(ptent);
292924b2602474 Peter Xu           2020-04-06  124                       } else 
if (uffd_wp_resolve) {
292924b2602474 Peter Xu           2020-04-06  125                               
/*
292924b2602474 Peter Xu           2020-04-06  126                               
 * Leave the write bit to be handled
292924b2602474 Peter Xu           2020-04-06  127                               
 * by PF interrupt handler, then
292924b2602474 Peter Xu           2020-04-06  128                               
 * things like COW could be properly
292924b2602474 Peter Xu           2020-04-06  129                               
 * handled.
292924b2602474 Peter Xu           2020-04-06  130                               
 */
292924b2602474 Peter Xu           2020-04-06  131                               
ptent = pte_clear_uffd_wp(ptent);
292924b2602474 Peter Xu           2020-04-06  132                       }
292924b2602474 Peter Xu           2020-04-06  133  
8a0516ed8b90c9 Mel Gorman         2015-02-12  134                       /* 
Avoid taking write faults for known dirty pages */
64e455079e1bd7 Peter Feiner       2014-10-13  135                       if 
(dirty_accountable && pte_dirty(ptent) &&
64e455079e1bd7 Peter Feiner       2014-10-13  136                               
        (pte_soft_dirty(ptent) ||
8a0516ed8b90c9 Mel Gorman         2015-02-12  137                               
         !(vma->vm_flags & VM_SOFTDIRTY))) {
9d85d5863fa481 Aneesh Kumar K.V   2014-02-12  138                               
ptent = pte_mkwrite(ptent);
4b10e7d562c90d Mel Gorman         2012-10-25  139                       }
04a8645304500b Aneesh Kumar K.V   2019-03-05 @140                       
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
7da4d641c58d20 Peter Zijlstra     2012-11-19  141                       pages++;
f45ec5ff16a75f Peter Xu           2020-04-06  142               } else if 
(is_swap_pte(oldpte)) {
0697212a411c1d Christoph Lameter  2006-06-23  143                       
swp_entry_t entry = pte_to_swp_entry(oldpte);
f45ec5ff16a75f Peter Xu           2020-04-06  144                       pte_t 
newpte;
0697212a411c1d Christoph Lameter  2006-06-23  145  
0697212a411c1d Christoph Lameter  2006-06-23  146                       if 
(is_write_migration_entry(entry)) {
0697212a411c1d Christoph Lameter  2006-06-23  147                               
/*
0697212a411c1d Christoph Lameter  2006-06-23  148                               
 * A protection check is difficult so
0697212a411c1d Christoph Lameter  2006-06-23  149                               
 * just be safe and disable write
0697212a411c1d Christoph Lameter  2006-06-23  150                               
 */
0697212a411c1d Christoph Lameter  2006-06-23  151                               
make_migration_entry_read(&entry);
c3d16e16522fe3 Cyrill Gorcunov    2013-10-16  152                               
newpte = swp_entry_to_pte(entry);
c3d16e16522fe3 Cyrill Gorcunov    2013-10-16  153                               
if (pte_swp_soft_dirty(oldpte))
c3d16e16522fe3 Cyrill Gorcunov    2013-10-16  154                               
        newpte = pte_swp_mksoft_dirty(newpte);
f45ec5ff16a75f Peter Xu           2020-04-06  155                               
if (pte_swp_uffd_wp(oldpte))
f45ec5ff16a75f Peter Xu           2020-04-06  156                               
        newpte = pte_swp_mkuffd_wp(newpte);
f45ec5ff16a75f Peter Xu           2020-04-06  157                       } else 
if (is_write_device_private_entry(entry)) {
5042db43cc26f5 Jérôme Glisse      2017-09-08  158                               
/*
5042db43cc26f5 Jérôme Glisse      2017-09-08  159                               
 * We do not preserve soft-dirtiness. See
5042db43cc26f5 Jérôme Glisse      2017-09-08  160                               
 * copy_one_pte() for explanation.
5042db43cc26f5 Jérôme Glisse      2017-09-08  161                               
 */
5042db43cc26f5 Jérôme Glisse      2017-09-08  162                               
make_device_private_entry_read(&entry);
5042db43cc26f5 Jérôme Glisse      2017-09-08  163                               
newpte = swp_entry_to_pte(entry);
f45ec5ff16a75f Peter Xu           2020-04-06  164                               
if (pte_swp_uffd_wp(oldpte))
f45ec5ff16a75f Peter Xu           2020-04-06  165                               
        newpte = pte_swp_mkuffd_wp(newpte);
f45ec5ff16a75f Peter Xu           2020-04-06  166                       } else {
f45ec5ff16a75f Peter Xu           2020-04-06  167                               
newpte = oldpte;
f45ec5ff16a75f Peter Xu           2020-04-06  168                       }
f45ec5ff16a75f Peter Xu           2020-04-06  169  
f45ec5ff16a75f Peter Xu           2020-04-06  170                       if 
(uffd_wp)
f45ec5ff16a75f Peter Xu           2020-04-06  171                               
newpte = pte_swp_mkuffd_wp(newpte);
f45ec5ff16a75f Peter Xu           2020-04-06  172                       else if 
(uffd_wp_resolve)
f45ec5ff16a75f Peter Xu           2020-04-06  173                               
newpte = pte_swp_clear_uffd_wp(newpte);
5042db43cc26f5 Jérôme Glisse      2017-09-08  174  
f45ec5ff16a75f Peter Xu           2020-04-06  175                       if 
(!pte_same(oldpte, newpte)) {
f45ec5ff16a75f Peter Xu           2020-04-06  176                               
set_pte_at(vma->vm_mm, addr, pte, newpte);
5042db43cc26f5 Jérôme Glisse      2017-09-08  177                               
pages++;
5042db43cc26f5 Jérôme Glisse      2017-09-08  178                       }
e920e14ca29b0b Mel Gorman         2013-10-07  179               }
^1da177e4c3f41 Linus Torvalds     2005-04-16  180       } while (pte++, addr += 
PAGE_SIZE, addr != end);
6606c3e0da5360 Zachary Amsden     2006-09-30  181       
arch_leave_lazy_mmu_mode();
705e87c0c3c384 Hugh Dickins       2005-10-29  182       pte_unmap_unlock(pte - 
1, ptl);
7da4d641c58d20 Peter Zijlstra     2012-11-19  183  
7da4d641c58d20 Peter Zijlstra     2012-11-19  184       return pages;
^1da177e4c3f41 Linus Torvalds     2005-04-16  185  }
^1da177e4c3f41 Linus Torvalds     2005-04-16  186  

:::::: The code at line 140 was first introduced by commit
:::::: 04a8645304500be88b3345b65fef7efe58016166 mm: update 
ptep_modify_prot_commit to take old pte value as arg

:::::: TO: Aneesh Kumar K.V <[email protected]>
:::::: CC: Linus Torvalds <[email protected]>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]

Attachment: .config.gz
Description: application/gzip

_______________________________________________
kbuild mailing list -- [email protected]
To unsubscribe send an email to [email protected]

Reply via email to