CC: [email protected]
CC: [email protected]
TO: Helge Deller <[email protected]>
CC: John David Anglin <[email protected]>

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 
master
head:   477f70cd2a67904e04c2c2b9bd0fa2e95222f2f6
commit: b7795074a04669d0a023babf786d29bf67c68783 parisc: Optimize per-pagetable 
spinlocks
date:   7 months ago
:::::: branch date: 8 hours ago
:::::: commit date: 7 months ago
config: parisc-randconfig-s031-20210830 (attached as .config)
compiler: hppa64-linux-gcc (GCC) 11.2.0
reproduce:
        wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.3-348-gf0e6938b-dirty
        # 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b7795074a04669d0a023babf786d29bf67c68783
        git remote add linus 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
        git fetch --no-tags linus master
        git checkout b7795074a04669d0a023babf786d29bf67c68783
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross C=1 
CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=parisc 
SHELL=/bin/bash

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <[email protected]>


sparse warnings: (new ones prefixed by >>)
   mm/hugetlb.c:420:12: sparse: sparse: context imbalance in 
'allocate_file_region_entries' - wrong count at exit
   mm/hugetlb.c:493:13: sparse: sparse: context imbalance in 'region_add' - 
wrong count at exit
   mm/hugetlb.c:561:13: sparse: sparse: context imbalance in 'region_chg' - 
wrong count at exit
>> mm/hugetlb.c:3945:39: sparse: sparse: context imbalance in 
>> '__unmap_hugepage_range' - different lock contexts for basic block
   mm/hugetlb.c: note: in included file (through include/linux/atomic.h, 
arch/parisc/include/asm/bitops.h, include/linux/bitops.h, ...):
   arch/parisc/include/asm/atomic.h:122:1: sparse: sparse: context imbalance in 
'hugetlb_cow' - unexpected unlock
   mm/hugetlb.c:4862:25: sparse: sparse: context imbalance in 
'follow_hugetlb_page' - different lock contexts for basic block
--
   mm/memory.c:5172:22: sparse: sparse: cast removes address space '__user' of 
expression
   mm/memory.c:772:9: sparse: sparse: context imbalance in 
'copy_nonpresent_pte' - different lock contexts for basic block
   mm/memory.c:900:9: sparse: sparse: context imbalance in 'copy_pte_range' - 
different lock contexts for basic block
   mm/memory.c: note: in included file (through include/linux/pgtable.h, 
arch/parisc/include/asm/io.h, include/linux/io.h, ...):
   arch/parisc/include/asm/pgtable.h:451:9: sparse: sparse: context imbalance 
in 'zap_pte_range' - different lock contexts for basic block
   mm/memory.c:1623:16: sparse: sparse: context imbalance in '__get_locked_pte' 
- different lock contexts for basic block
   mm/memory.c:1644:9: sparse: sparse: context imbalance in 
'insert_page_into_pte_locked' - different lock contexts for basic block
   mm/memory.c:1672:9: sparse: sparse: context imbalance in 'insert_page' - 
different lock contexts for basic block
   mm/memory.c:1964:9: sparse: sparse: context imbalance in 'insert_pfn' - 
different lock contexts for basic block
   mm/memory.c:2183:17: sparse: sparse: context imbalance in 'remap_pte_range' 
- different lock contexts for basic block
   mm/memory.c:2419:17: sparse: sparse: context imbalance in 
'apply_to_pte_range' - unexpected unlock
   mm/memory.c:2912:17: sparse: sparse: context imbalance in 'wp_page_copy' - 
different lock contexts for basic block
   mm/memory.c:3022:17: sparse: sparse: context imbalance in 'wp_pfn_shared' - 
unexpected unlock
   mm/memory.c:3085:19: sparse: sparse: context imbalance in 'do_wp_page' - 
different lock contexts for basic block
   mm/memory.c:3422:26: sparse: sparse: context imbalance in 'do_swap_page' - 
different lock contexts for basic block
   mm/memory.c:3580:9: sparse: sparse: context imbalance in 'do_anonymous_page' 
- different lock contexts for basic block
   mm/memory.c:3657:19: sparse: sparse: context imbalance in 
'pte_alloc_one_map' - different lock contexts for basic block
   mm/memory.c:3842:9: sparse: sparse: context imbalance in 'alloc_set_pte' - 
different lock contexts for basic block
   mm/memory.c:3884:17: sparse: sparse: context imbalance in 'finish_fault' - 
unexpected unlock
   mm/memory.c:3993:9: sparse: sparse: context imbalance in 'do_fault_around' - 
unexpected unlock
>> mm/memory.c:4209:32: sparse: sparse: context imbalance in 'do_numa_page' - 
>> different lock contexts for basic block
   mm/memory.c:4426:9: sparse: sparse: context imbalance in 'handle_pte_fault' 
- different lock contexts for basic block
   mm/memory.c:4712:5: sparse: sparse: context imbalance in 'follow_pte' - 
different lock contexts for basic block
   mm/memory.c:4802:9: sparse: sparse: context imbalance in 'follow_pfn' - 
unexpected unlock
--
>> mm/mprotect.c:140:48: sparse: sparse: context imbalance in 
>> 'change_pte_range' - different lock contexts for basic block

vim +/__unmap_hugepage_range +3945 mm/hugetlb.c

63551ae0feaaa2 David Gibson       2005-06-21  3885  
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3886  void 
__unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3887                          
unsigned long start, unsigned long end,
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3888                          
struct page *ref_page)
63551ae0feaaa2 David Gibson       2005-06-21  3889  {
63551ae0feaaa2 David Gibson       2005-06-21  3890      struct mm_struct *mm = 
vma->vm_mm;
63551ae0feaaa2 David Gibson       2005-06-21  3891      unsigned long address;
c7546f8f03f5a4 David Gibson       2005-08-05  3892      pte_t *ptep;
63551ae0feaaa2 David Gibson       2005-06-21  3893      pte_t pte;
cb900f41215447 Kirill A. Shutemov 2013-11-14  3894      spinlock_t *ptl;
63551ae0feaaa2 David Gibson       2005-06-21  3895      struct page *page;
a5516438959d90 Andi Kleen         2008-07-23  3896      struct hstate *h = 
hstate_vma(vma);
a5516438959d90 Andi Kleen         2008-07-23  3897      unsigned long sz = 
huge_page_size(h);
ac46d4f3c43241 Jérôme Glisse      2018-12-28  3898      struct 
mmu_notifier_range range;
a5516438959d90 Andi Kleen         2008-07-23  3899  
63551ae0feaaa2 David Gibson       2005-06-21  3900      
WARN_ON(!is_vm_hugetlb_page(vma));
a5516438959d90 Andi Kleen         2008-07-23  3901      BUG_ON(start & 
~huge_page_mask(h));
a5516438959d90 Andi Kleen         2008-07-23  3902      BUG_ON(end & 
~huge_page_mask(h));
63551ae0feaaa2 David Gibson       2005-06-21  3903  
07e326610e5634 Aneesh Kumar K.V   2016-12-12  3904      /*
07e326610e5634 Aneesh Kumar K.V   2016-12-12  3905       * This is a hugetlb 
vma, all the pte entries should point
07e326610e5634 Aneesh Kumar K.V   2016-12-12  3906       * to huge page.
07e326610e5634 Aneesh Kumar K.V   2016-12-12  3907       */
ed6a79352cad00 Peter Zijlstra     2018-08-31  3908      
tlb_change_page_size(tlb, sz);
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3909      tlb_start_vma(tlb, vma);
dff11abe280b47 Mike Kravetz       2018-10-05  3910  
dff11abe280b47 Mike Kravetz       2018-10-05  3911      /*
dff11abe280b47 Mike Kravetz       2018-10-05  3912       * If sharing possible, 
alert mmu notifiers of worst case.
dff11abe280b47 Mike Kravetz       2018-10-05  3913       */
6f4f13e8d9e27c Jérôme Glisse      2019-05-13  3914      
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
6f4f13e8d9e27c Jérôme Glisse      2019-05-13  3915                              
end);
ac46d4f3c43241 Jérôme Glisse      2018-12-28  3916      
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
ac46d4f3c43241 Jérôme Glisse      2018-12-28  3917      
mmu_notifier_invalidate_range_start(&range);
569f48b85813f0 Hillf Danton       2014-12-10  3918      address = start;
569f48b85813f0 Hillf Danton       2014-12-10  3919      for (; address < end; 
address += sz) {
7868a2087ec13e Punit Agrawal      2017-07-06  3920              ptep = 
huge_pte_offset(mm, address, sz);
c7546f8f03f5a4 David Gibson       2005-08-05  3921              if (!ptep)
c7546f8f03f5a4 David Gibson       2005-08-05  3922                      
continue;
c7546f8f03f5a4 David Gibson       2005-08-05  3923  
cb900f41215447 Kirill A. Shutemov 2013-11-14  3924              ptl = 
huge_pte_lock(h, mm, ptep);
34ae204f18519f Mike Kravetz       2020-08-11  3925              if 
(huge_pmd_unshare(mm, vma, &address, ptep)) {
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3926                      
spin_unlock(ptl);
dff11abe280b47 Mike Kravetz       2018-10-05  3927                      /*
dff11abe280b47 Mike Kravetz       2018-10-05  3928                       * We 
just unmapped a page of PMDs by clearing a PUD.
dff11abe280b47 Mike Kravetz       2018-10-05  3929                       * The 
caller's TLB flush range should cover this area.
dff11abe280b47 Mike Kravetz       2018-10-05  3930                       */
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3931                      
continue;
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3932              }
39dde65c9940c9 Kenneth W Chen     2006-12-06  3933  
6629326b89b6e6 Hillf Danton       2012-03-23  3934              pte = 
huge_ptep_get(ptep);
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3935              if 
(huge_pte_none(pte)) {
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3936                      
spin_unlock(ptl);
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3937                      
continue;
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3938              }
6629326b89b6e6 Hillf Danton       2012-03-23  3939  
6629326b89b6e6 Hillf Danton       2012-03-23  3940              /*
9fbc1f635fd0bd Naoya Horiguchi    2015-02-11  3941               * Migrating 
hugepage or HWPoisoned hugepage is already
9fbc1f635fd0bd Naoya Horiguchi    2015-02-11  3942               * unmapped and 
its refcount is dropped, so just clear pte here.
6629326b89b6e6 Hillf Danton       2012-03-23  3943               */
9fbc1f635fd0bd Naoya Horiguchi    2015-02-11  3944              if 
(unlikely(!pte_present(pte))) {
9386fac34c7cbe Punit Agrawal      2017-07-06 @3945                      
huge_pte_clear(mm, address, ptep, sz);
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3946                      
spin_unlock(ptl);
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3947                      
continue;
8c4894c6bc790d Naoya Horiguchi    2012-12-12  3948              }
6629326b89b6e6 Hillf Danton       2012-03-23  3949  
6629326b89b6e6 Hillf Danton       2012-03-23  3950              page = 
pte_page(pte);
04f2cbe35699d2 Mel Gorman         2008-07-23  3951              /*
04f2cbe35699d2 Mel Gorman         2008-07-23  3952               * If a 
reference page is supplied, it is because a specific
04f2cbe35699d2 Mel Gorman         2008-07-23  3953               * page is 
being unmapped, not a range. Ensure the page we
04f2cbe35699d2 Mel Gorman         2008-07-23  3954               * are about to 
unmap is the actual page of interest.
04f2cbe35699d2 Mel Gorman         2008-07-23  3955               */
04f2cbe35699d2 Mel Gorman         2008-07-23  3956              if (ref_page) {
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3957                      if 
(page != ref_page) {
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3958                              
spin_unlock(ptl);
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3959                              
continue;
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3960                      }
04f2cbe35699d2 Mel Gorman         2008-07-23  3961                      /*
04f2cbe35699d2 Mel Gorman         2008-07-23  3962                       * Mark 
the VMA as having unmapped its page so that
04f2cbe35699d2 Mel Gorman         2008-07-23  3963                       * 
future faults in this VMA will fail rather than
04f2cbe35699d2 Mel Gorman         2008-07-23  3964                       * 
looking like data was lost
04f2cbe35699d2 Mel Gorman         2008-07-23  3965                       */
04f2cbe35699d2 Mel Gorman         2008-07-23  3966                      
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
04f2cbe35699d2 Mel Gorman         2008-07-23  3967              }
04f2cbe35699d2 Mel Gorman         2008-07-23  3968  
c7546f8f03f5a4 David Gibson       2005-08-05  3969              pte = 
huge_ptep_get_and_clear(mm, address, ptep);
b528e4b6405b9f Aneesh Kumar K.V   2016-12-12  3970              
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
106c992a5ebef2 Gerald Schaefer    2013-04-29  3971              if 
(huge_pte_dirty(pte))
6649a3863232eb Ken Chen           2007-02-08  3972                      
set_page_dirty(page);
9e81130b7ce230 Hillf Danton       2012-03-21  3973  
5d317b2b653659 Naoya Horiguchi    2015-11-05  3974              
hugetlb_count_sub(pages_per_huge_page(h), mm);
d281ee61451835 Kirill A. Shutemov 2016-01-15  3975              
page_remove_rmap(page, true);
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3976  
cb900f41215447 Kirill A. Shutemov 2013-11-14  3977              
spin_unlock(ptl);
e77b0852b551ff Aneesh Kumar K.V   2016-07-26  3978              
tlb_remove_page_size(tlb, page, huge_page_size(h));
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3979              /*
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3980               * Bail out 
after unmapping reference page if supplied
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3981               */
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3982              if (ref_page)
31d49da5ad0172 Aneesh Kumar K.V   2016-07-26  3983                      break;
fe1668ae5bf014 Kenneth W Chen     2006-10-04  3984      }
ac46d4f3c43241 Jérôme Glisse      2018-12-28  3985      
mmu_notifier_invalidate_range_end(&range);
24669e58477e27 Aneesh Kumar K.V   2012-07-31  3986      tlb_end_vma(tlb, vma);
^1da177e4c3f41 Linus Torvalds     2005-04-16  3987  }
63551ae0feaaa2 David Gibson       2005-06-21  3988  

:::::: The code at line 3945 was first introduced by commit
:::::: 9386fac34c7cbe39013410b01348e284652ca1cf mm/hugetlb: allow architectures 
to override huge_pte_clear()

:::::: TO: Punit Agrawal <[email protected]>
:::::: CC: Linus Torvalds <[email protected]>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]

Attachment: .config.gz
Description: application/gzip

_______________________________________________
kbuild mailing list -- [email protected]
To unsubscribe send an email to [email protected]

Reply via email to