tree:   https://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs-next.git kill-mmap-sem-v3
head:   2bdf3757643c03bcff975b2e3352f022e33e3d0a
commit: 435e2282677bd076783518a0cda335c32d6b9884 [4/10] mm: clean up swapcache lookup and creation function names
config: i386-randconfig-sb0-09300821 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.4-2) 4.9.4
reproduce:
        git checkout 435e2282677bd076783518a0cda335c32d6b9884
        # save the attached .config to linux build tree
        make ARCH=i386
All errors (new ones prefixed by >>):

   mm/shmem.c: In function 'shmem_getpage_gfp':
>> mm/shmem.c:1682:3: error: implicit declaration of function 'find_swap_cache' [-Werror=implicit-function-declaration]
      page = find_swap_cache(swap, NULL, 0);
      ^
   mm/shmem.c:1682:8: warning: assignment makes pointer from integer without a cast
      page = find_swap_cache(swap, NULL, 0);
           ^
   cc1: some warnings being treated as errors
--
   mm/memory.c: In function 'do_swap_page':
>> mm/memory.c:2938:2: error: implicit declaration of function 'find_swap_cache' [-Werror=implicit-function-declaration]
     page = find_swap_cache(entry, vma, vmf->address);
     ^
   mm/memory.c:2938:7: warning: assignment makes pointer from integer without a cast
     page = find_swap_cache(entry, vma, vmf->address);
          ^
   cc1: some warnings being treated as errors
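Note: both failing call sites already use the new find_swap_cache() name, so
the implicit-declaration errors most likely mean that no prototype for
find_swap_cache() is visible in this randconfig. A plausible culprit is a
CONFIG_SWAP=n build in which the rename missed the !CONFIG_SWAP side of
include/linux/swap.h. What follows is a minimal, hypothetical sketch of the
kind of fixup that would cure it, assuming the old lookup_swap_cache() stub
is what was left behind (any declaration under CONFIG_SWAP would need the
same rename); it is not the actual patch:

        /* include/linux/swap.h, !CONFIG_SWAP branch (hypothetical fixup) */
        static inline struct page *find_swap_cache(swp_entry_t swp,
                                                   struct vm_area_struct *vma,
                                                   unsigned long addr)
        {
                /* With no swap configured, a swapcache lookup can never hit. */
                return NULL;
        }

The signature above is inferred from the two failing call sites,
find_swap_cache(swap, NULL, 0) and find_swap_cache(entry, vma, vmf->address).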
vim +/find_swap_cache +1682 mm/shmem.c

  1610
  1611  /*
  1612   * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
  1613   *
  1614   * If we allocate a new one we do not mark it dirty. That's up to the
  1615   * vm. If we swap it in we mark it dirty since we also free the swap
  1616   * entry since a page cannot live in both the swap and page cache.
  1617   *
  1618   * fault_mm and fault_type are only supplied by shmem_fault:
  1619   * otherwise they are NULL.
  1620   */
  1621  static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
  1622          struct page **pagep, enum sgp_type sgp, gfp_t gfp,
  1623          struct vm_area_struct *vma, struct vm_fault *vmf,
  1624          vm_fault_t *fault_type)
  1625  {
  1626          struct address_space *mapping = inode->i_mapping;
  1627          struct shmem_inode_info *info = SHMEM_I(inode);
  1628          struct shmem_sb_info *sbinfo;
  1629          struct mm_struct *charge_mm;
  1630          struct mem_cgroup *memcg;
  1631          struct page *page;
  1632          swp_entry_t swap;
  1633          enum sgp_type sgp_huge = sgp;
  1634          pgoff_t hindex = index;
  1635          int error;
  1636          int once = 0;
  1637          int alloced = 0;
  1638
  1639          if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
  1640                  return -EFBIG;
  1641          if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
  1642                  sgp = SGP_CACHE;
  1643  repeat:
  1644          swap.val = 0;
  1645          page = find_lock_entry(mapping, index);
  1646          if (radix_tree_exceptional_entry(page)) {
  1647                  swap = radix_to_swp_entry(page);
  1648                  page = NULL;
  1649          }
  1650
  1651          if (sgp <= SGP_CACHE &&
  1652              ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
  1653                  error = -EINVAL;
  1654                  goto unlock;
  1655          }
  1656
  1657          if (page && sgp == SGP_WRITE)
  1658                  mark_page_accessed(page);
  1659
  1660          /* fallocated page? */
  1661          if (page && !PageUptodate(page)) {
  1662                  if (sgp != SGP_READ)
  1663                          goto clear;
  1664                  unlock_page(page);
  1665                  put_page(page);
  1666                  page = NULL;
  1667          }
  1668          if (page || (sgp == SGP_READ && !swap.val)) {
  1669                  *pagep = page;
  1670                  return 0;
  1671          }
  1672
  1673          /*
  1674           * Fast cache lookup did not find it:
  1675           * bring it back from swap or allocate.
  1676           */
  1677          sbinfo = SHMEM_SB(inode->i_sb);
  1678          charge_mm = vma ? vma->vm_mm : current->mm;
  1679
  1680          if (swap.val) {
  1681                  /* Look it up and read it in.. */
> 1682                  page = find_swap_cache(swap, NULL, 0);
  1683                  if (!page) {
  1684                          /* Or update major stats only when swapin succeeds?? */
  1685                          if (fault_type) {
  1686                                  *fault_type |= VM_FAULT_MAJOR;
  1687                                  count_vm_event(PGMAJFAULT);
  1688                                  count_memcg_event_mm(charge_mm, PGMAJFAULT);
  1689                          }
  1690                          /* Here we actually start the io */
  1691                          page = shmem_swapin(swap, gfp, info, index);
  1692                          if (!page) {
  1693                                  error = -ENOMEM;
  1694                                  goto failed;
  1695                          }
  1696                  }
  1697
  1698                  /* We have to do this with page locked to prevent races */
  1699                  lock_page(page);
  1700                  if (!PageSwapCache(page) || page_private(page) != swap.val ||
  1701                      !shmem_confirm_swap(mapping, index, swap)) {
  1702                          error = -EEXIST;        /* try again */
  1703                          goto unlock;
  1704                  }
  1705                  if (!PageUptodate(page)) {
  1706                          error = -EIO;
  1707                          goto failed;
  1708                  }
  1709                  wait_on_page_writeback(page);
  1710
  1711                  if (shmem_should_replace_page(page, gfp)) {
  1712                          error = shmem_replace_page(&page, gfp, info, index);
  1713                          if (error)
  1714                                  goto failed;
  1715                  }
  1716
  1717                  error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
  1718                                  false);
  1719                  if (!error) {
  1720                          error = shmem_add_to_page_cache(page, mapping, index,
  1721                                                  swp_to_radix_entry(swap));
  1722                          /*
  1723                           * We already confirmed swap under page lock, and make
  1724                           * no memory allocation here, so usually no possibility
  1725                           * of error; but free_swap_and_cache() only trylocks a
  1726                           * page, so it is just possible that the entry has been
  1727                           * truncated or holepunched since swap was confirmed.
  1728                           * shmem_undo_range() will have done some of the
  1729                           * unaccounting, now delete_from_swap_cache() will do
  1730                           * the rest.
  1731                           * Reset swap.val? No, leave it so "failed" goes back to
  1732                           * "repeat": reading a hole and writing should succeed.
  1733                           */
  1734                          if (error) {
  1735                                  mem_cgroup_cancel_charge(page, memcg, false);
  1736                                  delete_from_swap_cache(page);
  1737                          }
  1738                  }
  1739                  if (error)
  1740                          goto failed;
  1741
  1742                  mem_cgroup_commit_charge(page, memcg, true, false);
  1743
  1744                  spin_lock_irq(&info->lock);
  1745                  info->swapped--;
  1746                  shmem_recalc_inode(inode);
  1747                  spin_unlock_irq(&info->lock);
  1748
  1749                  if (sgp == SGP_WRITE)
  1750                          mark_page_accessed(page);
  1751
  1752                  delete_from_swap_cache(page);
  1753                  set_page_dirty(page);
  1754                  swap_free(swap);
  1755
  1756          } else {
  1757                  if (vma && userfaultfd_missing(vma)) {
  1758                          *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
  1759                          return 0;
  1760                  }
  1761
  1762                  /* shmem_symlink() */
  1763                  if (mapping->a_ops != &shmem_aops)
  1764                          goto alloc_nohuge;
  1765                  if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
  1766                          goto alloc_nohuge;
  1767                  if (shmem_huge == SHMEM_HUGE_FORCE)
  1768                          goto alloc_huge;
  1769                  switch (sbinfo->huge) {
  1770                          loff_t i_size;
  1771                          pgoff_t off;
  1772                  case SHMEM_HUGE_NEVER:
  1773                          goto alloc_nohuge;
  1774                  case SHMEM_HUGE_WITHIN_SIZE:
  1775                          off = round_up(index, HPAGE_PMD_NR);
  1776                          i_size = round_up(i_size_read(inode), PAGE_SIZE);
  1777                          if (i_size >= HPAGE_PMD_SIZE &&
  1778                              i_size >> PAGE_SHIFT >= off)
  1779                                  goto alloc_huge;
  1780                          /* fallthrough */
  1781                  case SHMEM_HUGE_ADVISE:
  1782                          if (sgp_huge == SGP_HUGE)
  1783                                  goto alloc_huge;
  1784                          /* TODO: implement fadvise() hints */
  1785                          goto alloc_nohuge;
  1786                  }
  1787
  1788  alloc_huge:
  1789                  page = shmem_alloc_and_acct_page(gfp, inode, index, true);
  1790                  if (IS_ERR(page)) {
  1791  alloc_nohuge:           page = shmem_alloc_and_acct_page(gfp, inode,
  1792                                  index, false);
  1793                  }
  1794                  if (IS_ERR(page)) {
  1795                          int retry = 5;
  1796                          error = PTR_ERR(page);
  1797                          page = NULL;
  1798                          if (error != -ENOSPC)
  1799                                  goto failed;
  1800                          /*
  1801                           * Try to reclaim some space by splitting a huge page
  1802                           * beyond i_size on the filesystem.
  1803                           */
  1804                          while (retry--) {
  1805                                  int ret;
  1806                                  ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
  1807                                  if (ret == SHRINK_STOP)
  1808                                          break;
  1809                                  if (ret)
  1810                                          goto alloc_nohuge;
  1811                          }
  1812                          goto failed;
  1813                  }
  1814
  1815                  if (PageTransHuge(page))
  1816                          hindex = round_down(index, HPAGE_PMD_NR);
  1817                  else
  1818                          hindex = index;
  1819
  1820                  if (sgp == SGP_WRITE)
  1821                          __SetPageReferenced(page);
  1822
  1823                  error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
  1824                                  PageTransHuge(page));
  1825                  if (error)
  1826                          goto unacct;
  1827                  error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
  1828                                  compound_order(page));
  1829                  if (!error) {
  1830                          error = shmem_add_to_page_cache(page, mapping, hindex,
  1831                                                          NULL);
  1832                          radix_tree_preload_end();
  1833                  }
  1834                  if (error) {
  1835                          mem_cgroup_cancel_charge(page, memcg,
  1836                                                   PageTransHuge(page));
  1837                          goto unacct;
  1838                  }
  1839                  mem_cgroup_commit_charge(page, memcg, false,
  1840                                           PageTransHuge(page));
  1841                  lru_cache_add_anon(page);
  1842
  1843                  spin_lock_irq(&info->lock);
  1844                  info->alloced += 1 << compound_order(page);
  1845                  inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
  1846                  shmem_recalc_inode(inode);
  1847                  spin_unlock_irq(&info->lock);
  1848                  alloced = true;
  1849
  1850                  if (PageTransHuge(page) &&
  1851                      DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
  1852                                  hindex + HPAGE_PMD_NR - 1) {
  1853                          /*
  1854                           * Part of the huge page is beyond i_size: subject
  1855                           * to shrink under memory pressure.
  1856                           */
  1857                          spin_lock(&sbinfo->shrinklist_lock);
  1858                          /*
  1859                           * _careful to defend against unlocked access to
  1860                           * ->shrink_list in shmem_unused_huge_shrink()
  1861                           */
  1862                          if (list_empty_careful(&info->shrinklist)) {
  1863                                  list_add_tail(&info->shrinklist,
  1864                                                &sbinfo->shrinklist);
  1865                                  sbinfo->shrinklist_len++;
  1866                          }
  1867                          spin_unlock(&sbinfo->shrinklist_lock);
  1868                  }
  1869
  1870                  /*
  1871                   * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
  1872                   */
  1873                  if (sgp == SGP_FALLOC)
  1874                          sgp = SGP_WRITE;
  1875  clear:
  1876                  /*
  1877                   * Let SGP_WRITE caller clear ends if write does not fill page;
  1878                   * but SGP_FALLOC on a page fallocated earlier must initialize
  1879                   * it now, lest undo on failure cancel our earlier guarantee.
  1880                   */
  1881                  if (sgp != SGP_WRITE && !PageUptodate(page)) {
  1882                          struct page *head = compound_head(page);
  1883                          int i;
  1884
  1885                          for (i = 0; i < (1 << compound_order(head)); i++) {
  1886                                  clear_highpage(head + i);
  1887                                  flush_dcache_page(head + i);
  1888                          }
  1889                          SetPageUptodate(head);
  1890                  }
  1891          }
  1892
  1893          /* Perhaps the file has been truncated since we checked */
  1894          if (sgp <= SGP_CACHE &&
  1895              ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
  1896                  if (alloced) {
  1897                          ClearPageDirty(page);
  1898                          delete_from_page_cache(page);
  1899                          spin_lock_irq(&info->lock);
  1900                          shmem_recalc_inode(inode);
  1901                          spin_unlock_irq(&info->lock);
  1902                  }
  1903                  error = -EINVAL;
  1904                  goto unlock;
  1905          }
  1906          *pagep = page + index - hindex;
  1907          return 0;
  1908
  1909          /*
  1910           * Error recovery.
  1911           */
  1912  unacct:
  1913          shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
  1914
  1915          if (PageTransHuge(page)) {
  1916                  unlock_page(page);
  1917                  put_page(page);
  1918                  goto alloc_nohuge;
  1919          }
  1920  failed:
  1921          if (swap.val && !shmem_confirm_swap(mapping, index, swap))
  1922                  error = -EEXIST;
  1923  unlock:
  1924          if (page) {
  1925                  unlock_page(page);
  1926                  put_page(page);
  1927          }
  1928          if (error == -ENOSPC && !once++) {
  1929                  spin_lock_irq(&info->lock);
  1930                  shmem_recalc_inode(inode);
  1931                  spin_unlock_irq(&info->lock);
  1932                  goto repeat;
  1933          }
  1934          if (error == -EEXIST)   /* from above or from radix_tree_insert */
  1935                  goto repeat;
  1936          return error;
  1937  }
  1938
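To re-check just the two failing objects once the attached .config has been
saved as .config in the build tree (a suggested shortcut; the full reproduce
steps above work as well):

        make ARCH=i386 olddefconfig
        make ARCH=i386 mm/shmem.o mm/memory.o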
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation