Hello Sunil Khatri,
Commit 737da5363cc0 ("drm/amdgpu: update the functions to use amdgpu
version of hmm") from Oct 10, 2025 (linux-next), leads to the
following Smatch static checker warning:
    drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:1767 svm_range_validate_and_map()
    warn: passing freed memory 'range' (line 1746)
drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c
  1698          start = map_start << PAGE_SHIFT;
  1699          end = (map_last + 1) << PAGE_SHIFT;
  1700          for (addr = start; !r && addr < end; ) {
  1701                  struct amdgpu_hmm_range *range = NULL;
  1702                  unsigned long map_start_vma;
  1703                  unsigned long map_last_vma;
  1704                  struct vm_area_struct *vma;
  1705                  unsigned long next = 0;
  1706                  unsigned long offset;
  1707                  unsigned long npages;
  1708                  bool readonly;
  1709
  1710                  vma = vma_lookup(mm, addr);
  1711                  if (vma) {
  1712                          readonly = !(vma->vm_flags & VM_WRITE);
  1713
  1714                          next = min(vma->vm_end, end);
  1715                          npages = (next - addr) >> PAGE_SHIFT;
  1716                          /* HMM requires at least READ permissions. If provided with PROT_NONE,
  1717                           * unmap the memory. If it's not already mapped, this is a no-op
  1718                           * If PROT_WRITE is provided without READ, warn first then unmap
  1719                           */
  1720                          if (!(vma->vm_flags & VM_READ)) {
  1721                                  unsigned long e, s;
  1722
  1723                                  svm_range_lock(prange);
  1724                                  if (vma->vm_flags & VM_WRITE)
  1725                                          pr_debug("VM_WRITE without VM_READ is not supported");
  1726                                  s = max(start, prange->start);
  1727                                  e = min(end, prange->last);
  1728                                  if (e >= s)
  1729                                          r = svm_range_unmap_from_gpus(prange, s, e,
  1730                                                          KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
  1731                                  svm_range_unlock(prange);
  1732                                  /* If unmap returns non-zero, we'll bail on the next for loop
  1733                                   * iteration, so just leave r and continue
  1734                                   */
  1735                                  addr = next;
  1736                                  continue;
  1737                          }
  1738
  1739                          WRITE_ONCE(p->svms.faulting_task, current);
  1740                          range = amdgpu_hmm_range_alloc(NULL);
  1741                          r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
  1742                                                         readonly, owner,
  1743                                                         range);
  1744                          WRITE_ONCE(p->svms.faulting_task, NULL);
  1745                          if (r) {
  1746                                  amdgpu_hmm_range_free(range);
                                                              ^^^^^
Freed here.
  1747                                  pr_debug("failed %d to get svm range pages\n", r);
  1748                          }
  1749                  } else {
  1750                          r = -EFAULT;
  1751                  }
  1752
  1753                  if (!r) {
  1754                          offset = (addr >> PAGE_SHIFT) - prange->start;
  1755                          r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
  1756                                                range->hmm_range.hmm_pfns);
  1757                          if (r)
  1758                                  pr_debug("failed %d to dma map range\n", r);
  1759                  }
  1760
  1761                  svm_range_lock(prange);
  1762
  1763                  /* Free backing memory of hmm_range if it was initialized
  1764                   * Override return value to TRY AGAIN only if prior returns
  1765                   * were successful
  1766                   */
--> 1767                if (range && !amdgpu_hmm_range_valid(range) && !r) {
                            ^^^^^
Use after free
  1768                          pr_debug("hmm update the range, need validate again\n");
  1769                          r = -EAGAIN;
  1770                  }
  1771                  /* Free the hmm range */
  1772                  amdgpu_hmm_range_free(range);
                                              ^^^^^
Double free
  1773
  1774
  1775                  if (!r && !list_empty(&prange->child_list)) {
  1776                          pr_debug("range split by unmap in parallel, validate again\n");
  1777                          r = -EAGAIN;
  1778                  }
  1779
  1780                  if (!r) {
  1781                          map_start_vma = max(map_start, prange->start + offset);
  1782                          map_last_vma = min(map_last, prange->start + offset + npages - 1);
  1783                          if (map_start_vma <= map_last_vma) {
  1784                                  offset = map_start_vma - prange->start;
  1785                                  npages = map_last_vma - map_start_vma + 1;
  1786                                  r = svm_range_map_to_gpus(prange, offset, npages, readonly,
  1787                                                            ctx->bitmap, wait, flush_tlb);
  1788                          }
  1789                  }
  1790
  1791                  if (!r && next == end)
  1792                          prange->mapped_to_gpu = true;
  1793
  1794                  svm_range_unlock(prange);
  1795
  1796                  addr = next;
  1797          }
  1798
  1799          svm_range_unreserve_bos(ctx);
  1800          if (!r)
  1801                  prange->validate_timestamp = ktime_get_boottime();
  1802
  1803  free_ctx:
  1804          kfree(ctx);
  1805
  1806          return r;
  1807  }
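
When amdgpu_hmm_range_get_pages() fails, "range" is freed on line 1746
but the pointer is not cleared.  The "range &&" check on line 1767 then
passes and amdgpu_hmm_range_valid() is called on freed memory, and the
unconditional amdgpu_hmm_range_free() on line 1772 frees it a second
time.

One possible fix (untested, and it assumes amdgpu_hmm_range_free() is a
no-op when passed a NULL pointer, the way kfree() is) would be to clear
the pointer on the error path:

        if (r) {
                amdgpu_hmm_range_free(range);
                /* Clear the pointer so the valid check on line 1767 and
                 * the unconditional free on line 1772 don't touch freed
                 * memory.
                 */
                range = NULL;
                pr_debug("failed %d to get svm range pages\n", r);
        }

With "range" set to NULL, the condition on line 1767 short circuits on
"range &&" and the free on line 1772 becomes a no-op.  Alternatively,
the free on the error path could simply be deleted and the unconditional
free on line 1772 relied on instead, if it is safe to call
amdgpu_hmm_range_valid() on a range where _get_pages() failed.
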
regards,
dan carpenter