Hello Thomas Hellstrom,

This is a semi-automatic email about new static checker warnings.

The patch 7a39f35ce43f: "drm/ttm: TTM fault handler helpers" from Feb 
6, 2019, leads to the following Smatch complaint:

    drivers/gpu/drm/ttm/ttm_bo_vm.c:266 ttm_bo_vm_fault_reserved()
    error: we previously assumed 'bo->ttm' could be null (see line 204)
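
In other words, Smatch sees a pointer that is tested for NULL at one point
and then dereferenced unconditionally later on the same path.  A minimal,
self-contained illustration of the pattern (hypothetical names, not kernel
code):

	struct foo {
		int *data;
	};

	static int example(struct foo *f)
	{
		if (f->data && *f->data == 0)	/* the check implies f->data can be NULL */
			return -1;

		return *f->data;		/* ... but here it is dereferenced with no check */
	}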

drivers/gpu/drm/ttm/ttm_bo_vm.c
   203           */
   204          if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
                    ^^^^^^^
Check for NULL.

   205                  return VM_FAULT_SIGBUS;
   206  
   207          if (bdev->driver->fault_reserve_notify) {
   208                  struct dma_fence *moving = dma_fence_get(bo->moving);
   209  
   210                  err = bdev->driver->fault_reserve_notify(bo);
   211                  switch (err) {
   212                  case 0:
   213                          break;
   214                  case -EBUSY:
   215                  case -ERESTARTSYS:
   216                          return VM_FAULT_NOPAGE;
   217                  default:
   218                          return VM_FAULT_SIGBUS;
   219                  }
   220  
   221                  if (bo->moving != moving) {
   222                          spin_lock(&bdev->glob->lru_lock);
   223                          ttm_bo_move_to_lru_tail(bo, NULL);
   224                          spin_unlock(&bdev->glob->lru_lock);
   225                  }
   226                  dma_fence_put(moving);
   227          }
   228  
   229          /*
   230           * Wait for buffer data in transit, due to a pipelined
   231           * move.
   232           */
   233          ret = ttm_bo_vm_fault_idle(bo, vmf);
   234          if (unlikely(ret != 0))
   235                  return ret;
   236  
   237          err = ttm_mem_io_lock(man, true);
   238          if (unlikely(err != 0))
   239                  return VM_FAULT_NOPAGE;
   240          err = ttm_mem_io_reserve_vm(bo);
   241          if (unlikely(err != 0)) {
   242                  ret = VM_FAULT_SIGBUS;
   243                  goto out_io_unlock;
   244          }
   245  
   246          page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
   247                  vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
   248          page_last = vma_pages(vma) + vma->vm_pgoff -
   249                  drm_vma_node_start(&bo->vma_node);
   250  
   251          if (unlikely(page_offset >= bo->num_pages)) {
   252                  ret = VM_FAULT_SIGBUS;
   253                  goto out_io_unlock;
   254          }
   255  
   256          cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
   257          if (!bo->mem.bus.is_iomem) {
   258                  struct ttm_operation_ctx ctx = {
   259                          .interruptible = false,
   260                          .no_wait_gpu = false,
   261                          .flags = TTM_OPT_FLAG_FORCE_ALLOC
   262  
   263                  };
   264  
   265                  ttm = bo->ttm;
   266                  if (ttm_tt_populate(bo->ttm, &ctx)) {
                                            ^^^^^^^
Unchecked dereference inside the ttm_tt_populate() function.

   267                          ret = VM_FAULT_OOM;
   268                          goto out_io_unlock;
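
The warning only points out the inconsistency; Smatch can't tell whether
bo->ttm can actually be NULL by the time we reach ttm_tt_populate().  If it
can, one possible shape of a fix is to guard the populate path the same way
as line 204 does (a sketch only, not a tested patch):

		ttm = bo->ttm;
		if (!ttm) {
			ret = VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		if (ttm_tt_populate(ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}

If instead bo->ttm is guaranteed non-NULL in the !bo->mem.bus.is_iomem case,
then the NULL test on line 204 is the misleading part and this is a false
positive.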

regards,
dan carpenter