+		bo_offset = unmap_start - unmap_vma->base.va.addr +
+			    unmap_vma->base.gem.offset;
+		sz2m_prev = ALIGN_DOWN(unmap_start, SZ_2M);
+		sz2m_next = ALIGN(unmap_start + 1, SZ_2M);
+		pgsize = get_pgsize(unmap_start, unmap_range, &pgcount);
+	} else {
+		bo_offset = ((unmap_start + unmap_range - 1) - unmap_vma->base.va.addr) +
+			    unmap_vma->base.gem.offset;
+		sz2m_prev = ALIGN_DOWN(unmap_start + unmap_range - 1, SZ_2M);
+		sz2m_next = ALIGN(unmap_start + unmap_range, SZ_2M);
+		pgsize = get_pgsize(sz2m_prev, unmap_start + unmap_range - sz2m_prev,
+				    &pgcount);
+	}
+
+	pg = to_panthor_bo(unmap_vma->base.gem.obj)->base.pages[bo_offset >> PAGE_SHIFT];
+
+	if (pgsize == SZ_4K && folio_order(page_folio(pg)) == PMD_ORDER &&
+	    unmap_vma->base.va.addr <= sz2m_prev &&
+	    unmap_vma->base.va.addr + unmap_vma->base.va.range >= sz2m_next)
+		return true;
+
+	return false;
+}
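
The boundary arithmetic above is easy to sanity-check outside the kernel. Below is a minimal userspace sketch of the 2 MiB alignment math and the "huge page fully covered by the VMA" test that ends is_huge_page_partial_unmap(); ALIGN_DOWN()/ALIGN()/SZ_2M are local stand-ins for the kernel macros, the addresses are made up, and the pgsize/folio_order() half of the check is deliberately omitted.

/*
 * Userspace sketch: the 2 MiB boundary math plus the VA-coverage half of
 * the check above. Not driver code; the macros mimic the kernel's.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_2M			(2ULL << 20)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* An unmap starting 16 KiB into the huge page at 0x40000000. */
	uint64_t unmap_start = 0x40004000;
	uint64_t sz2m_prev = ALIGN_DOWN(unmap_start, SZ_2M);
	uint64_t sz2m_next = ALIGN(unmap_start + 1, SZ_2M);
	/* A VMA covering [0x40000000, 0x40400000) spans that huge page. */
	uint64_t vma_addr = 0x40000000, vma_range = 2 * SZ_2M;

	assert(sz2m_prev == 0x40000000);	/* huge page start */
	assert(sz2m_next == 0x40200000);	/* huge page end */

	/* Mirrors the final test: the huge page lies entirely in the VMA. */
	if (vma_addr <= sz2m_prev && vma_addr + vma_range >= sz2m_next)
		printf("huge page [%#llx, %#llx) fully covered by the VMA\n",
		       (unsigned long long)sz2m_prev,
		       (unsigned long long)sz2m_next);
	return 0;
}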
+
+struct remap_params {
+	u64 prev_unmap_start, prev_unmap_range;
+	u64 prev_remap_start, prev_remap_range;
+	u64 next_unmap_start, next_unmap_range;
+	u64 next_remap_start, next_remap_range;
+	u64 unmap_start, unmap_range;
+};
+
+static struct remap_params
+get_map_unmap_intervals(const struct drm_gpuva_op_remap *op,
+			const struct panthor_vma *unmap_vma)
+{
+	u64 unmap_start, unmap_range, sz2m_prev, sz2m_next;
+	struct remap_params params = {0};
+
+	drm_gpuva_op_remap_to_unmap_range(op, &unmap_start, &unmap_range);
+
+	if (op->prev) {
+		sz2m_prev = ALIGN_DOWN(unmap_start, SZ_2M);
+		sz2m_next = ALIGN(unmap_start + 1, SZ_2M);
+
+		if (is_huge_page_partial_unmap(unmap_vma, op->prev, unmap_start,
+					       unmap_range, sz2m_prev,
+					       sz2m_next)) {
+			u64 diff = min(sz2m_next - unmap_start, unmap_range);
+
+			params.prev_unmap_start = sz2m_prev;
+			params.prev_unmap_range = SZ_2M;
+			params.prev_remap_start = sz2m_prev;
+			params.prev_remap_range = unmap_start & (SZ_2M - 1);
+
+			unmap_range -= diff;
+			unmap_start += diff;
+		}
+	}
+
+	if (op->next) {
+		sz2m_prev = ALIGN_DOWN(unmap_start + unmap_range - 1, SZ_2M);
+		sz2m_next = ALIGN(unmap_start + unmap_range, SZ_2M);
+
+		if (is_huge_page_partial_unmap(unmap_vma, op->next, unmap_start,
+					       unmap_range, sz2m_prev,
+					       sz2m_next)) {
+			if (unmap_range) {
+				params.next_unmap_start = sz2m_prev;
+				params.next_unmap_range = SZ_2M;
+				unmap_range -= op->next->va.addr & (SZ_2M - 1);
+			}
+
+			params.next_remap_start = op->next->va.addr;
+			params.next_remap_range = SZ_2M -
+						  (op->next->va.addr & (SZ_2M - 1));
+		}
+	}
+
+	params.unmap_start = unmap_start;
+	params.unmap_range = unmap_range;
+
+	return params;
+}
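
To see what get_map_unmap_intervals() produces, here is a hedged userspace walkthrough of the op->prev path for an unmap that starts 16 KiB into a mapped 2 MiB huge page, assuming the is_huge_page_partial_unmap() check passed. The macros and addresses are local, illustrative stand-ins, not driver API.

/*
 * Userspace walkthrough of the op->prev interval arithmetic above.
 * Assumes the huge-page check passed; all values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_2M			(2ULL << 20)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t unmap_start = 0x40004000;	/* 16 KiB into the huge page */
	uint64_t unmap_range = 0x10000;		/* 64 KiB requested */
	uint64_t sz2m_prev = ALIGN_DOWN(unmap_start, SZ_2M);	/* 0x40000000 */
	uint64_t sz2m_next = ALIGN(unmap_start + 1, SZ_2M);	/* 0x40200000 */

	/* Unmap the whole huge page ... */
	uint64_t prev_unmap_start = sz2m_prev, prev_unmap_range = SZ_2M;
	/* ... and remap the head that must stay mapped. */
	uint64_t prev_remap_start = sz2m_prev;
	uint64_t prev_remap_range = unmap_start & (SZ_2M - 1);	/* 16 KiB */

	/* Shrink the middle interval; here it collapses to zero. */
	uint64_t diff = MIN(sz2m_next - unmap_start, unmap_range);

	unmap_start += diff;
	unmap_range -= diff;

	printf("prev unmap [%#llx, +%#llx), prev remap [%#llx, +%#llx)\n",
	       (unsigned long long)prev_unmap_start,
	       (unsigned long long)prev_unmap_range,
	       (unsigned long long)prev_remap_start,
	       (unsigned long long)prev_remap_range);
	printf("middle unmap [%#llx, +%#llx)\n",
	       (unsigned long long)unmap_start,
	       (unsigned long long)unmap_range);
	return 0;
}

The tail that must also stay mapped, [0x40014000, 0x40200000) in this example, is not handled here: it is restored by the op->next path through next_remap_start/next_remap_range.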
+
 static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
 				       void *priv)
 {
@@ -2100,20 +2192,51 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
 	struct panthor_vm *vm = priv;
 	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
 	struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
-	u64 unmap_start, unmap_range;
+	struct remap_params params;
 	int ret;
 
-	drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
-	ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
+	/*
+	 * The ARM IOMMU page table code disallows partial unmaps of huge
+	 * pages, so we must unmap the whole huge page and then remap the
+	 * part of it that should stay mapped. get_map_unmap_intervals()
+	 * computes the intervals for those unmap and map operations.
+	 */
+	params = get_map_unmap_intervals(&op->remap, unmap_vma);
+
+	ret = panthor_vm_unmap_pages(vm, params.unmap_start, params.unmap_range);
 	if (ret)
 		return ret;
 
 	if (op->remap.prev) {
+		ret = panthor_vm_unmap_pages(vm, params.prev_unmap_start,
+					     params.prev_unmap_range);
+		if (ret)
+			return ret;
+
+		ret = panthor_vm_map_pages(vm, params.prev_remap_start,
+					   flags_to_prot(unmap_vma->flags),
+					   to_drm_gem_shmem_obj(op->remap.prev->gem.obj)->sgt,
+					   op->remap.prev->gem.offset, params.prev_remap_range);
+		if (ret)
+			return ret;
+
 		prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
 		panthor_vma_init(prev_vma, unmap_vma->flags);
 	}
 
 	if (op->remap.next) {
+		ret = panthor_vm_unmap_pages(vm, params.next_unmap_start,
+					     params.next_unmap_range);
+		if (ret)
+			return ret;
+
+		ret = panthor_vm_map_pages(vm, params.next_remap_start,
+					   flags_to_prot(unmap_vma->flags),
+					   to_drm_gem_shmem_obj(op->remap.next->gem.obj)->sgt,
+					   op->remap.next->gem.offset, params.next_remap_range);
+		if (ret)
+			return ret;
+
 		next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
 		panthor_vma_init(next_vma, unmap_vma->flags);
 	}
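
End to end, for an unmap of [0x40004000, 0x40614000) whose first and last pages sit inside mapped 2 MiB huge pages, the remap step above would issue roughly the following sequence. This is a printf-stub sketch, not driver code: unmap_pages()/map_pages() merely stand in for panthor_vm_unmap_pages()/panthor_vm_map_pages(), and the addresses are invented.

/*
 * Printf-stub walkthrough of the call ordering produced by the remap
 * step for a partial unmap touching two 2 MiB huge pages.
 */
#include <stdint.h>
#include <stdio.h>

static void unmap_pages(uint64_t start, uint64_t range)
{
	printf("unmap [%#llx, +%#llx)\n",
	       (unsigned long long)start, (unsigned long long)range);
}

static void map_pages(uint64_t start, uint64_t range)
{
	printf("map   [%#llx, +%#llx)\n",
	       (unsigned long long)start, (unsigned long long)range);
}

int main(void)
{
	/* Middle region, shrunk to whole-huge-page boundaries. */
	unmap_pages(0x40200000, 0x400000);
	/* prev side: drop the first huge page, restore its head. */
	unmap_pages(0x40000000, 0x200000);
	map_pages(0x40000000, 0x4000);
	/* next side: drop the last huge page, restore its tail. */
	unmap_pages(0x40600000, 0x200000);
	map_pages(0x40614000, 0x1ec000);
	return 0;
}

Because get_map_unmap_intervals() shrank the middle interval to whole huge-page boundaries, the ARM page table code only ever sees full huge-page unmaps plus small remaps of the head and tail that must survive.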
base-commit: 7fb19ea1ec6aa85c75905b1fd732d50801e7fb28
prerequisite-patch-id: 3b0f61bfc22a616a205ff7c15d546d2049fd53de