On Mon, 3 Nov 2025 20:42:05 +0000 Akash Goel <[email protected]> wrote:
> On 10/30/25 14:05, Boris Brezillon wrote:
> > This will be used by the UMD to synchronize CPU-cached mappings when
> > the UMD can't do it directly (no usermode cache maintenance instruction
> > on Arm32).
> >
> > v2:
> > - Change the flags so they better match the drm_gem_shmem_sync()
> >   semantics
> >
> > v3:
> > - Add Steve's R-b
> >
> > v4:
> > - No changes
> >
> > v5:
> > - Drop Steve's R-b (the semantics changes call for a new review)
> >
> > Signed-off-by: Faith Ekstrand <[email protected]>
> > Signed-off-by: Boris Brezillon <[email protected]>
> > ---
> >  drivers/gpu/drm/panthor/panthor_drv.c | 42 +++++++++++++++++++++-
> >  drivers/gpu/drm/panthor/panthor_gem.c | 21 +++++++++++
> >  drivers/gpu/drm/panthor/panthor_gem.h |  3 ++
> >  include/uapi/drm/panthor_drm.h        | 52 +++++++++++++++++++++++++++
> >  4 files changed, 117 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
> > index 99a4534c0074..cad5c4270b04 100644
> > --- a/drivers/gpu/drm/panthor/panthor_drv.c
> > +++ b/drivers/gpu/drm/panthor/panthor_drv.c
> > @@ -175,7 +175,8 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
> >                  PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
> >                  PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
> >                  PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
> > -                PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
> > +                PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs), \
> > +                PANTHOR_UOBJ_DECL(struct drm_panthor_bo_sync_op, size))
> >
> >  /**
> >   * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
> > @@ -1394,6 +1395,44 @@ static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
> >         return 0;
> >  }
> >
> > +#define PANTHOR_BO_SYNC_OP_FLAGS \
> > +       (DRM_PANTHOR_BO_SYNC_FOR_DEV | DRM_PANTHOR_BO_SYNC_FOR_READ | \
> > +        DRM_PANTHOR_BO_SYNC_FOR_WRITE)
> > +
>
> Sorry, couldn't find where PANTHOR_BO_SYNC_OP_FLAGS and
> DRM_PANTHOR_BO_SYNC_FOR_xxx macros get used.

That's a leftover from v4, it's not supposed to be there. I'll drop that.
> > +static int panthor_ioctl_bo_sync(struct drm_device *ddev, void *data,
> > +                                struct drm_file *file)
> > +{
> > +       struct drm_panthor_bo_sync *args = data;
> > +       struct drm_panthor_bo_sync_op *ops;
> > +       struct drm_gem_object *obj;
> > +       int ret = 0;
> > +
> > +       ret = PANTHOR_UOBJ_GET_ARRAY(ops, &args->ops);
> > +       if (ret)
> > +               return ret;
> > +
> > +       for (u32 i = 0; i < args->ops.count; i++) {
> > +               obj = drm_gem_object_lookup(file, ops[i].handle);
> > +               if (!obj) {
> > +                       ret = -ENOENT;
> > +                       goto err_ops;
> > +               }
> > +
> > +               ret = panthor_gem_sync(obj, ops[i].type, ops[i].offset,
> > +                                      ops[i].size);
> > +
> > +               drm_gem_object_put(obj);
> > +
> > +               if (ret)
> > +                       goto err_ops;
> > +       }
> > +
> > +err_ops:
> > +       kvfree(ops);
> > +
> > +       return ret;
> > +}
> > +
> >  static int
> >  panthor_open(struct drm_device *ddev, struct drm_file *file)
> >  {
> > @@ -1468,6 +1507,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
> >         PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
> >         PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
> >         PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
> > +       PANTHOR_IOCTL(BO_SYNC, bo_sync, DRM_RENDER_ALLOW),
> >  };
> >
> >  static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
> > diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
> > index 160692e45f44..1b1e98c61b8c 100644
> > --- a/drivers/gpu/drm/panthor/panthor_gem.c
> > +++ b/drivers/gpu/drm/panthor/panthor_gem.c
> > @@ -357,6 +357,27 @@ panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
> >         panthor_gem_bo_set_label(bo->obj, str);
> >  }
> >
> > +int
> > +panthor_gem_sync(struct drm_gem_object *obj, u32 type,
> > +                u64 offset, u64 size)
> > +{
> > +       enum drm_gem_shmem_sync_type shmem_sync_type;
> > +       struct panthor_gem_object *bo = to_panthor_bo(obj);
> > +
> > +       switch (type) {
> > +       case DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH:
> > +               shmem_sync_type = DRM_GEM_SHMEM_SYNC_CPU_CACHE_FLUSH;
> > +               break;
> > +       case DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE:
> > +               shmem_sync_type = DRM_GEM_SHMEM_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE;
> > +               break;
> > +       default:
> > +               return -EINVAL;
> > +       }
> > +
> > +       return drm_gem_shmem_sync(&bo->base, offset, size, shmem_sync_type);
> > +}
> > +
> >  #ifdef CONFIG_DEBUG_FS
> >  struct gem_size_totals {
> >         size_t size;
> > diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
> > index 528088839468..8705c492c5b6 100644
> > --- a/drivers/gpu/drm/panthor/panthor_gem.h
> > +++ b/drivers/gpu/drm/panthor/panthor_gem.h
> > @@ -147,6 +147,9 @@ panthor_gem_create_with_handle(struct drm_file *file,
> >  void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
> >  void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
> >
> > +int panthor_gem_sync(struct drm_gem_object *obj,
> > +                    u32 type, u64 offset, u64 size);
> > +
> >  const struct dma_buf_ops *
> >  panthor_gem_prime_get_dma_buf_ops(struct drm_device *dev);
> >
> > diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
> > index f0f637e0631d..bb12760abe99 100644
> > --- a/include/uapi/drm/panthor_drm.h
> > +++ b/include/uapi/drm/panthor_drm.h
> > @@ -144,6 +144,9 @@ enum drm_panthor_ioctl_id {
> >          * pgoff_t size.
> >          */
> >         DRM_PANTHOR_SET_USER_MMIO_OFFSET,
> > +
> > +       /** @DRM_PANTHOR_BO_SYNC: Sync BO data to/from the device */
> > +       DRM_PANTHOR_BO_SYNC,
> >  };
> >
> >  /**
> > @@ -1073,6 +1076,53 @@ struct drm_panthor_set_user_mmio_offset {
> >         __u64 offset;
> >  };
> >
> > +/**
> > + * enum drm_panthor_bo_sync_op_type - BO sync type
> > + */
> > +enum drm_panthor_bo_sync_op_type {
> > +       /** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH: Flush CPU caches. */
> > +       DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH = 0,
> > +
> > +       /** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE: Flush and invalidate CPU caches. */
> > +       DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE = 1,
> > +};
> > +
> > +/**
> > + * struct drm_panthor_bo_sync_op - BO map sync op
> > + */
> > +struct drm_panthor_bo_sync_op {
> > +       /** @handle: Handle of the buffer object to sync. */
> > +       __u32 handle;
> > +
> > +       /** @type: Type of operation. */
> > +       __u32 type;
> > +
> > +       /**
> > +        * @offset: Offset into the BO at which the sync range starts.
> > +        *
> > +        * This will be rounded down to the nearest cache line as needed.
> > +        */
> > +       __u64 offset;
> > +
> > +       /**
> > +        * @size: Size of the range to sync
> > +        *
> > +        * @size + @offset will be rounded up to the nearest cache line as
> > +        * needed.
> > +        */
> > +       __u64 size;
> > +};
> > +
> > +/**
> > + * struct drm_panthor_bo_sync - BO map sync request
> > + */
> > +struct drm_panthor_bo_sync {
> > +       /**
> > +        * @ops: Array of struct drm_panthor_bo_sync_op sync operations.
> > +        */
> > +       struct drm_panthor_obj_array ops;
> > +};
> > +
> >  /**
> >   * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
> >   * @__access: Access type. Must be R, W or RW.
> > @@ -1119,6 +1169,8 @@ enum {
> >                 DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
> >         DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
> >                 DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
> > +       DRM_IOCTL_PANTHOR_BO_SYNC =
> > +               DRM_IOCTL_PANTHOR(WR, BO_SYNC, bo_sync),
> >  };
> >
> >  #if defined(__cplusplus)
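
FWIW, here's a rough, untested sketch of how I'd expect a UMD to drive this ioctl
from userspace. The helper name is made up for illustration; it assumes the uapi
header is reachable as <drm/panthor_drm.h> and uses the existing
struct drm_panthor_obj_array layout (stride/count/array). A real UMD would likely
go through drmIoctl() so interrupted calls get restarted, and would batch several
ops per call instead of one.

/* Illustrative only: submit a single sync op on a CPU-cached BO mapping. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

static int bo_sync_range(int fd, uint32_t handle, uint64_t offset,
                         uint64_t size, uint32_t type)
{
        /* type is one of the DRM_PANTHOR_BO_SYNC_CPU_CACHE_* values;
         * offset/size get rounded to cache-line boundaries by the kernel.
         */
        struct drm_panthor_bo_sync_op op = {
                .handle = handle,
                .type = type,
                .offset = offset,
                .size = size,
        };
        struct drm_panthor_bo_sync req = {
                .ops = {
                        .stride = sizeof(op),
                        .count = 1,
                        .array = (uint64_t)(uintptr_t)&op,
                },
        };

        return ioctl(fd, DRM_IOCTL_PANTHOR_BO_SYNC, &req);
}

So after the CPU writes through a cached mapping, the UMD flushes before GPU
access (DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH), and flushes+invalidates before
reading back data the GPU produced
(DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE).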
