Validate scatter-gather table size matches buffer object size before
mapping. Break mapping early if the table exceeds buffer size to
prevent overwriting existing mappings. Also validate the table is
not smaller than buffer size to avoid unmapped regions that trigger
MMU translation faults.
Log error and fail mapping operation on size mismatch to prevent
data corruption from mismatched host memory locations and NPU
addresses. Unmap any partially mapped buffer on failure.
Signed-off-by: Karol Wachowski <[email protected]>
---
drivers/accel/ivpu/ivpu_gem.c | 2 +-
drivers/accel/ivpu/ivpu_mmu_context.c | 20 +++++++++++++++++---
drivers/accel/ivpu/ivpu_mmu_context.h | 5 +++--
3 files changed, 21 insertions(+), 6 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index ece68f570b7e..98b9ce26962b 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -95,7 +95,7 @@ int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
 	if (!bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
-		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
+		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, ivpu_bo_size(bo),
 					       ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
 		if (ret) {
 			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 87ad593ef47d..c4014c83e727 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -429,11 +429,12 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a
 }
 
 int
-ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
-			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only)
+ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+			 struct sg_table *sgt, size_t bo_size, bool llc_coherent, bool read_only)
 {
 	size_t start_vpu_addr = vpu_addr;
 	struct scatterlist *sg;
+	size_t sgt_size = 0;
 	int ret;
 	u64 prot;
 	u64 i;
@@ -462,12 +463,25 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
 			 ctx->id, dma_addr, vpu_addr, size);
 
+		if (sgt_size + size > bo_size) {
+			ivpu_err(vdev, "Scatter-gather table size exceeds buffer object size\n");
+			ret = -EINVAL;
+			goto err_unmap_pages;
+		}
+
 		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
 		if (ret) {
 			ivpu_err(vdev, "Failed to map context pages\n");
 			goto err_unmap_pages;
 		}
 		vpu_addr += size;
+		sgt_size += size;
+	}
+
+	if (sgt_size < bo_size) {
+		ivpu_err(vdev, "Scatter-gather table size too small to cover buffer object size\n");
+		ret = -EINVAL;
+		goto err_unmap_pages;
 	}
 
 	if (!ctx->is_cd_valid) {
@@ -493,7 +507,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 	return 0;
 
 err_unmap_pages:
-	ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+	ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, sgt_size);
 	mutex_unlock(&ctx->lock);
 	return ret;
 }
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 663a11a9db11..cc02e7bab04e 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -41,8 +41,9 @@ int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu
 				 u64 size, struct drm_mm_node *node);
 void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
 
-int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
-			     u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only);
+int
+ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+			 struct sg_table *sgt, size_t bo_size, bool llc_coherent, bool read_only);
 void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 				u64 vpu_addr, struct sg_table *sgt);
 int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,