kernel/0071-img-gpu-kmd-update-kernel-api-of-dma_resv.patch

From 8981a93228414936226d4691304cea8d5bf9d63a Mon Sep 17 00:00:00 2001
From: Sakura286 <sakura286@outlook.com>
Date: Fri, 5 Jul 2024 13:47:09 +0800
Subject: [PATCH 071/219] img gpu kmd: update kernel API of dma_resv

Since kernel v5.19 the dma_resv API no longer distinguishes exclusive from
shared fence slots: dma_resv_add_excl_fence() and dma_resv_add_shared_fence()
were replaced by dma_resv_add_fence(), which takes an explicit
enum dma_resv_usage, fence slots must be reserved up front with
dma_resv_reserve_fences(), and fence walks filter by usage through the
dma_resv_iter interface. Update pvr_buffer_sync.c accordingly, keeping the
old code paths for kernels before v5.19.

ref: https://github.com/starfive-tech/linux/commit/8180dc82
     https://github.com/cl91/mtgpu-drv/blob/v5.19-fix/patch/0001-mtgpu-Update-to-kernel-v5.19.patch
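
For reference, a minimal sketch of the before/after shape of the API. The
helper below is illustrative only (it is not part of the driver), and both
paths assume the caller already holds the reservation object lock:

	#include <linux/dma-resv.h>
	#include <linux/version.h>

	/* Publish a fence on a buffer's reservation object. */
	static int example_publish_fence(struct dma_resv *resv,
					 struct dma_fence *fence, bool write)
	{
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
		/* v5.19+: reserve a slot, then add with an explicit usage. */
		int err = dma_resv_reserve_fences(resv, 1);

		if (err)
			return err;
		dma_resv_add_fence(resv, fence, write ? DMA_RESV_USAGE_WRITE :
						DMA_RESV_USAGE_READ);
	#else
		/* Pre-v5.19: exclusive and shared fences use separate calls. */
		if (write) {
			dma_resv_add_excl_fence(resv, fence);
		} else {
			int err = dma_resv_reserve_shared(resv, 1);

			if (err)
				return err;
			dma_resv_add_shared_fence(resv, fence);
		}
	#endif
		return 0;
	}

The usage flag replaces the old exclusive/shared split: readers and writers
are ordered by the usage recorded with each fence rather than by which slot
the fence occupies.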
---
 .../server/env/linux/pvr_buffer_sync.c | 170 ++++++++++++++++++
 1 file changed, 170 insertions(+)
diff --git a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c
index b5426d402eed..5d9ec73376d2 100644
--- a/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c
+++ b/drivers/gpu/drm/img/img-volcanic/services/server/env/linux/pvr_buffer_sync.c
@@ -172,6 +172,158 @@ pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx,
	mutex_unlock(&ctx->ctx_lock);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
+
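+/*
+ * Count the read and write fences attached to @resv, walking every fence up
+ * to and including DMA_RESV_USAGE_READ. The walk is unlocked, so the counts
+ * are reset whenever the iterator restarts.
+ */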
+static void
+dma_resv_count_fences(struct dma_resv *resv, u32 *read_fence_count_out,
+		      u32 *write_fence_count_out)
+{
+	struct dma_resv_iter cursor;
+	u32 write_fence_count = 0;
+	u32 read_fence_count = 0;
+	struct dma_fence *fence;
+
+	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (dma_resv_iter_is_restarted(&cursor)) {
+			read_fence_count = 0;
+			write_fence_count = 0;
+		}
+		if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_READ)
+			read_fence_count++;
+		else if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
+			write_fence_count++;
+	}
+	dma_resv_iter_end(&cursor);
+
+	*read_fence_count_out = read_fence_count;
+	*write_fence_count_out = write_fence_count;
+}
+
+static u32
+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
+				 u32 *pmr_flags)
+{
+	struct dma_resv *resv;
+	u32 fence_count = 0;
+	bool exclusive;
+	int i;
+
+	for (i = 0; i < nr_pmrs; i++) {
+		u32 write_fence_count = 0;
+		u32 read_fence_count = 0;
+
+		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		dma_resv_count_fences(resv, &read_fence_count, &write_fence_count);
+
+		if (!exclusive || !read_fence_count)
+			fence_count += write_fence_count;
+		if (exclusive)
+			fence_count += read_fence_count;
+	}
+
+	return fence_count;
+}
+
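+/*
+ * Collect the fences a new command must wait on before it may run: readers
+ * wait on the existing write fences, while writers wait on the existing
+ * read fences, or on the write fences when no read fences are present.
+ */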
+static struct pvr_buffer_sync_check_data *
+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
+				    PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+				    u32 nr_pmrs,
+				    struct _PMR_ **pmrs,
+				    u32 *pmr_flags)
+{
+	struct pvr_buffer_sync_check_data *data;
+	struct dma_resv *resv;
+	struct dma_fence *fence;
+	u32 fence_count;
+	bool exclusive;
+	int i;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs,
+						       pmr_flags);
+	if (fence_count) {
+		data->fences = kcalloc(fence_count, sizeof(*data->fences),
+				       GFP_KERNEL);
+		if (!data->fences)
+			goto err_check_data_free;
+	}
+
+	for (i = 0; i < nr_pmrs; i++) {
+		struct dma_resv_iter cursor;
+		bool include_write_fences;
+		bool include_read_fences;
+		u32 write_fence_count = 0;
+		u32 read_fence_count = 0;
+
+		resv = pmr_reservation_object_get(pmrs[i]);
+		if (WARN_ON_ONCE(!resv))
+			continue;
+
+		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+		dma_resv_count_fences(resv, &read_fence_count, &write_fence_count);
+
+		include_write_fences = (!exclusive || !read_fence_count);
+		include_read_fences = exclusive;
+
+		dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+		dma_resv_for_each_fence_unlocked(&cursor, fence) {
+			enum dma_resv_usage usage = dma_resv_iter_usage(&cursor);
+
+			if (!(include_write_fences && usage == DMA_RESV_USAGE_WRITE) &&
+			    !(include_read_fences && usage == DMA_RESV_USAGE_READ))
+				continue;
+
+			data->fences[data->nr_fences++] =
+				pvr_fence_create_from_fence(fence_ctx,
+							    sync_checkpoint_ctx,
+							    fence,
+							    PVRSRV_NO_FENCE,
+							    (usage == DMA_RESV_USAGE_WRITE) ?
+							    "write check fence" :
+							    "read check fence");
+			if (!data->fences[data->nr_fences - 1]) {
+				data->nr_fences--;
+				PVR_FENCE_TRACE(fence,
+						(usage == DMA_RESV_USAGE_WRITE) ?
+						"waiting on write fence\n" :
+						"waiting on read fence\n");
+				WARN_ON(dma_fence_wait(fence, true) < 0);
+			}
+		}
+		dma_resv_iter_end(&cursor);
+	}
+
+	WARN_ON(i != nr_pmrs);
+
+	return data;
+
+err_check_data_free:
+	kfree(data);
+	return NULL;
+}
+
+#else
+
static u32
pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
				 u32 *pmr_flags)
@@ -301,6 +453,8 @@ pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
	return NULL;
}
+#endif
+
static void
pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data)
{
@@ -527,18 +681,34 @@ pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data)
		if (WARN_ON_ONCE(!resv))
			continue;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
+		if (WARN_ON_ONCE(dma_resv_reserve_fences(resv, 1)))
+			continue;
+#endif
		if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
			PVR_FENCE_TRACE(&data->update_fence->base,
					"added exclusive fence (%s) to resv %p\n",
					data->update_fence->name, resv);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
+			dma_resv_add_fence(resv,
+					   &data->update_fence->base,
+					   DMA_RESV_USAGE_WRITE);
+#else
			dma_resv_add_excl_fence(resv,
						&data->update_fence->base);
+#endif
		} else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
			PVR_FENCE_TRACE(&data->update_fence->base,
					"added non-exclusive fence (%s) to resv %p\n",
					data->update_fence->name, resv);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
+			dma_resv_add_fence(resv,
+					   &data->update_fence->base,
+					   DMA_RESV_USAGE_READ);
+#else
			dma_resv_add_shared_fence(resv,
						  &data->update_fence->base);
+#endif
		}
	}
--
2.47.0