kernel/0172-feat-merge-DSP-DSP-NPU-SDK-driver-to-linux.patch

From f880e579cf29c7dc37a714c47de6a2bd609f9b65 Mon Sep 17 00:00:00 2001
From: denglei <denglei@eswincomputing.com>
Date: Wed, 18 Sep 2024 16:34:12 +0800
Subject: [PATCH 172/219] feat:merge DSP DSP & NPU SDK driver to linux.
Changelogs:
1. feat: DSP driver supports multiple ops at the same time.
2. feat: Support multiple operator tasks in one submission.
3. feat: DSP firmware and driver add debug info.
4. feat: DSP adds cfg_clk and aclk low-power handling.
5. fix: DSP original submit path get-task error.
Signed-off-by: denglei <denglei@eswincomputing.com>
---
drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c | 251 ++++++++++++++----
.../soc/eswin/ai_driver/dsp/dsp_ioctl_if.h | 2 +
drivers/soc/eswin/ai_driver/dsp/dsp_main.c | 19 +-
drivers/soc/eswin/ai_driver/dsp/dsp_main.h | 3 +
.../soc/eswin/ai_driver/dsp/dsp_platform.c | 20 +-
.../eswin/ai_driver/include/es_dsp_internal.h | 24 ++
.../soc/eswin/ai_driver/include/hetero_ipc.h | 4 +-
7 files changed, 251 insertions(+), 72 deletions(-)
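
For reviewers, a minimal userspace sketch of how the new DSP_IOCTL_SUBMIT_TSKS_ASYNC batch path might be driven, inferred from dsp_ioctl_multi_tasks_submit() below: the driver reads task_num from the first dsp_ioctl_task_s element, copies in task_num elements, enqueues them all under one pass of send_lock, schedules once, and copies the array back with task.taskHandle filled in. The BATCH_MAX limit, the assumption that dsp_ioctl_if.h is visible to userspace, and the way ES_DSP_TASK_S is populated are illustrative only; real clients are expected to go through the SDK's ES_DSP_LL_SubmitTasks()/ES_DSP_LL_SubmitAsyncTasks() helpers.

/* Hypothetical userspace sketch, not part of this patch; header and device
 * setup are assumptions. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "dsp_ioctl_if.h"

#define BATCH_MAX 8

static int submit_batch(int dsp_fd, const ES_DSP_TASK_S *ops, unsigned int n)
{
	dsp_ioctl_task_s tasks[BATCH_MAX];
	unsigned int i;

	if (n == 0 || n > BATCH_MAX)
		return -1;

	memset(tasks, 0, sizeof(tasks));
	for (i = 0; i < n; i++) {
		tasks[i].task_num = n;   /* driver takes the count from element 0 */
		tasks[i].task = ops[i];  /* operator handle, buffers, priority, ... */
	}

	/* One ioctl enqueues all n operators; completions are reported later
	 * through the existing DSP_IOCTL_PROCESS_REPORT flow, using the
	 * taskHandle values written back into tasks[]. */
	if (ioctl(dsp_fd, DSP_IOCTL_SUBMIT_TSKS_ASYNC, tasks) < 0) {
		perror("DSP_IOCTL_SUBMIT_TSKS_ASYNC");
		return -1;
	}
	return 0;
}
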
diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c b/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c
index 5bfd429001bb..1edac591f642 100644
--- a/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c
+++ b/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl.c
@@ -345,6 +345,12 @@ static int dsp_set_task_req(struct es_dsp *dsp, dsp_request_t *dsp_req,
dsp_req->allow_eval = 1;
dsp_req->flat_virt = (void *)flat;
dsp_req->prio = task->task.priority;
+ if (dsp_req->prio >= DSP_MAX_PRIO) {
+ dsp_err("%s, %d, dsp request prio = %d is err.\n", __func__,
+ __LINE__, dsp_req->prio);
+ dsp_free_flat_mem(dsp, dma_len, (void *)flat, dma_addr);
+ return -EINVAL;
+ }
dsp_req->flat_size = dma_len;
flat->num_buffer = buffer_count;
@@ -399,8 +405,10 @@ static void dsp_async_task_release(struct khandle *handle)
}
dsp = user_req->es_dsp;
user = user_req->user;
+ if (user_req->need_notify) {
+ module_put(THIS_MODULE);
+ }
kfree(user_req);
- module_put(THIS_MODULE);
dsp_debug("%s, done.\n", __func__);
}
@@ -416,7 +424,7 @@ static void dsp_hw_complete_task(struct device *dev, dsp_request_t *req)
dsp = dsp_file->dsp;
spin_lock_irqsave(&dsp_file->async_ll_lock, flags);
- if (dsp_file->h.fd != INVALID_HANDLE_VALUE) {
+ if (dsp_file->h.fd != INVALID_HANDLE_VALUE && async_task->need_notify) {
list_add_tail(&async_task->async_ll,
&dsp_file->async_ll_complete);
spin_unlock_irqrestore(&dsp_file->async_ll_lock, flags);
@@ -434,51 +442,23 @@ static void dsp_hw_complete_task(struct device *dev, dsp_request_t *req)
}
}
-static long dsp_ioctl_submit_tsk_async(struct file *flip,
- dsp_ioctl_task_s __user *arg)
+static struct dsp_user_req_async *dsp_set_task_info(struct dsp_file *dsp_file,
+ dsp_ioctl_task_s *task,
+ bool need_notify)
{
- struct dsp_file *dsp_file = flip->private_data;
- struct es_dsp *dsp = dsp_file->dsp;
- dsp_ioctl_task_s req;
- dsp_ioctl_task_s *task;
dsp_request_t *dsp_req;
struct dsp_user_req_async *user_req;
int ret;
struct dsp_user *user;
u32 buffer_count;
struct dsp_dma_buf **dma_entry = NULL;
-
- if (copy_from_user(&req, arg, sizeof(dsp_ioctl_task_s))) {
- dsp_err("%s, %d, copy_from_user err.\n", __func__, __LINE__);
- ret = -EINVAL;
- return ret;
- }
- task = &req.task;
-
- // using reserved for op_idx
- dsp->op_idx = task->task.reserved;
- if ((dsp_perf_enable || dsp->perf_enable) &&
- dsp->op_idx < MAX_DSP_TASKS) {
- dsp->op_perf[dsp->op_idx].OpStartCycle =
- 0; //get_perf_timer_cnt();
- dsp->op_perf[dsp->op_idx].Die = dsp->numa_id;
- dsp->op_perf[dsp->op_idx].CoreId = dsp->process_id;
- dsp->op_perf[dsp->op_idx].OpIndex = dsp->op_idx;
- dsp->op_perf[dsp->op_idx].OpType =
- dsp->process_id + 7; // IDX_DSP0
- }
-
- if (!try_module_get(THIS_MODULE)) {
- dsp_err("%s, %d, cannot get module.\n", __func__, __LINE__);
- return -ENODEV;
- }
+ struct es_dsp *dsp = dsp_file->dsp;
user = dsp_find_user_by_fd(dsp_file, task->task.operatorHandle);
if (!user) {
- ret = -EINVAL;
- module_put(THIS_MODULE);
dsp_err("cannot get user.\n");
- return ret;
+ module_put(THIS_MODULE);
+ return NULL;
}
buffer_count = task->task.bufferCntCfg + task->task.bufferCntInput +
@@ -491,8 +471,7 @@ static long dsp_ioctl_submit_tsk_async(struct file *flip,
kernel_handle_decref(&user->h);
module_put(THIS_MODULE);
dsp_err("kmalloc dsp request struct error.\n");
- ret = -ENOMEM;
- return ret;
+ return NULL;
}
ret = init_kernel_handle(&user_req->handle, dsp_async_task_release,
@@ -502,8 +481,7 @@ static long dsp_ioctl_submit_tsk_async(struct file *flip,
kernel_handle_decref(&user->h);
kfree(user_req);
module_put(THIS_MODULE);
- ret = -EIO;
- return ret;
+ return NULL;
}
user_req->user = user;
@@ -514,7 +492,6 @@ static long dsp_ioctl_submit_tsk_async(struct file *flip,
user_req->dsp_file = dsp_file;
user_req->callback = task->task.callback;
user_req->cbarg = task->task.cbArg;
-
INIT_LIST_HEAD(&user_req->async_ll);
dsp_debug("%s, user_req=0x%px.\n", __func__, user_req);
@@ -523,8 +500,8 @@ static long dsp_ioctl_submit_tsk_async(struct file *flip,
dsp_debug("%s,%d, dsp_req=0x%px.\n", __func__, __LINE__, dsp_req);
ret = dsp_set_task_req(dsp, dsp_req, task);
-
if (ret) {
+ dsp_err("%s, %d, err, ret = %d.\n", __func__, __LINE__, ret);
goto err_req;
}
user_req->req_cpl_handler = dsp_user_async_req_complete;
@@ -532,12 +509,89 @@ static long dsp_ioctl_submit_tsk_async(struct file *flip,
dsp_req->handle = (u64)(user->op);
ret = dsp_ioctl_set_flat(dsp_file, task, dsp_req->flat_virt, dma_entry);
if (ret != 0) {
- ret = -EINVAL;
+ dsp_err("%s, %d, ret = %d.\n", __func__, __LINE__, ret);
goto err_flat;
}
- kernel_handle_addref(&user_req->handle);
+ user_req->need_notify = need_notify;
+ return user_req;
+err_flat:
+ dsp_free_flat_mem(dsp, dsp_req->flat_size, dsp_req->flat_virt,
+ dsp_req->dsp_flat1_iova);
+err_req:
+ kernel_handle_release_family(&user_req->handle);
+ kernel_handle_decref(&user_req->handle);
+ kernel_handle_decref(&user->h);
+ if (need_notify) {
+ module_put(THIS_MODULE);
+ }
+ return NULL;
+}
+
+static void dsp_free_task(struct dsp_file *dsp_file,
+ struct dsp_user_req_async *user_req)
+{
+ struct dsp_dma_buf **dma_entry = user_req->dma_entry;
+ struct es_dsp *dsp = dsp_file->dsp;
+ u32 buffer_count = user_req->dma_buf_count;
+ dsp_request_t *dsp_req = &user_req->dsp_req;
+ struct dsp_user *user = user_req->user;
+
+ if (dma_entry) {
+ dsp_unmap_dmabuf(dsp_file, dma_entry, buffer_count);
+ }
+ dsp_free_flat_mem(dsp, dsp_req->flat_size, dsp_req->flat_virt,
+ dsp_req->dsp_flat1_iova);
+
+ kernel_handle_release_family(&user_req->handle);
+ kernel_handle_decref(&user_req->handle);
+ kernel_handle_decref(&user->h);
+}
+
+static long dsp_ioctl_submit_tsk_async(struct file *flip,
+ dsp_ioctl_task_s __user *arg)
+{
+ struct dsp_file *dsp_file = flip->private_data;
+ struct es_dsp *dsp = dsp_file->dsp;
+ dsp_ioctl_task_s req;
+ dsp_ioctl_task_s *task;
+ dsp_request_t *dsp_req;
+ struct dsp_user_req_async *user_req;
+ int i, ret;
+
+ if (copy_from_user(&req, arg, sizeof(dsp_ioctl_task_s))) {
+ dsp_err("%s, %d, copy_from_user err.\n", __func__, __LINE__);
+ ret = -EINVAL;
+ return ret;
+ }
+ task = &req;
+
+ // using reserved for op_idx
+ dsp->op_idx = task->task.reserved;
+ if ((dsp_perf_enable || dsp->perf_enable) &&
+ dsp->op_idx < MAX_DSP_TASKS) {
+ dsp->op_perf[dsp->op_idx].OpStartCycle =
+ 0; //get_perf_timer_cnt();
+ dsp->op_perf[dsp->op_idx].Die = dsp->numa_id;
+ dsp->op_perf[dsp->op_idx].CoreId = dsp->process_id;
+ dsp->op_perf[dsp->op_idx].OpIndex = dsp->op_idx;
+ dsp->op_perf[dsp->op_idx].OpType =
+ dsp->process_id + 7; // IDX_DSP0
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dsp_err("%s, %d, cannot get module.\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ user_req = dsp_set_task_info(dsp_file, task, true);
+ if (user_req == NULL) {
+ dsp_err("%s, %d, err\n", __func__, __LINE__);
+ return -EIO;
+ }
req.task.taskHandle = user_req->handle.fd;
- ret = submit_task(dsp->dev, dsp_req);
+
+ kernel_handle_addref(&user_req->handle);
+ ret = submit_task(dsp->dev, &user_req->dsp_req);
if (ret) {
kernel_handle_decref(&user_req->handle);
dsp_err("submit task error.\n");
@@ -547,18 +601,9 @@ static long dsp_ioctl_submit_tsk_async(struct file *flip,
dsp_err("copy to user err.\n");
ret = -EINVAL;
}
- dsp_debug("%s, %d, user refcount=%d.\n\n", __func__, __LINE__,
- kref_read(&user->h.refcount));
return 0;
err_task:
- dsp_unmap_dmabuf(dsp_file, dma_entry, buffer_count);
-err_flat:
- dsp_free_flat_mem(dsp, dsp_req->flat_size, dsp_req->flat_virt,
- dsp_req->dsp_flat1_iova);
-err_req:
- kernel_handle_release_family(&user_req->handle);
- kernel_handle_decref(&user_req->handle);
- kernel_handle_decref(&user->h);
+ dsp_free_task(dsp_file, user_req);
return ret;
}
@@ -961,6 +1006,94 @@ static long dsp_ioctl_get_fw_perf_data(struct file *flip, dsp_fw_perf_t *data)
return ret;
}
+static long dsp_ioctl_multi_tasks_submit(struct file *flip,
+ dsp_ioctl_task_s __user *arg)
+{
+ struct dsp_file *dsp_file = flip->private_data;
+ struct es_dsp *dsp = dsp_file->dsp;
+ dsp_ioctl_task_s req;
+ dsp_ioctl_task_s *tasks;
+ dsp_request_t *dsp_req;
+ struct dsp_user_req_async **user_req;
+ int i, ret;
+ unsigned long flags;
+
+ if (copy_from_user(&req, arg, sizeof(dsp_ioctl_task_s))) {
+ dsp_err("%s, %d, copy_from_user err.\n", __func__, __LINE__);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (req.task_num == 0) {
+ dsp_err("%s, %d, task num is zero, err.\n", __func__,
+ __LINE__);
+ return -EINVAL;
+ }
+ tasks = kzalloc(req.task_num * (sizeof(dsp_ioctl_task_s) +
+ sizeof(struct dsp_user_req_async *)),
+ GFP_KERNEL);
+ if (tasks == NULL) {
+ dsp_err("", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ if (copy_from_user(tasks, arg,
+ req.task_num * sizeof(dsp_ioctl_task_s))) {
+ dsp_err("%s, %d, copy_from_user for multi tasks err.\n",
+ __func__, __LINE__);
+ kfree(tasks);
+ return -EINVAL;
+ }
+ if (!try_module_get(THIS_MODULE)) {
+ dsp_err("%s, %d, cannot get module.\n", __func__, __LINE__);
+ kfree(tasks);
+ return -ENODEV;
+ }
+ user_req = (struct dsp_user_req_async **)(tasks + req.task_num);
+
+ for (i = 0; i < req.task_num; i++) {
+ user_req[i] = dsp_set_task_info(dsp_file, &tasks[i], false);
+ if (user_req[i] == NULL) {
+ dsp_err("%s, %d, and ,i = %d.\n", __func__, __LINE__,
+ i);
+ goto free_task;
+ }
+ tasks[i].task.taskHandle = user_req[i]->handle.fd;
+ }
+ user_req[req.task_num - 1]->need_notify = true;
+
+ if (dsp->off) {
+ dsp_err("es dsp off.\n");
+ ret = -ENODEV;
+ goto free_task;
+ }
+
+ spin_lock_irqsave(&dsp->send_lock, flags);
+ for (i = 0; i < req.task_num; i++) {
+ kernel_handle_addref(&user_req[i]->handle);
+ dsp_req = &user_req[i]->dsp_req;
+ dsp_set_flat_func(dsp_req->flat_virt, dsp_req->handle);
+ __dsp_enqueue_task(dsp, dsp_req);
+ }
+ spin_unlock_irqrestore(&dsp->send_lock, flags);
+
+ dsp_schedule_task(dsp);
+
+ ret = 0;
+ if (copy_to_user(arg, tasks, req.task_num * sizeof(dsp_ioctl_task_s))) {
+ dsp_err("copy to user err.\n");
+ ret = -EINVAL;
+ }
+ kfree(tasks);
+ return ret;
+
+free_task:
+ for (i = 0; i < req.task_num; i++) {
+ if (user_req[i] != NULL)
+ dsp_free_task(dsp_file, user_req[i]);
+ }
+ kfree(tasks);
+ return ret;
+}
static long dsp_ioctl(struct file *flip, unsigned int cmd, unsigned long arg)
{
long retval;
@@ -976,6 +1109,11 @@ static long dsp_ioctl(struct file *flip, unsigned int cmd, unsigned long arg)
retval = dsp_ioctl_submit_tsk_async(
flip, (dsp_ioctl_task_s __user *)arg);
break;
+ case DSP_IOCTL_SUBMIT_TSKS_ASYNC:
+ // TODO
+ retval = dsp_ioctl_multi_tasks_submit(
+ flip, (dsp_ioctl_task_s __user *)arg);
+ break;
case DSP_IOCTL_PROCESS_REPORT:
retval = dsp_ioctl_process_complete_task(
flip, (dsp_ioctl_async_process_s __user *)arg);
@@ -1035,7 +1173,8 @@ static int dsp_open(struct inode *inode, struct file *flip)
ret = es_dsp_pm_get_sync(dsp);
if (ret < 0) {
- dsp_err("%s, %d, pm get sync err, ret=%d.\n", __func__, __LINE__, ret);
+ dsp_err("%s, %d, pm get sync err, ret=%d.\n", __func__,
+ __LINE__, ret);
return ret;
}
dsp_file = devm_kzalloc(dsp->dev, sizeof(*dsp_file), GFP_KERNEL);
diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl_if.h b/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl_if.h
index 76a88f2399dc..e7ad49893492 100644
--- a/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl_if.h
+++ b/drivers/soc/eswin/ai_driver/dsp/dsp_ioctl_if.h
@@ -36,6 +36,7 @@
#define DSP_IOCTL_ENABLE_PERF _IO(ES_DSP_IOCTL_MAGIC, 22)
#define DSP_IOCTL_GET_PERF_DATA _IO(ES_DSP_IOCTL_MAGIC, 23)
#define DSP_IOCTL_GET_FW_PERF_DATA _IO(ES_DSP_IOCTL_MAGIC, 24)
+#define DSP_IOCTL_SUBMIT_TSKS_ASYNC _IO(ES_DSP_IOCTL_MAGIC, 25)
typedef struct dsp_ioctl_pre_dma_t {
ES_DEV_BUF_S desc;
@@ -54,6 +55,7 @@ typedef struct dsp_ioctl_unload_t {
} __attribute__((packed)) dsp_ioctl_unload_s;
typedef struct dsp_ioctl_task_t {
+ ES_U32 task_num;
ES_DSP_TASK_S task;
ES_S32 result;
} __attribute__((packed)) dsp_ioctl_task_s;
diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_main.c b/drivers/soc/eswin/ai_driver/dsp/dsp_main.c
index dcfd23a6f205..4a68e4bf3559 100644
--- a/drivers/soc/eswin/ai_driver/dsp/dsp_main.c
+++ b/drivers/soc/eswin/ai_driver/dsp/dsp_main.c
@@ -56,7 +56,7 @@
#define DSP_SUBSYS_HILOAD_CLK 1040000000
#define DSP_SUBSYS_LOWLOAD_CLK 5200000
-#define ES_DSP_DEFAULT_TIMEOUT (100* 6)
+#define ES_DSP_DEFAULT_TIMEOUT (100 * 6)
#ifdef DEBUG
#pragma GCC optimize("O0")
@@ -105,7 +105,7 @@ int es_dsp_exec_cmd_timeout(void)
return fw_timeout;
}
-static void __dsp_enqueue_task(struct es_dsp *dsp, dsp_request_t *req)
+void __dsp_enqueue_task(struct es_dsp *dsp, dsp_request_t *req)
{
struct prio_array *array = &dsp->array;
unsigned long flags;
@@ -175,7 +175,7 @@ static void __dsp_send_task(struct es_dsp *dsp)
es_dsp_send_irq(dsp->hw_arg, (void *)req);
}
-static void dsp_schedule_task(struct es_dsp *dsp)
+void dsp_schedule_task(struct es_dsp *dsp)
{
unsigned long flags;
spin_lock_irqsave(&dsp->send_lock, flags);
@@ -245,6 +245,11 @@ static void dsp_process_expire_work(struct work_struct *work)
dsp_fw_state->exccause, dsp_fw_state->ps, dsp_fw_state->pc,
dsp_fw_state->dsp_task_state, dsp_fw_state->npu_task_state,
dsp_fw_state->func_state);
+
+ if (dsp->stats->last_op_name) {
+ dsp_err("%s, %d, op name = %s.\n", __func__, __LINE__,
+ dsp->stats->last_op_name);
+ }
ret = es_dsp_reboot_core(dsp->hw_arg);
if (ret < 0) {
dsp_err("reboot dsp core failed.\n");
@@ -721,7 +726,7 @@ int __maybe_unused dsp_suspend(struct device *dev)
pm_runtime_mark_last_busy(dsp->dev);
pm_runtime_put_noidle(dsp->dev);
win2030_tbu_power(dsp->dev, false);
- es_dsp_core_clk_disable(dsp);
+ es_dsp_clk_disable(dsp);
dsp_disable_mbox_clock(dsp);
dsp_debug("%s, %d, dsp core%d generic suspend done.\n", __func__,
__LINE__, dsp->process_id);
@@ -743,7 +748,7 @@ int __maybe_unused dsp_resume(struct device *dev)
dsp_err("dsp resume mbox clock err.\n");
return ret;
}
- ret = es_dsp_core_clk_enable(dsp);
+ ret = es_dsp_clk_enable(dsp);
if (ret < 0) {
dev_err(dsp->dev, "couldn't enable DSP\n");
goto out;
@@ -788,7 +793,7 @@ int __maybe_unused dsp_runtime_suspend(struct device *dev)
dsp_debug("%s, dsp core%d runtime suspend.\n", __func__,
dsp->process_id);
win2030_tbu_power(dev, false);
- es_dsp_core_clk_disable(dsp);
+ es_dsp_clk_disable(dsp);
return 0;
}
EXPORT_SYMBOL(dsp_runtime_suspend);
@@ -804,7 +809,7 @@ int __maybe_unused dsp_runtime_resume(struct device *dev)
dsp_debug("%s, dsp core%d runtime resumng.....\n\n", __func__,
dsp->process_id);
- ret = es_dsp_core_clk_enable(dsp);
+ ret = es_dsp_clk_enable(dsp);
if (ret < 0) {
dev_err(dsp->dev, "couldn't enable DSP\n");
goto out;
diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_main.h b/drivers/soc/eswin/ai_driver/dsp/dsp_main.h
index 308a5b1758fb..d71d1ac3ecaa 100644
--- a/drivers/soc/eswin/ai_driver/dsp/dsp_main.h
+++ b/drivers/soc/eswin/ai_driver/dsp/dsp_main.h
@@ -85,6 +85,7 @@ struct dsp_user_req_async {
/* async_ll, cbarg, callback and dsp_file Can Only use for Lowlevel interface*/
struct list_head async_ll;
struct dsp_file *dsp_file;
+ bool need_notify;
u64 cbarg;
u64 callback;
};
@@ -214,4 +215,6 @@ void dsp_op_release(struct kref *kref);
int es_dsp_exec_cmd_timeout(void);
struct es_dsp *es_proc_get_dsp(int dieid, int dspid);
+void __dsp_enqueue_task(struct es_dsp *dsp, dsp_request_t *req);
+void dsp_schedule_task(struct es_dsp *dsp);
#endif
diff --git a/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c b/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c
index 5051f7585dc8..a2a360d181e9 100644
--- a/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c
+++ b/drivers/soc/eswin/ai_driver/dsp/dsp_platform.c
@@ -255,7 +255,8 @@ int es_dsp_clk_disable(struct es_dsp *dsp)
return ret;
}
- //clk_disable_unprepare(hw->aclk);
+ clk_disable_unprepare(hw->aclk);
+ clk_disable_unprepare(hw->subsys->cfg_clk);
dsp_debug("%s, %d, done.\n", __func__, __LINE__);
return ret;
}
@@ -263,13 +264,18 @@ int es_dsp_clk_enable(struct es_dsp *dsp)
{
struct es_dsp_hw *hw = (struct es_dsp_hw *)dsp->hw_arg;
int ret;
- if (!__clk_is_enabled(hw->aclk)) {
- ret = clk_prepare_enable(hw->aclk);
- if (ret) {
- dev_err(dsp->dev, "failed to enable aclk: %d\n", ret);
- return ret;
- }
+
+ ret = clk_prepare_enable(hw->subsys->cfg_clk);
+ if (ret) {
+ dev_err(dsp->dev, "failed to enable cfg clk, ret=%d.\n", ret);
+ return ret;
}
+ ret = clk_prepare_enable(hw->aclk);
+ if (ret) {
+ dev_err(dsp->dev, "failed to enable aclk: %d\n", ret);
+ return ret;
+ }
+
ret = es_dsp_core_clk_enable(dsp);
if (ret) {
clk_disable_unprepare(hw->aclk);
diff --git a/drivers/soc/eswin/ai_driver/include/es_dsp_internal.h b/drivers/soc/eswin/ai_driver/include/es_dsp_internal.h
index 3fd15afae0c8..2ecbd0e0a0dd 100644
--- a/drivers/soc/eswin/ai_driver/include/es_dsp_internal.h
+++ b/drivers/soc/eswin/ai_driver/include/es_dsp_internal.h
@@ -8,6 +8,7 @@
#ifndef __ESWIN_DSP_INTERNAL_H__
#define __ESWIN_DSP_INTERNAL_H__
#include "es_type.h"
+#include "es_dsp_types.h"
#include "es_dsp_op_types.h"
#ifdef __cplusplus
@@ -183,6 +184,29 @@ typedef struct {
ES_U32 reserved;
} e31_msg_payload_t;
+typedef struct DSP_TASK_DESC_S {
+ ES_S32 dspFd;
+ ES_DSP_TASK_S opTask;
+} __attribute__((packed)) ES_DSP_TASK_DESC_S;
+
+/**
+ * @brief Synchronously submit operator tasks to DSP devices, and return only after all tasks have completed.
+ *
+ * @param[in] tasks: pointer to tasks.
+ * @param[in] numTasks: number of tasks.
+ * @return Returns the execution status code: ES_DSP_SUCCESS for success, others for failure.
+ */
+ES_S32 ES_DSP_LL_SubmitTasks(ES_DSP_TASK_DESC_S *tasks, ES_U32 numTasks);
+
+/**
+ * @brief Asynchronously submit operator tasks to DSP devices; return immediately once the tasks are queued, without waiting for completion.
+ *
+ * @param[in] tasks: pointer to tasks.
+ * @param[in] numTasks: number of tasks.
+ * @return Returns the execution status code: ES_DSP_SUCCESS for success, others for failure.
+ */
+ES_S32 ES_DSP_LL_SubmitAsyncTasks(ES_DSP_TASK_DESC_S *tasks, ES_U32 numTasks);
+
#ifdef __cplusplus
#if __cplusplus
}
diff --git a/drivers/soc/eswin/ai_driver/include/hetero_ipc.h b/drivers/soc/eswin/ai_driver/include/hetero_ipc.h
index 791132cfd645..1a1bf833e668 100644
--- a/drivers/soc/eswin/ai_driver/include/hetero_ipc.h
+++ b/drivers/soc/eswin/ai_driver/include/hetero_ipc.h
@@ -446,14 +446,14 @@ static void messagebox_send_dsp(u32 op_type, u64 payload)
dsp_idx = op_type - IDX_KMD_DSP0;
mbox_base = MAILBOX_E31_TO_DSP_REG[dsp_idx];
mbox_int = MAILBOX_E31_TO_DSP_INT[dsp_idx];
- mbox_lock = (1 << dsp_idx); // must different with dsp driver(8/10/12/14)
+ mbox_lock = BIT1; // must be different from the dsp driver (which uses BIT0)
timeout = 0;
// check lock bit and fifo
while (1) {
reg_write(mbox_base + MBOX_NPU_WR_LOCK, mbox_lock);
- if ((reg_read(mbox_base + MBOX_NPU_WR_LOCK) & mbox_lock) ||
+ if ((reg_read(mbox_base + MBOX_NPU_WR_LOCK) & mbox_lock) &&
(reg_read(mbox_base + MBOX_NPU_FIFO_OFFSET) & 0x1) == 0) {
break;
}
--
2.47.0
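
The corresponding SDK-level entry points are the two prototypes added to es_dsp_internal.h above. A hedged sketch of a caller, assuming the DSP device fd and loaded operator tasks already exist (obtaining them is SDK-specific and outside this patch):

/* Hypothetical caller of the new batch-submit API; only the
 * ES_DSP_TASK_DESC_S layout and the two prototypes come from this patch. */
#include "es_dsp_internal.h"

static ES_S32 run_two_ops(ES_S32 dsp_fd, const ES_DSP_TASK_S *op_a,
			  const ES_DSP_TASK_S *op_b)
{
	ES_DSP_TASK_DESC_S descs[2];

	descs[0].dspFd = dsp_fd;
	descs[0].opTask = *op_a;
	descs[1].dspFd = dsp_fd;
	descs[1].opTask = *op_b;

	/* Blocking variant: returns ES_DSP_SUCCESS only after both
	 * operators have finished on the DSP. */
	return ES_DSP_LL_SubmitTasks(descs, 2);
}

ES_DSP_LL_SubmitAsyncTasks() takes the same arguments but returns once the batch is queued; completion would presumably be delivered through each task's callback/cbArg fields, matching the callback plumbing visible in dsp_ioctl.c.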