msm: camera: custom: Add support for SW sync & snapshot

Add support for handling SW sync and ZSL snapshot. Add a substate
machine and a mechanism to manage the request lifecycle based on
IRQs.

CRs-Fixed: 2524308
Change-Id: Ie5ba97d4ae1b38f4b44c2d3935d2882df59fcac6
Signed-off-by: Karthik Anantha Ram <kartanan@codeaurora.org>
This commit is contained in:
Karthik Anantha Ram
2019-10-22 14:54:38 -07:00
Parent f81a46003b
Commit 02af3487fa
8 files changed, 667 insertions(+), 110 deletions(-)
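In essence, the change replaces a single hard-wired buf-done path with a two-level dispatch: each activated substate (SOF, APPLIED, ...) owns a row of IRQ handlers, and the top-level IRQ entry point indexes that row by the current substate and then by the event id. Below is a minimal, userspace-compilable sketch of that pattern only; all names are invented for the illustration and are not the driver's.

#include <stdio.h>

enum substate { SUB_SOF, SUB_APPLIED, SUB_MAX };
enum hw_event { EVT_ERROR, EVT_RUP_DONE, EVT_FRAME_DONE, EVT_MAX };

typedef int (*evt_handler)(void *evt_data);

/* Example handler: reg-update arriving while a request is APPLIED */
static int on_rup_in_applied(void *evt_data)
{
	(void)evt_data;
	printf("reg update in APPLIED: move request wait -> active\n");
	return 0;
}

/* One row of handlers per substate, one column per event id */
static const evt_handler irq_table[SUB_MAX][EVT_MAX] = {
	[SUB_APPLIED] = { [EVT_RUP_DONE] = on_rup_in_applied },
};

static int handle_irq(enum substate cur, enum hw_event evt, void *data)
{
	if (irq_table[cur][evt])
		return irq_table[cur][evt](data);
	return 0; /* no handler registered for this substate/event */
}

int main(void)
{
	return handle_irq(SUB_APPLIED, EVT_RUP_DONE, NULL);
}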


@@ -27,6 +27,258 @@ static int __cam_custom_ctx_handle_irq_in_activated(
static int __cam_custom_ctx_start_dev_in_ready(
struct cam_context *ctx, struct cam_start_stop_dev_cmd *cmd);
static int __cam_custom_ctx_apply_req_in_activated_state(
struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
uint32_t next_state);
static int __cam_custom_ctx_apply_default_settings(
struct cam_context *ctx, struct cam_req_mgr_apply_request *apply);
static int __cam_custom_ctx_apply_req_in_activated(
struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
int rc = 0;
struct cam_custom_context *custom_ctx =
(struct cam_custom_context *) ctx->ctx_priv;
rc = __cam_custom_ctx_apply_req_in_activated_state(
ctx, apply, CAM_CUSTOM_CTX_ACTIVATED_APPLIED);
CAM_DBG(CAM_CUSTOM, "new substate %d", custom_ctx->substate_activated);
if (rc)
CAM_ERR(CAM_CUSTOM, "Apply failed in state %d rc %d",
custom_ctx->substate_activated, rc);
return rc;
}
static int __cam_custom_ctx_handle_error(
struct cam_custom_context *custom_ctx, void *evt_data)
{
/*
* Handle any HW error scenarios here; all the
* requests in all the lists can be signaled as errors.
* Notify UMD about this error if needed.
*/
return 0;
}
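The stub above only records intent. A sketch of what "signal everything as an error" could look like, reusing cam_sync_signal() from the frame-done path below and assuming a CAM_SYNC_STATE_SIGNALED_ERROR counterpart to the success state (illustrative only, not part of this change):

/* Illustrative: fail all out fences of every request on one list */
static void __cam_custom_ctx_signal_list_error(struct cam_context *ctx,
	struct list_head *req_list)
{
	struct cam_ctx_request *req, *tmp;
	struct cam_custom_dev_ctx_req *req_custom;
	int i;

	list_for_each_entry_safe(req, tmp, req_list, list) {
		req_custom = (struct cam_custom_dev_ctx_req *)req->req_priv;
		for (i = 0; i < req_custom->num_fence_map_out; i++) {
			if (req_custom->fence_map_out[i].sync_id == -1)
				continue;
			cam_sync_signal(req_custom->fence_map_out[i].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR);
			req_custom->fence_map_out[i].sync_id = -1;
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}
}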
static int __cam_custom_ctx_reg_upd_in_sof(
struct cam_custom_context *custom_ctx, void *evt_data)
{
struct cam_ctx_request *req = NULL;
struct cam_custom_dev_ctx_req *req_custom;
struct cam_context *ctx = custom_ctx->base;
custom_ctx->frame_id++;
/*
* This is for the first update before streamon.
* The initial setting will cause the reg_upd in the
* first frame.
*/
if (!list_empty(&ctx->wait_req_list)) {
req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
if (req_custom->num_fence_map_out == req_custom->num_acked) {
list_add_tail(&req->list, &ctx->free_req_list);
} else {
list_add_tail(&req->list, &ctx->active_req_list);
custom_ctx->active_req_cnt++;
CAM_DBG(CAM_REQ,
"move request %lld to active list(cnt = %d), ctx %u",
req->request_id, custom_ctx->active_req_cnt,
ctx->ctx_id);
}
}
return 0;
}
static int __cam_custom_ctx_reg_upd_in_applied_state(
struct cam_custom_context *custom_ctx, void *evt_data)
{
struct cam_ctx_request *req;
struct cam_context *ctx = custom_ctx->base;
struct cam_custom_dev_ctx_req *req_custom;
custom_ctx->frame_id++;
if (list_empty(&ctx->wait_req_list)) {
CAM_ERR(CAM_CUSTOM,
"Reg upd ack with no waiting request");
goto end;
}
req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
if (req_custom->num_fence_map_out != 0) {
list_add_tail(&req->list, &ctx->active_req_list);
custom_ctx->active_req_cnt++;
CAM_DBG(CAM_REQ,
"move request %lld to active list(cnt = %d), ctx %u",
req->request_id, custom_ctx->active_req_cnt,
ctx->ctx_id);
} else {
/* no io config, so the request is completed. */
list_add_tail(&req->list, &ctx->free_req_list);
CAM_DBG(CAM_CUSTOM,
"move active request %lld to free list(cnt = %d), ctx %u",
req->request_id, custom_ctx->active_req_cnt,
ctx->ctx_id);
}
custom_ctx->substate_activated = CAM_CUSTOM_CTX_ACTIVATED_SOF;
CAM_DBG(CAM_CUSTOM, "next substate %d", custom_ctx->substate_activated);
end:
return 0;
}
static int __cam_custom_ctx_frame_done(
struct cam_custom_context *custom_ctx, void *evt_data)
{
int rc = 0, i, j;
uint64_t frame_done_req_id;
struct cam_ctx_request *req;
struct cam_custom_dev_ctx_req *req_custom;
struct cam_context *ctx = custom_ctx->base;
struct cam_custom_hw_done_event_data *done_data =
(struct cam_custom_hw_done_event_data *)evt_data;
if (list_empty(&ctx->active_req_list)) {
CAM_DBG(CAM_CUSTOM, "Frame done with no active request");
return 0;
}
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
req_custom = req->req_priv;
for (i = 0; i < done_data->num_handles; i++) {
for (j = 0; j < req_custom->num_fence_map_out; j++) {
if (done_data->resource_handle[i] ==
req_custom->fence_map_out[j].resource_handle)
break;
}
if (j == req_custom->num_fence_map_out) {
CAM_ERR(CAM_CUSTOM,
"Can not find matching rsrc handle 0x%x!",
done_data->resource_handle[i]);
rc = -EINVAL;
continue;
}
if (req_custom->fence_map_out[j].sync_id == -1) {
CAM_WARN(CAM_CUSTOM,
"Duplicate frame done for req %lld",
req->request_id);
continue;
}
rc = cam_sync_signal(req_custom->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_SUCCESS);
if (rc)
CAM_ERR(CAM_CUSTOM, "Sync failed with rc = %d", rc);
req_custom->num_acked++;
req_custom->fence_map_out[j].sync_id = -1;
}
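/* A duplicate buf done can push num_acked past the out-fence count */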
if (req_custom->num_acked > req_custom->num_fence_map_out) {
CAM_ERR(CAM_CUSTOM,
"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
req->request_id, req_custom->num_acked,
req_custom->num_fence_map_out, ctx->ctx_id);
}
if (req_custom->num_acked != req_custom->num_fence_map_out)
return rc;
custom_ctx->active_req_cnt--;
frame_done_req_id = req->request_id;
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
CAM_DBG(CAM_REQ,
"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
frame_done_req_id, custom_ctx->active_req_cnt, ctx->ctx_id);
return rc;
}
static struct cam_ctx_ops
cam_custom_ctx_activated_state_machine
[CAM_CUSTOM_CTX_ACTIVATED_MAX] = {
/* SOF */
{
.ioctl_ops = {},
.crm_ops = {
.apply_req = __cam_custom_ctx_apply_req_in_activated,
.apply_default =
__cam_custom_ctx_apply_default_settings,
},
.irq_ops = NULL,
},
/* APPLIED */
{
.ioctl_ops = {},
.crm_ops = {
.apply_req = __cam_custom_ctx_apply_req_in_activated,
.apply_default =
__cam_custom_ctx_apply_default_settings,
},
.irq_ops = NULL,
},
/* HW ERROR */
{
.ioctl_ops = {},
.crm_ops = {},
.irq_ops = NULL,
},
/* HALT */
{
.ioctl_ops = {},
.crm_ops = {},
.irq_ops = NULL,
},
};
static struct cam_custom_ctx_irq_ops
cam_custom_ctx_activated_state_machine_irq
[CAM_CUSTOM_CTX_ACTIVATED_MAX] = {
/* SOF */
{
.irq_ops = {
__cam_custom_ctx_handle_error,
__cam_custom_ctx_reg_upd_in_sof,
__cam_custom_ctx_frame_done,
},
},
/* APPLIED */
{
.irq_ops = {
__cam_custom_ctx_handle_error,
__cam_custom_ctx_reg_upd_in_applied_state,
__cam_custom_ctx_frame_done,
},
},
/* HW ERROR */
{
.irq_ops = {
NULL,
NULL,
NULL,
},
},
/* HALT */
{
},
};
static int __cam_custom_ctx_enqueue_request_in_order(
struct cam_context *ctx, struct cam_ctx_request *req)
@@ -484,7 +736,8 @@ static int __cam_custom_ctx_apply_default_settings(
}
static int __cam_custom_ctx_apply_req_in_activated_state(
-struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
+uint32_t next_state)
{
int rc = 0;
struct cam_ctx_request *req;
@@ -499,6 +752,9 @@ static int __cam_custom_ctx_apply_req_in_activated_state(
goto end;
}
if (!list_empty(&ctx->wait_req_list))
CAM_WARN(CAM_CUSTOM, "Apply invoked with a req in wait list");
custom_ctx = (struct cam_custom_context *) ctx->ctx_priv;
spin_lock_bh(&ctx->lock);
req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
@@ -531,19 +787,10 @@ static int __cam_custom_ctx_apply_req_in_activated_state(
"Can not apply the configuration");
} else {
spin_lock_bh(&ctx->lock);
+custom_ctx->substate_activated = next_state;
list_del_init(&req->list);
-if (!req->num_out_map_entries) {
-list_add_tail(&req->list, &ctx->free_req_list);
+list_add_tail(&req->list, &ctx->wait_req_list);
spin_unlock_bh(&ctx->lock);
-} else {
-list_add_tail(&req->list, &ctx->active_req_list);
-spin_unlock_bh(&ctx->lock);
-/*
- * for test purposes only-this should be
- * triggered based on irq
- */
-__cam_custom_ctx_handle_irq_in_activated(ctx, 0, NULL);
-}
}
end:
@@ -610,6 +857,10 @@ static int __cam_custom_ctx_acquire_hw_v1(
goto free_res;
}
ctx_custom->substate_machine_irq =
cam_custom_ctx_activated_state_machine_irq;
ctx_custom->substate_machine =
cam_custom_ctx_activated_state_machine;
ctx_custom->hw_ctx = param.ctxt_to_hw_map;
ctx_custom->hw_acquired = true;
ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
@@ -808,7 +1059,9 @@ static int __cam_custom_ctx_config_dev(struct cam_context *ctx,
cfg.packet = packet;
cfg.ctxt_to_hw_map = ctx_custom->hw_ctx;
cfg.out_map_entries = req_custom->fence_map_out;
cfg.max_out_map_entries = CAM_CUSTOM_DEV_CTX_RES_MAX;
cfg.in_map_entries = req_custom->fence_map_in;
cfg.max_in_map_entries = CAM_CUSTOM_DEV_CTX_RES_MAX;
cfg.priv = &req_custom->hw_update_data;
cfg.pf_data = &(req->pf_data);
@@ -824,6 +1077,7 @@ static int __cam_custom_ctx_config_dev(struct cam_context *ctx,
req_custom->num_fence_map_out = cfg.num_out_map_entries;
req_custom->num_fence_map_in = cfg.num_in_map_entries;
req_custom->num_acked = 0;
req_custom->hw_update_data.num_cfg = cfg.num_out_map_entries;
for (i = 0; i < req_custom->num_fence_map_out; i++) {
rc = cam_sync_get_obj_ref(req_custom->fence_map_out[i].sync_id);
@@ -1025,6 +1279,13 @@ static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
else
custom_start.start_only = false;
ctx_custom->frame_id = 0;
ctx_custom->active_req_cnt = 0;
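/*
 * If the initial request carries out fences, start in APPLIED and
 * let the first reg_upd move it out of the wait list; otherwise
 * start in SOF.
 */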
ctx_custom->substate_activated =
(req_custom->num_fence_map_out) ?
CAM_CUSTOM_CTX_ACTIVATED_APPLIED :
CAM_CUSTOM_CTX_ACTIVATED_SOF;
ctx->state = CAM_CTX_ACTIVATED;
rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
&custom_start);
@@ -1040,10 +1301,7 @@ static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
spin_lock_bh(&ctx->lock);
list_del_init(&req->list);
-if (req_custom->num_fence_map_out)
-list_add_tail(&req->list, &ctx->active_req_list);
-else
-list_add_tail(&req->list, &ctx->free_req_list);
+list_add_tail(&req->list, &ctx->wait_req_list);
spin_unlock_bh(&ctx->lock);
end:
@@ -1103,20 +1361,28 @@ static int __cam_custom_ctx_process_evt(struct cam_context *ctx,
static int __cam_custom_ctx_handle_irq_in_activated(void *context,
uint32_t evt_id, void *evt_data)
{
-int rc;
-struct cam_context *ctx =
-(struct cam_context *)context;
+int rc = 0;
+struct cam_custom_ctx_irq_ops *custom_irq_ops = NULL;
+struct cam_context *ctx = (struct cam_context *)context;
struct cam_custom_context *ctx_custom =
(struct cam_custom_context *)ctx->ctx_priv;
-CAM_DBG(CAM_CUSTOM, "Enter %d", ctx->ctx_id);
spin_lock(&ctx->lock);
+CAM_DBG(CAM_CUSTOM, "Enter: State %d, Substate %d, evt id %d",
+ctx->state, ctx_custom->substate_activated, evt_id);
+custom_irq_ops = &ctx_custom->substate_machine_irq[
+ctx_custom->substate_activated];
+if (custom_irq_ops->irq_ops[evt_id])
+rc = custom_irq_ops->irq_ops[evt_id](ctx_custom,
+evt_data);
+else
+CAM_DBG(CAM_CUSTOM, "No handle function for substate %d",
+ctx_custom->substate_activated);
-/*
- * handle based on different irq's currently
- * triggering only buf done if there are fences
- */
-rc = cam_context_buf_done_from_hw(ctx, evt_data, 0);
-if (rc)
-CAM_ERR(CAM_CUSTOM, "Failed in buf done, rc=%d", rc);
+CAM_DBG(CAM_CUSTOM, "Exit: State %d Substate %d",
+ctx->state, ctx_custom->substate_activated);
spin_unlock(&ctx->lock);
return rc;
}
@@ -1141,6 +1407,67 @@ static int __cam_custom_ctx_acquire_hw_in_acquired(
return rc;
}
static int __cam_custom_ctx_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
int rc = 0;
struct cam_ctx_ops *ctx_ops = NULL;
struct cam_custom_context *custom_ctx =
(struct cam_custom_context *) ctx->ctx_priv;
CAM_DBG(CAM_CUSTOM,
"Enter: apply req in Substate %d request _id:%lld",
custom_ctx->substate_activated, apply->request_id);
ctx_ops = &custom_ctx->substate_machine[
custom_ctx->substate_activated];
if (ctx_ops->crm_ops.apply_req) {
rc = ctx_ops->crm_ops.apply_req(ctx, apply);
} else {
CAM_WARN_RATE_LIMIT(CAM_CUSTOM,
"No handle function in activated substate %d",
custom_ctx->substate_activated);
rc = -EFAULT;
}
if (rc)
CAM_WARN_RATE_LIMIT(CAM_CUSTOM,
"Apply failed in active substate %d rc %d",
custom_ctx->substate_activated, rc);
return rc;
}
static int __cam_custom_ctx_apply_default_req(
struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
int rc = 0;
struct cam_ctx_ops *ctx_ops = NULL;
struct cam_custom_context *custom_ctx =
(struct cam_custom_context *) ctx->ctx_priv;
CAM_DBG(CAM_CUSTOM,
"Enter: apply req in Substate %d request _id:%lld",
custom_ctx->substate_activated, apply->request_id);
ctx_ops = &custom_ctx->substate_machine[
custom_ctx->substate_activated];
if (ctx_ops->crm_ops.apply_default) {
rc = ctx_ops->crm_ops.apply_default(ctx, apply);
} else {
CAM_WARN_RATE_LIMIT(CAM_CUSTOM,
"No handle function in activated substate %d",
custom_ctx->substate_activated);
rc = -EFAULT;
}
if (rc)
CAM_WARN_RATE_LIMIT(CAM_CUSTOM,
"Apply default failed in active substate %d rc %d",
custom_ctx->substate_activated, rc);
return rc;
}
/* top state machine */
static struct cam_ctx_ops
cam_custom_dev_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
@@ -1219,10 +1546,9 @@ static struct cam_ctx_ops
},
.crm_ops = {
.unlink = __cam_custom_ctx_unlink_in_activated,
-.apply_req =
-__cam_custom_ctx_apply_req_in_activated_state,
+.apply_req = __cam_custom_ctx_apply_req,
.apply_default =
-__cam_custom_ctx_apply_default_settings,
+__cam_custom_ctx_apply_default_req,
.flush_req = __cam_custom_ctx_flush_req_in_top_state,
.process_evt = __cam_custom_ctx_process_evt,
},


@@ -19,13 +19,39 @@
* output port resource. The current maximum resource number
* is 2.
*/
-#define CAM_CUSTOM_DEV_CTX_RES_MAX 2
+#define CAM_CUSTOM_DEV_CTX_RES_MAX 1
#define CAM_CUSTOM_CTX_CFG_MAX 8
/* forward declaration */
struct cam_custom_context;
/* cam custom context irq handling function type */
typedef int (*cam_custom_hw_event_cb_func)(
struct cam_custom_context *custom_ctx, void *evt_data);
/**
* enum cam_custom_ctx_activated_substate - sub states for the activated state
*
*/
enum cam_custom_ctx_activated_substate {
CAM_CUSTOM_CTX_ACTIVATED_SOF,
CAM_CUSTOM_CTX_ACTIVATED_APPLIED,
CAM_CUSTOM_CTX_ACTIVATED_HW_ERROR,
CAM_CUSTOM_CTX_ACTIVATED_HALT,
CAM_CUSTOM_CTX_ACTIVATED_MAX,
};
/**
* struct cam_custom_ctx_irq_ops - Function table for handling IRQ callbacks
*
* @irq_ops: Array of handle function pointers.
*
*/
struct cam_custom_ctx_irq_ops {
cam_custom_hw_event_cb_func irq_ops[CAM_CUSTOM_HW_EVENT_MAX];
};
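/*
 * Note: irq_ops is indexed directly by enum cam_custom_hw_event_type,
 * so each row of the tables in the .c file must list handlers in enum
 * order (ERROR, RUP_DONE, FRAME_DONE).
 */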
/**
* struct cam_custom_dev_ctx_req - Custom context request object
*
@@ -69,6 +95,9 @@ struct cam_custom_dev_ctx_req {
* @active_req_cnt: Counter for the active request
* @frame_id: Frame id tracking for the custom context
* @hw_acquired: Flag to indicate if HW is acquired for this context
* @substate_activated: Current substate for the activated state.
* @substate_machine: Custom substate machine for external interface
* @substate_machine_irq: Custom substate machine for irq handling
* @req_base: common request structure
* @req_custom: custom request structure
*
@@ -83,6 +112,9 @@ struct cam_custom_context {
uint32_t active_req_cnt;
int64_t frame_id;
bool hw_acquired;
uint32_t substate_activated;
struct cam_ctx_ops *substate_machine;
struct cam_custom_ctx_irq_ops *substate_machine_irq;
struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
struct cam_custom_dev_ctx_req req_custom[CAM_CTX_REQ_MAX];
};


@@ -283,8 +283,6 @@ irqreturn_t cam_custom_hw_sub_mod_irq(int irq_num, void *data)
core_info->device_hw_info->irq_clear);
spin_lock(&custom_dev->hw_lock);
cb_args.irq_status = irq_status;
-cb_args.req_info = core_info->curr_req;
-core_info->curr_req = NULL;
if (core_info->irq_cb.custom_hw_mgr_cb)
core_info->irq_cb.custom_hw_mgr_cb(
@@ -315,7 +313,7 @@ int cam_custom_hw_sub_mod_process_cmd(void *hw_priv, uint32_t cmd_type,
switch (cmd_type) {
case CAM_CUSTOM_SET_IRQ_CB: {
struct cam_custom_sub_mod_set_irq_cb *irq_cb = cmd_args;
/* This can be deprecated */
CAM_DBG(CAM_CUSTOM, "Setting irq cb");
spin_lock_irqsave(&hw->hw_lock, flag);
core_info->irq_cb.custom_hw_mgr_cb = irq_cb->custom_hw_mgr_cb;


@@ -49,6 +49,8 @@ enum cam_custom_hw_resource_type {
struct cam_custom_sub_mod_acq {
enum cam_custom_hw_resource_type rsrc_type;
int32_t acq;
cam_hw_mgr_event_cb_func event_cb;
void *priv;
struct cam_custom_resource_node *rsrc_node;
};


@@ -471,6 +471,88 @@ static int cam_custom_mgr_write(void *hw_mgr_priv, void *write_args)
return -EPERM;
}
static int cam_custom_hw_mgr_notify(
uint32_t evt_id,
struct cam_custom_hw_mgr_ctx *custom_ctx,
struct cam_custom_hw_event_info *evt_info)
{
int rc = 0;
struct cam_custom_hw_reg_update_event_data reg_upd_data;
struct cam_custom_hw_done_event_data done_evt_data;
struct cam_custom_hw_error_event_data err_evt_data;
switch (evt_id) {
case CAM_CUSTOM_HW_EVENT_RUP_DONE:
CAM_DBG(CAM_CUSTOM, "Notify RUP for ctx %u",
custom_ctx->ctx_index);
/* fill the evt data struct */
custom_ctx->event_cb(custom_ctx->cb_priv,
CAM_CUSTOM_HW_EVENT_RUP_DONE, &reg_upd_data);
break;
case CAM_CUSTOM_HW_EVENT_FRAME_DONE:
CAM_DBG(CAM_CUSTOM, "Notify FRAME DONE for ctx %u",
custom_ctx->ctx_index);
/* fill the evt data struct */
done_evt_data.num_handles = 1;
done_evt_data.resource_handle[0] = 0;
custom_ctx->event_cb(custom_ctx->cb_priv,
CAM_CUSTOM_HW_EVENT_FRAME_DONE, &done_evt_data);
break;
case CAM_CUSTOM_HW_EVENT_ERROR:
CAM_DBG(CAM_CUSTOM, "Notify ERROR for ctx %u",
custom_ctx->ctx_index);
/* fill the evt data struct */
custom_ctx->event_cb(custom_ctx->cb_priv,
CAM_CUSTOM_HW_EVENT_ERROR, &err_evt_data);
break;
default:
CAM_ERR(CAM_CUSTOM, "Invalid evt_id %u", evt_id);
rc = -EINVAL;
break;
}
return rc;
}
static int cam_custom_hw_mgr_evt_handler(
void *priv, uint32_t evt_id, void *hw_evt_info)
{
int rc = 0;
struct cam_custom_hw_mgr_ctx *custom_ctx = NULL;
struct cam_custom_hw_mgr_ctx *custom_ctx_tmp = NULL;
struct cam_custom_hw_event_info *evt_info = NULL;
struct cam_custom_hw_mgr *hw_mgr = &g_custom_hw_mgr;
bool ctx_found = false;
if (!hw_evt_info) {
CAM_ERR(CAM_CUSTOM, "invalid evt info");
return -EINVAL;
}
CAM_DBG(CAM_CUSTOM, "Invoked for HW Event ID 0x%x",
evt_id);
evt_info = (struct cam_custom_hw_event_info *)hw_evt_info;
list_for_each_entry_safe(custom_ctx, custom_ctx_tmp,
&hw_mgr->used_ctx_list, list) {
if (custom_ctx->task_type == evt_info->task_type) {
ctx_found = true;
rc = cam_custom_hw_mgr_notify(evt_id, custom_ctx,
evt_info);
if (rc) {
CAM_ERR(CAM_CUSTOM, "Failed to notify");
return rc;
}
break;
}
}
if (!ctx_found)
CAM_DBG(CAM_CUSTOM, "Failed to find affected ctx");
return rc;
}
static int cam_custom_hw_mgr_put_ctx(
struct list_head *src_list,
struct cam_custom_hw_mgr_ctx **custom_ctx)
@@ -541,7 +623,8 @@ static int cam_custom_hw_mgr_get_res(
static enum cam_ife_pix_path_res_id
cam_custom_hw_mgr_get_csid_res_type(
-uint32_t out_port_type)
+uint32_t out_port_type,
+struct cam_custom_hw_mgr_ctx *custom_ctx)
{
enum cam_ife_pix_path_res_id path_id;
@@ -553,9 +636,11 @@ static enum cam_ife_pix_path_res_id
break;
case CAM_CUSTOM_OUT_RES_UDI_1:
path_id = CAM_IFE_PIX_PATH_RES_UDI_1;
custom_ctx->task_type = CAM_CUSTOM_EVENT_TASK2;
break;
case CAM_CUSTOM_OUT_RES_UDI_2:
path_id = CAM_IFE_PIX_PATH_RES_UDI_2;
custom_ctx->task_type = CAM_CUSTOM_EVENT_TASK3;
break;
default:
path_id = CAM_IFE_PIX_PATH_RES_MAX;
@@ -668,7 +753,7 @@ static int cam_custom_hw_mgr_acquire_csid_res(
for (i = 0; i < in_port_info->num_out_res; i++) {
out_port = &in_port_info->data[i];
path_res_id = cam_custom_hw_mgr_get_csid_res_type(
-out_port->res_type);
+out_port->res_type, custom_ctx);
if (path_res_id == CAM_IFE_PIX_PATH_RES_MAX) {
CAM_WARN(CAM_CUSTOM, "Invalid out port res_type %u",
@@ -826,7 +911,9 @@ static int cam_custom_mgr_release_hw(void *hw_mgr_priv,
cam_custom_hw_mgr_release_hw_for_ctx(custom_ctx);
list_del_init(&custom_ctx->list);
custom_ctx->scratch_buffer_addr = 0;
custom_ctx->ctx_in_use = 0;
custom_ctx->task_type = CAM_CUSTOM_EVENT_INVALID;
cam_custom_hw_mgr_put_ctx(&g_custom_hw_mgr.free_ctx_list, &custom_ctx);
CAM_DBG(CAM_CUSTOM, "Release Exit..");
return rc;
@@ -935,6 +1022,8 @@ static int cam_custom_mgr_acquire_hw_for_ctx(
if (!hw_intf)
continue;
acq.event_cb = cam_custom_hw_mgr_evt_handler;
acq.priv = custom_ctx;
rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
&acq, sizeof(acq));
if (rc) {
@@ -1014,6 +1103,7 @@ static int cam_custom_mgr_acquire_hw(
}
custom_ctx->ctx_in_use = 1;
custom_ctx->scratch_buffer_addr = 0;
acquire_args->ctxt_to_hw_map = custom_ctx;
cam_custom_hw_mgr_put_ctx(&custom_hw_mgr->used_ctx_list, &custom_ctx);
CAM_DBG(CAM_CUSTOM, "Exit...(success)");
@@ -1034,17 +1124,23 @@ static int cam_custom_add_io_buffers(
int iommu_hdl,
struct cam_hw_prepare_update_args *prepare)
{
-int rc = 0, i = 0;
+int rc = 0, i = 0, num_out_buf = 0;
int32_t hdl;
uint32_t plane_id;
size_t size;
struct cam_buf_io_cfg *io_cfg;
struct cam_hw_fence_map_entry *out_map_entries;
struct cam_custom_prepare_hw_update_data *prepare_hw_data;
bool is_buf_secure;
io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)
&prepare->packet->payload +
prepare->packet->io_configs_offset);
prepare_hw_data =
(struct cam_custom_prepare_hw_update_data *)
prepare->priv;
/* Validate io config entries */
for (i = 0; i < prepare->packet->num_io_configs; i++) {
CAM_DBG(CAM_CUSTOM, "======= io config idx %d ============", i);
CAM_DBG(CAM_CUSTOM,
@@ -1058,29 +1154,69 @@ static int cam_custom_add_io_buffers(
if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
CAM_DBG(CAM_CUSTOM,
"output fence 0x%x", io_cfg[i].fence);
out_map_entries =
&prepare->out_map_entries[num_out_buf];
if (num_out_buf < prepare->max_out_map_entries) {
out_map_entries->resource_handle =
io_cfg[i].resource_type;
out_map_entries->sync_id =
io_cfg[i].fence;
num_out_buf++;
} else {
CAM_ERR(CAM_CUSTOM, "out: %d max: %d",
num_out_buf,
prepare->max_out_map_entries);
return -EINVAL;
}
} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
CAM_DBG(CAM_CUSTOM,
"input fence 0x%x", io_cfg[i].fence);
return -EINVAL;
} else {
CAM_ERR(CAM_CUSTOM, "Invalid io config direction :%d",
io_cfg[i].direction);
return -EINVAL;
}
if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
plane_id++) {
/* for custom HW it's one plane only */
if (!io_cfg[i].mem_handle[plane_id])
continue;
hdl = io_cfg[i].mem_handle[plane_id];
CAM_DBG(CAM_CUSTOM, "handle 0x%x for plane %d",
hdl, plane_id);
/* Use cam_mem_get_io_buf() to retrieve iova */
is_buf_secure = cam_mem_is_secure_buf(hdl);
if (is_buf_secure) {
CAM_ERR(CAM_CUSTOM,
"secure buffer not supported");
return -EINVAL;
}
/* Do other I/O config operations */
rc = cam_mem_get_io_buf(
io_cfg[i].mem_handle[plane_id],
iommu_hdl,
&prepare_hw_data->io_addr[plane_id],
&size);
if (rc) {
CAM_ERR(CAM_CUSTOM,
"No io addr for plane: %d",
plane_id);
return -EINVAL;
}
prepare_hw_data->io_addr[plane_id] +=
io_cfg[i].offsets[plane_id];
CAM_DBG(CAM_CUSTOM,
"handle 0x%x for plane %d addr %pK",
hdl, plane_id,
prepare_hw_data->io_addr[plane_id]);
}
}
}
prepare->num_out_map_entries = num_out_buf;
prepare->num_in_map_entries = 0;
return rc;
}
@@ -1116,6 +1252,10 @@ static int cam_custom_mgr_prepare_hw_update(void *hw_mgr_priv,
(prepare->packet->header.op_code & 0xFFF);
ctx = (struct cam_custom_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
prepare->num_hw_update_entries = 0;
prepare->num_in_map_entries = 0;
prepare->num_out_map_entries = 0;
/* For test purposes - check the data in the cmd buffer */
cmd_desc = (struct cam_cmd_buf_desc *)
((uint8_t *)&prepare->packet->payload +
@@ -1130,6 +1270,11 @@ static int cam_custom_mgr_prepare_hw_update(void *hw_mgr_priv,
custom_buf_type1->custom_info);
}
/*
* Populate scratch buffer addr here based on INIT
*/
ctx->scratch_buffer_addr = 0x0;
prepare_hw_data->num_cfg = 0;
cam_custom_add_io_buffers(hw_mgr->img_iommu_hdl, prepare);
return 0;
}
@@ -1218,6 +1363,7 @@ static int cam_custom_mgr_config_hw(void *hw_mgr_priv,
struct cam_custom_hw_mgr_res *res;
struct cam_hw_config_args *cfg;
struct cam_hw_intf *hw_intf = NULL;
struct cam_custom_prepare_hw_update_data *prepare_hw_data;
CAM_DBG(CAM_CUSTOM, "Enter");
if (!hw_mgr_priv || !hw_config_args) {
@@ -1234,6 +1380,23 @@ static int cam_custom_mgr_config_hw(void *hw_mgr_priv,
return -EPERM;
}
prepare_hw_data =
(struct cam_custom_prepare_hw_update_data *)cfg->priv;
for (i = 0; i < prepare_hw_data->num_cfg; i++) {
/* this will be the ZSL buffer address */
CAM_DBG(CAM_CUSTOM, "plane %d io_addr %pK cfg %u",
i, prepare_hw_data->io_addr[i],
prepare_hw_data->num_cfg);
}
for (i = 0; i < CAM_CUSTOM_HW_SUB_MOD_MAX; i++) {
res = &custom_ctx->sub_hw_list[i];
if (res->hw_res) {
@@ -1255,32 +1418,6 @@ static int cam_custom_mgr_config_hw(void *hw_mgr_priv,
return rc;
}
-static int cam_custom_hw_mgr_irq_cb(void *data,
-struct cam_custom_hw_cb_args *cb_args)
-{
-struct cam_custom_sub_mod_req_to_dev *proc_req;
-struct cam_hw_done_event_data evt_data;
-struct cam_custom_hw_mgr_ctx *custom_ctx;
-uint32_t ctx_idx;
-proc_req = cb_args->req_info;
-ctx_idx = proc_req->ctx_idx;
-custom_ctx = &g_custom_hw_mgr.ctx_pool[ctx_idx];
-if (!custom_ctx->ctx_in_use) {
-CAM_ERR(CAM_CUSTOM, "ctx %u not in use", ctx_idx);
-return 0;
-}
-/* Based on irq status notify success/failure */
-evt_data.request_id = proc_req->req_id;
-custom_ctx->event_cb(custom_ctx->cb_priv,
-CAM_CUSTOM_EVENT_BUF_DONE, &evt_data);
-return 0;
-}
static int cam_custom_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
{
int rc = 0;
@@ -1315,7 +1452,7 @@ static int cam_custom_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
switch (custom_hw_cmd_args->cmd_type) {
case CAM_CUSTOM_HW_MGR_PROG_DEFAULT_CONFIG:
CAM_DBG(CAM_CUSTOM, "configure RUP and scratch buffer");
-/* Handle event accordingly */
+/* use custom_ctx->scratch_buffer_addr */
break;
default:
CAM_ERR(CAM_CUSTOM, "Invalid HW mgr command:0x%x",
@@ -1338,8 +1475,6 @@ int cam_custom_hw_mgr_init(struct device_node *of_node,
int rc = 0;
int i, j;
struct cam_custom_hw_mgr_ctx *ctx_pool;
-struct cam_custom_sub_mod_set_irq_cb irq_cb_args;
-struct cam_hw_intf *hw_intf = NULL;
memset(&g_custom_hw_mgr, 0, sizeof(g_custom_hw_mgr));
mutex_init(&g_custom_hw_mgr.ctx_mutex);
@@ -1349,20 +1484,6 @@ int cam_custom_hw_mgr_init(struct device_node *of_node,
/* Initialize sub modules */
rc = cam_custom_hw_sub_mod_init(
&g_custom_hw_mgr.custom_hw[i], i);
/* handle in case init fails */
-if (g_custom_hw_mgr.custom_hw[i]) {
-hw_intf = g_custom_hw_mgr.custom_hw[i];
-if (hw_intf->hw_ops.process_cmd) {
-irq_cb_args.custom_hw_mgr_cb =
-cam_custom_hw_mgr_irq_cb;
-irq_cb_args.data =
-g_custom_hw_mgr.custom_hw[i]->hw_priv;
-hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
-CAM_CUSTOM_SET_IRQ_CB, &irq_cb_args,
-sizeof(irq_cb_args));
-}
-}
}
for (i = 0; i < CAM_CUSTOM_CSID_HW_MAX; i++) {


@@ -75,6 +75,8 @@ struct ctx_base_info {
* @num_base: number of valid base data in the base array
* @init_done: indicate whether init hw is done
* @event_cb: event_cb to ctx
* @scratch_buffer_addr: scratch buffer address
* @task_type: Custom HW task type
* @cb_priv: data sent back with event_cb
*
*/
@@ -96,6 +98,8 @@ struct cam_custom_hw_mgr_ctx {
uint32_t num_base;
bool init_done;
cam_hw_event_cb_func event_cb;
uint64_t scratch_buffer_addr;
enum cam_custom_hw_task_type task_type;
void *cb_priv;
};


@@ -20,6 +20,28 @@ enum cam_custom_hw_resource_state {
CAM_CUSTOM_HW_RESOURCE_STATE_STREAMING = 4,
};
enum cam_custom_hw_task_type {
CAM_CUSTOM_EVENT_INVALID,
CAM_CUSTOM_EVENT_TASK1,
CAM_CUSTOM_EVENT_TASK2,
CAM_CUSTOM_EVENT_TASK3,
CAM_CUSTOM_EVENT_MAX,
};
/*
* struct cam_custom_hw_event_info:
*
* @Brief: Structure to pass event details to hw mgr
*
* @task_type: Type of CUSTOM HW Task
* @err_type: Error type if any
*
*/
struct cam_custom_hw_event_info {
enum cam_custom_hw_task_type task_type;
uint32_t err_type;
};
/*
* struct cam_custom_resource_node:
*


@@ -13,6 +13,7 @@
#include "cam_hw.h"
#include "cam_hw_mgr_intf.h"
#include "cam_hw_intf.h"
#include "cam_custom_hw.h"
#define CAM_CUSTOM_HW_TYPE_1 1
@@ -22,8 +23,10 @@
#define CAM_CUSTOM_CSID_HW_MAX 1
enum cam_custom_hw_event_type {
-CAM_CUSTOM_EVENT_TYPE_ERROR,
-CAM_CUSTOM_EVENT_BUF_DONE,
+CAM_CUSTOM_HW_EVENT_ERROR,
+CAM_CUSTOM_HW_EVENT_RUP_DONE,
+CAM_CUSTOM_HW_EVENT_FRAME_DONE,
CAM_CUSTOM_HW_EVENT_MAX
};
enum cam_custom_cmd_types {
@@ -48,6 +51,52 @@ struct cam_custom_hw_cmd_args {
uint32_t reserved;
};
/**
* struct cam_custom_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
*
* @timestamp: Time stamp for the sof event
* @boot_time: Boot time stamp for the sof event
*
*/
struct cam_custom_hw_sof_event_data {
uint64_t timestamp;
uint64_t boot_time;
};
/**
* struct cam_custom_hw_reg_update_event_data - Event payload for
* CAM_HW_EVENT_REG_UPDATE
*
* @timestamp: Time stamp for the reg update event
*
*/
struct cam_custom_hw_reg_update_event_data {
uint64_t timestamp;
};
/**
* struct cam_custom_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE
*
* @num_handles: Number of resource handles
* @resource_handle: Resource handle array
*
*/
struct cam_custom_hw_done_event_data {
uint32_t num_handles;
uint32_t resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
};
/**
* struct cam_custom_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
*
* @error_type: Error type for the error event
* @timestamp: Timestamp for the error event
*/
struct cam_custom_hw_error_event_data {
uint32_t error_type;
uint64_t timestamp;
};
/**
* struct cam_custom_stop_args - hardware stop arguments
*
@@ -77,21 +126,24 @@ struct cam_custom_start_args {
* @packet_opcode_type: Packet header opcode in the packet header
* this opcode defines, packet is init packet or
* update packet
* @num_cfg: Number of IO buffer addresses populated in io_addr
* @io_addr: IO buffer address for each plane
*
*/
struct cam_custom_prepare_hw_update_data {
uint32_t packet_opcode_type;
uint32_t num_cfg;
uint64_t io_addr[CAM_PACKET_MAX_PLANES];
};
/**
* struct cam_custom_hw_cb_args : HW manager callback args
*
* @irq_status : irq status
-* @req_info : Pointer to the request info associated with the cb
+* @res_type : resource type
+* @err_type : error type
*/
struct cam_custom_hw_cb_args {
uint32_t irq_status;
-struct cam_custom_sub_mod_req_to_dev *req_info;
+uint32_t res_type;
+uint32_t err_type;
};
/**