msm: camera: isp: Add support for handling FCG configurations

This change parses FCG configurations from UMD, handles them
in their corresponding blob handlers, then writes the exact FCG
configs to the hardware.

During the prepare stage, all FCG update values will be temporarily
stored in req_isp and then utilized by the config function later.
At the end of the blob handler with FCG prepared, a dummy hw update
entry is created for further usage and the size of such entry is based
on the number of reg val pairs and the size of cdm reg random header.

During applying the req in activated state, an algorithm is implemented
to pick exact FCG prediction to be used in SFE/IFE/TFE usecases.

During the config stage, based on the number of skipped frames before,
the exact FCG configurations are passed to SFE/VFE top and then
written into kmd_buf and picked by the CDM. If the number of skipped
frames reaches the maximum that FCG supports, or there are no recorded
skipped frames, the current configuration will be used instead and the
FCG entry will be skipped. If the hardware supports multi context like TFE,
wr_sel will be programmed right before the FCG config of each context.

In order to retrieve cpu address of FCG related hw update entry,
a slight change is made in packet parser to pass those addresses
when adding hw update entries.

CRs-Fixed: 3487116
Change-Id: I1db957885933edcbfabc6ce90d72902f4c518118
Signed-off-by: Haochen Yang <quic_haocyang@quicinc.com>
(cherry picked from commit 55213f81a4bd9a847692c00e657e722c1d4ed903)
这个提交包含在:
Haochen Yang
2023-04-28 10:42:27 -07:00
提交者 Sridhar Gujje
父节点 69f7756ed0
当前提交 4722b4c478
修改 15 个文件,包含 1501 行新增64 行删除

查看文件

@@ -1065,6 +1065,8 @@ static int __cam_isp_ctx_enqueue_init_request(
struct cam_isp_prepare_hw_update_data *req_update_old; struct cam_isp_prepare_hw_update_data *req_update_old;
struct cam_isp_prepare_hw_update_data *req_update_new; struct cam_isp_prepare_hw_update_data *req_update_new;
struct cam_isp_prepare_hw_update_data *hw_update_data; struct cam_isp_prepare_hw_update_data *hw_update_data;
struct cam_isp_fcg_config_info *fcg_info_old;
struct cam_isp_fcg_config_info *fcg_info_new;
spin_lock_bh(&ctx->lock); spin_lock_bh(&ctx->lock);
if (list_empty(&ctx->pending_req_list)) { if (list_empty(&ctx->pending_req_list)) {
@@ -1111,11 +1113,11 @@ static int __cam_isp_ctx_enqueue_init_request(
req_isp_old->num_fence_map_in = req_isp_old->num_fence_map_in =
req_isp_new->num_fence_map_in; req_isp_new->num_fence_map_in;
/* Copy hw update entries, num_cfg is updated later */
memcpy(&req_isp_old->cfg[req_isp_old->num_cfg], memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
req_isp_new->cfg, req_isp_new->cfg,
sizeof(req_isp_new->cfg[0]) * sizeof(req_isp_new->cfg[0]) *
req_isp_new->num_cfg); req_isp_new->num_cfg);
req_isp_old->num_cfg += req_isp_new->num_cfg;
memcpy(&req_old->pf_data, &req->pf_data, memcpy(&req_old->pf_data, &req->pf_data,
sizeof(struct cam_hw_mgr_pf_request_info)); sizeof(struct cam_hw_mgr_pf_request_info));
@@ -1145,6 +1147,32 @@ static int __cam_isp_ctx_enqueue_init_request(
req_isp_old->hw_update_data.num_exp = req_isp_old->hw_update_data.num_exp =
req_isp_new->hw_update_data.num_exp; req_isp_new->hw_update_data.num_exp;
} }
/* Copy FCG HW update params */
fcg_info_new = &hw_update_data->fcg_info;
fcg_info_old = &req_isp_old->hw_update_data.fcg_info;
fcg_info_old->use_current_cfg = true;
if (fcg_info_new->ife_fcg_online) {
fcg_info_old->ife_fcg_online = true;
fcg_info_old->ife_fcg_entry_idx =
req_isp_old->num_cfg +
fcg_info_new->ife_fcg_entry_idx;
memcpy(&fcg_info_old->ife_fcg_config,
&fcg_info_new->ife_fcg_config,
sizeof(struct cam_isp_fcg_config_internal));
}
if (fcg_info_new->sfe_fcg_online) {
fcg_info_old->sfe_fcg_online = true;
fcg_info_old->sfe_fcg_entry_idx =
req_isp_old->num_cfg +
fcg_info_new->sfe_fcg_entry_idx;
memcpy(&fcg_info_old->sfe_fcg_config,
&fcg_info_new->sfe_fcg_config,
sizeof(struct cam_isp_fcg_config_internal));
}
req_isp_old->num_cfg += req_isp_new->num_cfg;
req_old->request_id = req->request_id; req_old->request_id = req->request_id;
list_splice_init(&req->buf_tracker, &req_old->buf_tracker); list_splice_init(&req->buf_tracker, &req_old->buf_tracker);
@@ -4728,17 +4756,62 @@ static inline int cam_isp_context_apply_evt_injection(struct cam_context *ctx)
return rc; return rc;
} }
/*
 * __cam_isp_ctx_update_fcg_prediction_idx()
 *
 * Select which FCG prediction the upcoming apply should use. When no
 * frames were skipped, or more frames were skipped than there are FCG
 * predictions, fall back to the currently programmed FCG configuration;
 * otherwise the summed skip count directly indexes the prediction.
 */
static inline void __cam_isp_ctx_update_fcg_prediction_idx(
	struct cam_context                      *ctx,
	uint32_t                                 request_id,
	struct cam_isp_fcg_prediction_tracker   *fcg_tracker,
	struct cam_isp_fcg_config_info          *fcg_info)
{
	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
	uint32_t                sum = fcg_tracker->sum_skipped;

	if ((sum == 0) || (sum > CAM_ISP_MAX_FCG_PREDICTIONS)) {
		/* Out of prediction range: keep the current hw config */
		fcg_info->use_current_cfg = true;
		CAM_DBG(CAM_ISP,
			"Apply req: %llu, Use current FCG value, frame_id: %llu, ctx_id: %u",
			request_id, ctx_isp->frame_id, ctx->ctx_id);
		return;
	}

	fcg_info->prediction_idx = sum;
	CAM_DBG(CAM_ISP,
		"Apply req: %llu, FCG prediction: %u, frame_id: %llu, ctx_id: %u",
		request_id, sum,
		ctx_isp->frame_id, ctx->ctx_id);
}
/*
 * __cam_isp_ctx_print_fcg_tracker()
 *
 * Debug helper: dump the FCG tracker counters and the per-slot skipped
 * frame counts from the skipped_list FIFO.
 *
 * The list is expected to hold exactly CAM_ISP_AFD_PIPELINE_DELAY (3)
 * nodes (see cam_isp_context_init).  Bound the copy so a longer list
 * cannot overrun the local array, and zero-initialize it so a shorter
 * list does not print uninitialized stack values.
 */
static inline void __cam_isp_ctx_print_fcg_tracker(
	struct cam_isp_fcg_prediction_tracker *fcg_tracker)
{
	uint32_t skipped_list[CAM_ISP_AFD_PIPELINE_DELAY] = {0};
	struct cam_isp_skip_frame_info *skip_info;
	int i = 0;

	list_for_each_entry(skip_info,
		&fcg_tracker->skipped_list, list) {
		if (i >= CAM_ISP_AFD_PIPELINE_DELAY)
			break;
		skipped_list[i] = skip_info->num_frame_skipped;
		i += 1;
	}

	CAM_DBG(CAM_ISP,
		"FCG tracker num_skipped: %u, sum_skipped: %u, skipped list: [%u, %u, %u]",
		fcg_tracker->num_skipped, fcg_tracker->sum_skipped,
		skipped_list[0], skipped_list[1], skipped_list[2]);
}
static int __cam_isp_ctx_apply_req_in_activated_state( static int __cam_isp_ctx_apply_req_in_activated_state(
struct cam_context *ctx, struct cam_req_mgr_apply_request *apply, struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
enum cam_isp_ctx_activated_substate next_state) enum cam_isp_ctx_activated_substate next_state)
{ {
int rc = 0; int rc = 0;
struct cam_ctx_request *req; struct cam_ctx_request *req;
struct cam_ctx_request *active_req = NULL; struct cam_ctx_request *active_req = NULL;
struct cam_isp_ctx_req *req_isp; struct cam_isp_ctx_req *req_isp;
struct cam_isp_ctx_req *active_req_isp; struct cam_isp_ctx_req *active_req_isp;
struct cam_isp_context *ctx_isp = NULL; struct cam_isp_context *ctx_isp = NULL;
struct cam_hw_config_args cfg = {0}; struct cam_hw_config_args cfg = {0};
struct cam_isp_skip_frame_info *skip_info;
struct cam_isp_fcg_prediction_tracker *fcg_tracker;
struct cam_isp_fcg_config_info *fcg_info;
ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
@@ -4875,6 +4948,28 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
goto end; goto end;
} }
/* Decide the exact FCG prediction */
fcg_tracker = &ctx_isp->fcg_tracker;
fcg_info = &req_isp->hw_update_data.fcg_info;
if (!list_empty(&fcg_tracker->skipped_list)) {
__cam_isp_ctx_print_fcg_tracker(fcg_tracker);
skip_info = list_first_entry(&fcg_tracker->skipped_list,
struct cam_isp_skip_frame_info, list);
fcg_tracker->sum_skipped -= skip_info->num_frame_skipped;
if (unlikely((uint32_t)UINT_MAX - fcg_tracker->sum_skipped <
fcg_tracker->num_skipped))
fcg_tracker->num_skipped =
(uint32_t)UINT_MAX - fcg_tracker->sum_skipped;
fcg_tracker->sum_skipped += fcg_tracker->num_skipped;
skip_info->num_frame_skipped = fcg_tracker->num_skipped;
fcg_tracker->num_skipped = 0;
list_rotate_left(&fcg_tracker->skipped_list);
__cam_isp_ctx_print_fcg_tracker(fcg_tracker);
__cam_isp_ctx_update_fcg_prediction_idx(ctx,
apply->request_id, fcg_tracker, fcg_info);
}
atomic_set(&ctx_isp->apply_in_progress, 1); atomic_set(&ctx_isp->apply_in_progress, 1);
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg); rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -5556,6 +5651,23 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
return 0; return 0;
} }
/*
 * __cam_isp_ctx_reset_fcg_tracker()
 *
 * Clear all FCG skip-frame accounting on this context: zero the running
 * counters and every per-slot skip count in the skipped_list FIFO.
 * Called on flush and on stop_dev.
 */
static inline void __cam_isp_ctx_reset_fcg_tracker(
	struct cam_context *ctx)
{
	struct cam_isp_skip_frame_info *skip_info;
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;

	/* Reset skipped_list for FCG config */
	ctx_isp->fcg_tracker.sum_skipped = 0;
	ctx_isp->fcg_tracker.num_skipped = 0;
	list_for_each_entry(skip_info, &ctx_isp->fcg_tracker.skipped_list, list)
		skip_info->num_frame_skipped = 0;

	CAM_DBG(CAM_ISP, "Reset FCG skip info on ctx %u link: %x",
		ctx->ctx_id, ctx->link_hdl);
}
static int __cam_isp_ctx_flush_req_in_top_state( static int __cam_isp_ctx_flush_req_in_top_state(
struct cam_context *ctx, struct cam_context *ctx,
struct cam_req_mgr_flush_request *flush_req) struct cam_req_mgr_flush_request *flush_req)
@@ -5574,6 +5686,9 @@ static int __cam_isp_ctx_flush_req_in_top_state(
__cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req); __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
spin_unlock_bh(&ctx->lock); spin_unlock_bh(&ctx->lock);
/* Reset skipped_list for FCG config */
__cam_isp_ctx_reset_fcg_tracker(ctx);
if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
if (ctx->state <= CAM_CTX_READY) { if (ctx->state <= CAM_CTX_READY) {
ctx->state = CAM_CTX_ACQUIRED; ctx->state = CAM_CTX_ACQUIRED;
@@ -7975,6 +8090,9 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1); atomic64_set(&ctx_isp->dbg_monitors.state_monitor_head, -1);
atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1); atomic64_set(&ctx_isp->dbg_monitors.frame_monitor_head, -1);
/* Reset skipped_list for FCG config */
__cam_isp_ctx_reset_fcg_tracker(ctx);
for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1); atomic64_set(&ctx_isp->dbg_monitors.event_record_head[i], -1);
@@ -8393,6 +8511,7 @@ static int __cam_isp_ctx_apply_default_settings(
struct cam_ctx_ops *ctx_ops = NULL; struct cam_ctx_ops *ctx_ops = NULL;
struct cam_isp_context *ctx_isp = struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv; (struct cam_isp_context *) ctx->ctx_priv;
struct cam_isp_fcg_prediction_tracker *fcg_tracker;
if (!(apply->trigger_point & ctx_isp->subscribe_event)) { if (!(apply->trigger_point & ctx_isp->subscribe_event)) {
CAM_WARN(CAM_ISP, CAM_WARN(CAM_ISP,
@@ -8409,6 +8528,14 @@ static int __cam_isp_ctx_apply_default_settings(
if (atomic_read(&ctx_isp->internal_recovery_set)) if (atomic_read(&ctx_isp->internal_recovery_set))
return __cam_isp_ctx_reset_and_recover(false, ctx); return __cam_isp_ctx_reset_and_recover(false, ctx);
/* FCG handling */
fcg_tracker = &ctx_isp->fcg_tracker;
if (ctx_isp->frame_id != 1)
fcg_tracker->num_skipped += 1;
CAM_DBG(CAM_ISP,
"Apply default settings, number of previous continuous skipped frames: %d, ctx_id: %d",
fcg_tracker->num_skipped, ctx->ctx_id);
/* /*
* Call notify frame skip for static offline cases or * Call notify frame skip for static offline cases or
* mode switch cases where IFE mode switch delay differs * mode switch cases where IFE mode switch delay differs
@@ -8916,6 +9043,7 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
{ {
int rc = -1; int rc = -1;
int i; int i;
struct cam_isp_skip_frame_info *skip_info, *temp;
if (!ctx || !ctx_base) { if (!ctx || !ctx_base) {
CAM_ERR(CAM_ISP, "Invalid Context"); CAM_ERR(CAM_ISP, "Invalid Context");
@@ -8958,6 +9086,21 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
goto err; goto err;
} }
/* FCG related struct setup */
INIT_LIST_HEAD(&ctx->fcg_tracker.skipped_list);
for (i = 0; i < CAM_ISP_AFD_PIPELINE_DELAY; i++) {
skip_info = kzalloc(sizeof(struct cam_isp_skip_frame_info), GFP_KERNEL);
if (!skip_info) {
CAM_ERR(CAM_ISP,
"Failed to allocate memory for FCG struct, ctx_idx: %u, link: %x",
ctx_base->ctx_id, ctx_base->link_hdl);
rc = -ENOMEM;
goto kfree;
}
list_add_tail(&skip_info->list, &ctx->fcg_tracker.skipped_list);
}
/* link camera context with isp context */ /* link camera context with isp context */
ctx_base->state_machine = cam_isp_ctx_top_state_machine; ctx_base->state_machine = cam_isp_ctx_top_state_machine;
ctx_base->ctx_priv = ctx; ctx_base->ctx_priv = ctx;
@@ -8977,12 +9120,30 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
if (!isp_ctx_debug.dentry) if (!isp_ctx_debug.dentry)
cam_isp_context_debug_register(); cam_isp_context_debug_register();
return rc;
kfree:
list_for_each_entry_safe(skip_info, temp,
&ctx->fcg_tracker.skipped_list, list) {
list_del(&skip_info->list);
kfree(skip_info);
skip_info = NULL;
}
err: err:
return rc; return rc;
} }
int cam_isp_context_deinit(struct cam_isp_context *ctx) int cam_isp_context_deinit(struct cam_isp_context *ctx)
{ {
struct cam_isp_skip_frame_info *skip_info, *temp;
list_for_each_entry_safe(skip_info, temp,
&ctx->fcg_tracker.skipped_list, list) {
list_del(&skip_info->list);
kfree(skip_info);
skip_info = NULL;
}
if (ctx->base) if (ctx->base)
cam_context_deinit(ctx->base); cam_context_deinit(ctx->base);

查看文件

@@ -62,13 +62,13 @@
/* Debug Buffer length*/ /* Debug Buffer length*/
#define CAM_ISP_CONTEXT_DBG_BUF_LEN 300 #define CAM_ISP_CONTEXT_DBG_BUF_LEN 300
/* AFD pipeline delay for FCG configuration */
#define CAM_ISP_AFD_PIPELINE_DELAY 3
/* Maximum entries in frame record */ /* Maximum entries in frame record */
#define CAM_ISP_CTX_MAX_FRAME_RECORDS 5 #define CAM_ISP_CTX_MAX_FRAME_RECORDS 5
/* Congestion count threshold */
/*
* Congestion count threshold
*/
#define CAM_ISP_CONTEXT_CONGESTION_CNT_MAX 3 #define CAM_ISP_CONTEXT_CONGESTION_CNT_MAX 3
/* forward declaration */ /* forward declaration */
@@ -302,6 +302,34 @@ struct cam_isp_context_debug_monitors {
CAM_ISP_CTX_MAX_FRAME_RECORDS]; CAM_ISP_CTX_MAX_FRAME_RECORDS];
}; };
/**
 * struct cam_isp_skip_frame_info - Node of the FIFO queue tracking numbers of
 *                                  skipped frames, used for the decision of
 *                                  FCG prediction
 *
 * @num_frame_skipped: Number of frames skipped in between two normally
 *                     applied frames, for this FIFO slot
 * @list:              List member used to append this node to the tracker's
 *                     skipped_list
 */
struct cam_isp_skip_frame_info {
	uint32_t                              num_frame_skipped;
	struct list_head                      list;
};
/**
 * struct cam_isp_fcg_prediction_tracker - Tracks the number of previously
 *                                         skipped frames and indicates which
 *                                         FCG prediction should be applied
 *
 * @num_skipped:  Number of skipped frames from the previous normally applied
 *                frame to this normally applied frame
 * @sum_skipped:  Sum of the number of skipped frames from req generation to
 *                req apply; selects the FCG prediction index
 * @skipped_list: FIFO of cam_isp_skip_frame_info nodes recording skipped
 *                frame counts in between two normal frames
 */
struct cam_isp_fcg_prediction_tracker {
	uint32_t                              num_skipped;
	uint32_t                              sum_skipped;
	struct list_head                      skipped_list;
};
/** /**
* struct cam_isp_context - ISP context object * struct cam_isp_context - ISP context object
* *
@@ -373,6 +401,8 @@ struct cam_isp_context_debug_monitors {
* by other devices on the link as part of link setup * by other devices on the link as part of link setup
* @mode_switch_en: Indicates if mode switch is enabled * @mode_switch_en: Indicates if mode switch is enabled
* @hw_idx: Hardware ID * @hw_idx: Hardware ID
* @fcg_tracker: FCG prediction tracker containing number of previously skipped
* frames and indicates which prediction should be used
* *
*/ */
struct cam_isp_context { struct cam_isp_context {
@@ -436,6 +466,7 @@ struct cam_isp_context {
bool handle_mswitch; bool handle_mswitch;
bool mode_switch_en; bool mode_switch_en;
uint32_t hw_idx; uint32_t hw_idx;
struct cam_isp_fcg_prediction_tracker fcg_tracker;
}; };
/** /**

查看文件

@@ -47,7 +47,6 @@
#define MAX_PARAMS_FOR_IRQ_INJECT 5 #define MAX_PARAMS_FOR_IRQ_INJECT 5
#define IRQ_INJECT_DISPLAY_BUF_LEN 4096 #define IRQ_INJECT_DISPLAY_BUF_LEN 4096
typedef int (*cam_isp_irq_inject_cmd_parse_handler)( typedef int (*cam_isp_irq_inject_cmd_parse_handler)(
struct cam_isp_irq_inject_param *irq_inject_param, struct cam_isp_irq_inject_param *irq_inject_param,
uint32_t param_index, char *token, bool *is_query); uint32_t param_index, char *token, bool *is_query);
@@ -6976,9 +6975,124 @@ static int cam_ife_hw_mgr_irq_injection(struct cam_ife_hw_mgr *hw_mgr,
return rc; return rc;
} }
/*
 * cam_isp_blob_fcg_update()
 *
 * Write the FCG configuration for the selected prediction into the dummy
 * hw update entry reserved at prepare time (entry_idx), by issuing
 * CAM_ISP_HW_CMD_FCG_CONFIG to the first valid hw resource found in
 * res_list_isp_src.  The hw layer writes the register values into the
 * entry's command buffer, which the CDM later consumes.
 *
 * Returns 0 on success, the process_cmd error code on failure, or
 * -EINVAL when entry_idx is out of range or no valid resource exists.
 */
static int cam_isp_blob_fcg_update(
	struct cam_isp_fcg_config_internal *fcg_config_internal,
	uint32_t                            entry_idx,
	uint32_t                            prediction_idx,
	struct list_head                   *res_list_isp_src,
	struct cam_hw_config_args          *cfg)
{
	struct cam_isp_resource_node      *res;
	struct cam_isp_hw_mgr_res         *hw_mgr_res;
	struct cam_isp_hw_fcg_cmd          fcg_cmd;
	struct cam_hw_update_entry        *hw_entry;
	uint32_t                           i;
	int rc = -EINVAL;

	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
			continue;

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			/* Reserved FCG entry must lie within this request's entries */
			if (entry_idx >= cfg->num_hw_update_entries) {
				CAM_ERR(CAM_ISP,
					"Entry index %d exceed number of hw update entries %u, request id %llu",
					entry_idx, cfg->num_hw_update_entries, cfg->request_id);
				return -EINVAL;
			}

			hw_entry = &cfg->hw_update_entries[entry_idx];
			res = hw_mgr_res->hw_res[i];

			fcg_cmd.res = res;
			fcg_cmd.cmd_type = CAM_ISP_HW_CMD_FCG_CONFIG;
			fcg_cmd.get_size_flag = false;
			fcg_cmd.u.fcg_update.cmd_size = hw_entry->len;
			fcg_cmd.u.fcg_update.cmd_buf_addr = hw_entry->addr;
			fcg_cmd.u.fcg_update.data = (void *)fcg_config_internal;

			/* This prediction sent from userspace is insufficient */
			if (prediction_idx > fcg_config_internal->num_predictions)
				prediction_idx = fcg_config_internal->num_predictions;
			fcg_cmd.u.fcg_update.prediction_idx = prediction_idx;

			CAM_DBG(CAM_ISP,
				"Replace FCG config with predicted ones, prediction idx: %d, request id: %llu",
				prediction_idx, cfg->request_id);

			rc = res->hw_intf->hw_ops.process_cmd(
				res->hw_intf->hw_priv,
				CAM_ISP_HW_CMD_FCG_CONFIG,
				&fcg_cmd, sizeof(struct cam_isp_hw_fcg_cmd));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Failed in writing FCG values to the hw update entry, rc: %d, request id: %llu",
					rc, cfg->request_id);
				return rc;
			}
			/*
			 * Only the first valid hw resource programs the single
			 * reserved entry; stop after it succeeds.
			 */
			return 0;
		}
	}

	CAM_DBG(CAM_ISP,
		"No matching ISP resources when filling FCG hw update entry, request id: %llu",
		cfg->request_id);
	return rc;
}
static inline int cam_ife_mgr_apply_fcg_update(
struct cam_ife_hw_mgr_ctx *ctx,
struct cam_isp_prepare_hw_update_data *hw_update_data,
struct cam_hw_config_args *cfg)
{
int rc = 0;
struct cam_isp_fcg_config_internal *fcg_configs;
if (hw_update_data->fcg_info.ife_fcg_online &&
!hw_update_data->fcg_info.use_current_cfg) {
CAM_DBG(CAM_ISP, "Start writing IFE/MC_TFE FCG configs to kmd buffer on ctx: %d",
ctx->ctx_index);
fcg_configs = &hw_update_data->fcg_info.ife_fcg_config;
rc = cam_isp_blob_fcg_update(fcg_configs,
hw_update_data->fcg_info.ife_fcg_entry_idx,
hw_update_data->fcg_info.prediction_idx,
&ctx->res_list_ife_src, cfg);
if (rc) {
CAM_ERR(CAM_ISP,
"Failed in applying IFE/MC_TFE FCG configurations, ctx_idx: %u",
ctx->ctx_index);
return rc;
}
}
if (hw_update_data->fcg_info.sfe_fcg_online &&
!hw_update_data->fcg_info.use_current_cfg) {
CAM_DBG(CAM_ISP, "Start writing SFE FCG configs to kmd buffer on ctx: %d",
ctx->ctx_index);
fcg_configs = &hw_update_data->fcg_info.sfe_fcg_config;
rc = cam_isp_blob_fcg_update(fcg_configs,
hw_update_data->fcg_info.sfe_fcg_entry_idx,
hw_update_data->fcg_info.prediction_idx,
&ctx->res_list_sfe_src, cfg);
if (rc) {
CAM_ERR(CAM_ISP,
"Failed in applying SFE FCG configurations, ctx_idx: %u",
ctx->ctx_index);
return rc;
}
}
return rc;
}
/* entry function: config_hw */ /* entry function: config_hw */
static int cam_ife_mgr_config_hw(void *hw_mgr_priv, static int cam_ife_mgr_config_hw(
void *config_hw_args) void *hw_mgr_priv,
void *config_hw_args)
{ {
int rc, i, skip = 0; int rc, i, skip = 0;
struct cam_hw_config_args *cfg; struct cam_hw_config_args *cfg;
@@ -7155,6 +7269,11 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
} }
skip_bw_clk_update: skip_bw_clk_update:
rc = cam_ife_mgr_apply_fcg_update(ctx, hw_update_data, cfg);
if (rc) {
CAM_ERR(CAM_ISP, "Failed in updating FCG values", ctx->ctx_index);
return rc;
}
CAM_DBG(CAM_ISP, CAM_DBG(CAM_ISP,
"Enter ctx id:%u num_hw_upd_entries %d request id: %llu", "Enter ctx id:%u num_hw_upd_entries %d request id: %llu",
@@ -7189,6 +7308,30 @@ skip_bw_clk_update:
CAM_ERR(CAM_ISP, "Unexpected BL type %d, ctx_idx=%u", CAM_ERR(CAM_ISP, "Unexpected BL type %d, ctx_idx=%u",
cmd->flags, ctx->ctx_index); cmd->flags, ctx->ctx_index);
if (hw_update_data->fcg_info.ife_fcg_online &&
(hw_update_data->fcg_info.ife_fcg_entry_idx == i)) {
CAM_DBG(CAM_ISP,
"IFE/MC_TFE FCG hw entry is detected, num_ent: %d, ctx_idx: %u, request id: %llu, use current cfg: %d",
i, ctx->ctx_index, cfg->request_id,
hw_update_data->fcg_info.use_current_cfg);
if (hw_update_data->fcg_info.use_current_cfg) {
skip++;
continue;
}
}
if (hw_update_data->fcg_info.sfe_fcg_online &&
(hw_update_data->fcg_info.sfe_fcg_entry_idx == i)) {
CAM_DBG(CAM_ISP,
"SFE FCG hw entry is detected, num_ent: %d, ctx_idx: %u, request id: %llu, use current cfg: %d",
i, ctx->ctx_index, cfg->request_id,
hw_update_data->fcg_info.use_current_cfg);
if (hw_update_data->fcg_info.use_current_cfg) {
skip++;
continue;
}
}
cdm_cmd->cmd[i - skip].bl_addr.mem_handle = cmd->handle; cdm_cmd->cmd[i - skip].bl_addr.mem_handle = cmd->handle;
cdm_cmd->cmd[i - skip].offset = cmd->offset; cdm_cmd->cmd[i - skip].offset = cmd->offset;
cdm_cmd->cmd[i - skip].len = cmd->len; cdm_cmd->cmd[i - skip].len = cmd->len;
@@ -10646,6 +10789,182 @@ static int cam_isp_validate_scratch_buffer_blob(
return 0; return 0;
} }
/*
 * cam_isp_copy_fcg_config()
 *
 * Deep-copy a userspace FCG config blob into the kernel-internal
 * representation stored in the request's prepare data.
 *
 * The UMD layout is variable-length: each cam_isp_ch_ctx_fcg_config
 * carries num_predictions trailing cam_isp_predict_fcg_config entries,
 * so successive channel/context configs are located by manual stride
 * arithmetic (fcg_ch_ctx_size) rather than array indexing.
 *
 * PHASE and STATS values are copied independently, gated by the per
 * channel/context fcg_enable_mask; num_types counts how many of those
 * config types are enabled in total (used later for KMD buf sizing).
 */
static void cam_isp_copy_fcg_config(
	struct cam_isp_fcg_config_internal *fcg_args_internal,
	struct cam_isp_generic_fcg_config  *fcg_args)
{
	struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx_internal;
	struct cam_isp_ch_ctx_fcg_config          *fcg_ch_ctx;
	struct cam_isp_predict_fcg_config_internal *fcg_predict_internal;
	struct cam_isp_predict_fcg_config          *fcg_predict;
	uint32_t fcg_ch_ctx_size, num_types = 0;
	int i, j;

	/* Copy generic FCG config */
	fcg_args_internal->num_ch_ctx = fcg_args->num_ch_ctx;
	fcg_args_internal->num_predictions = fcg_args->num_predictions;

	/*
	 * Stride of one UMD ch/ctx config: base struct already embeds one
	 * prediction, hence the (num_predictions - 1) trailing entries.
	 */
	fcg_ch_ctx_size = sizeof(struct cam_isp_ch_ctx_fcg_config) +
		(fcg_args->num_predictions - 1) *
		sizeof(struct cam_isp_predict_fcg_config);

	/* Copy channel/context FCG config */
	for (i = 0; i < fcg_args->num_ch_ctx; i++) {
		fcg_ch_ctx_internal = &fcg_args_internal->ch_ctx_fcg_configs[i];
		/* Manual stride walk over the variable-length UMD array */
		fcg_ch_ctx = (struct cam_isp_ch_ctx_fcg_config *)
			((void *)(fcg_args->ch_ctx_fcg_configs) +
			i * fcg_ch_ctx_size);

		fcg_ch_ctx_internal->fcg_ch_ctx_id =
			fcg_ch_ctx->fcg_ch_ctx_id;
		fcg_ch_ctx_internal->fcg_enable_mask =
			fcg_ch_ctx->fcg_enable_mask;

		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_PHASE) {
			for (j = 0; j < fcg_args->num_predictions; j++) {
				fcg_predict_internal =
					&fcg_ch_ctx_internal->predicted_fcg_configs[j];
				fcg_predict = &fcg_ch_ctx->predicted_fcg_configs[j];

				/* Copy 3 PHASE related values for R/G/B channel */
				fcg_predict_internal->phase_index_b =
					fcg_predict->phase_index_b;
				fcg_predict_internal->phase_index_r =
					fcg_predict->phase_index_r;
				fcg_predict_internal->phase_index_g =
					fcg_predict->phase_index_g;
				CAM_DBG(CAM_ISP,
					"Copy FCG PHASE config on ch 0x%x, prediction idx %d, phase_index_g: %u, phase_index_r: %u, phase_index_b: %u",
					fcg_ch_ctx_internal->fcg_ch_ctx_id, j,
					fcg_predict_internal->phase_index_g,
					fcg_predict_internal->phase_index_r,
					fcg_predict_internal->phase_index_b);
			}
			num_types += 1;
		}

		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_STATS) {
			for (j = 0; j < fcg_args->num_predictions; j++) {
				fcg_predict_internal =
					&fcg_ch_ctx_internal->predicted_fcg_configs[j];
				fcg_predict = &fcg_ch_ctx->predicted_fcg_configs[j];

				/* Copy 3 STATS related values for R/G/B channel */
				fcg_predict_internal->stats_index_b =
					fcg_predict->stats_index_b;
				fcg_predict_internal->stats_index_r =
					fcg_predict->stats_index_r;
				fcg_predict_internal->stats_index_g =
					fcg_predict->stats_index_g;
				CAM_DBG(CAM_ISP,
					"Copy FCG STATS config on ch 0x%x, prediction idx %d, stats_index_g: %u, stats_index_r: %u, stats_index_b: %u",
					fcg_ch_ctx_internal->fcg_ch_ctx_id, j,
					fcg_predict_internal->stats_index_g,
					fcg_predict_internal->stats_index_r,
					fcg_predict_internal->stats_index_b);
			}
			num_types += 1;
		}
	}
	fcg_args_internal->num_types = num_types;

	CAM_DBG(CAM_ISP,
		"Inspect on copied FCG config, num_types: %u, num_ch_ctx: %u, num_predictions: %u",
		num_types, fcg_args_internal->num_ch_ctx,
		fcg_args_internal->num_predictions);
}
/*
 * cam_isp_blob_fcg_config_prepare()
 *
 * Store a validated FCG config blob from userspace into this request's
 * prepare data (fcg_info) for the given hw type (SFE or VFE/IFE), after
 * checking the blob's self-declared size against the size implied by its
 * channel/context and prediction counts.
 *
 * Returns 0 on success, -EINVAL on duplicate config for the same hw
 * type, size mismatch, or unsupported hw type.
 *
 * Fix vs. original: "comsumption" typo in the size-mismatch error log.
 */
static int cam_isp_blob_fcg_config_prepare(
	struct cam_isp_generic_fcg_config *fcg_config_args,
	struct cam_hw_prepare_update_args *prepare,
	enum cam_isp_hw_type               hw_type)
{
	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
	struct cam_isp_prepare_hw_update_data *prepare_hw_data;
	struct cam_isp_fcg_config_info        *fcg_info;
	uint32_t                               fcg_size;
	uint64_t                               request_id;

	ctx = prepare->ctxt_to_hw_map;
	request_id = prepare->packet->header.request_id;
	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *) prepare->priv;
	fcg_info = &(prepare_hw_data->fcg_info);

	/* Each hw type may carry at most one FCG blob per request */
	if ((hw_type == CAM_ISP_HW_TYPE_SFE) &&
		fcg_info->sfe_fcg_online) {
		CAM_ERR(CAM_ISP,
			"SFE FCG config is sent more than once, ctx_id: %u, request_id: %llu",
			ctx->ctx_index, request_id);
		return -EINVAL;
	}

	if ((hw_type == CAM_ISP_HW_TYPE_VFE) &&
		fcg_info->ife_fcg_online) {
		CAM_ERR(CAM_ISP,
			"IFE/MC_TFE FCG config is sent more than once, ctx_id: %u, request_id: %llu",
			ctx->ctx_index, request_id);
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP,
		"Start storing FCG config in req_isp on ctx_idx: %u, hw_type: %d, request_id: %llu",
		ctx->ctx_index, hw_type, request_id);

	/*
	 * Expected size: generic header + (num_ch_ctx) ch/ctx configs, each
	 * embedding (num_predictions) prediction configs.  The "- 1" terms
	 * account for the single element already embedded in each parent
	 * struct.  Counts are pre-validated against small hw maximums by
	 * cam_isp_validate_fcg_configs(), so this arithmetic cannot overflow.
	 */
	fcg_size = sizeof(struct cam_isp_generic_fcg_config);
	fcg_size += (fcg_config_args->num_ch_ctx - 1) *
		sizeof(struct cam_isp_ch_ctx_fcg_config);
	fcg_size += fcg_config_args->num_ch_ctx *
		(fcg_config_args->num_predictions - 1) *
		sizeof(struct cam_isp_predict_fcg_config);

	if (fcg_size != fcg_config_args->size) {
		CAM_ERR(CAM_ISP,
			"Mismatched size between userspace provides and real consumption %u - %u, ctx_idx: %u, request_id: %llu",
			fcg_config_args->size, fcg_size,
			ctx->ctx_index, request_id);
		return -EINVAL;
	}

	switch (hw_type) {
	case CAM_ISP_HW_TYPE_SFE:
		fcg_info->sfe_fcg_online = true;
		cam_isp_copy_fcg_config(&fcg_info->sfe_fcg_config,
			fcg_config_args);
		break;
	case CAM_ISP_HW_TYPE_VFE:
		fcg_info->ife_fcg_online = true;
		cam_isp_copy_fcg_config(&fcg_info->ife_fcg_config,
			fcg_config_args);
		break;
	default:
		CAM_ERR(CAM_ISP,
			"Failed in parsing FCG configuration for hw_type: %u, ctx_idx: %u, request_id: %llu",
			hw_type, ctx->ctx_index, request_id);
		return -EINVAL;
	}

	return 0;
}
/*
 * cam_isp_validate_fcg_configs()
 *
 * Sanity-check the channel/context and prediction counts in a userspace
 * FCG config blob against the given hardware maximums.
 *
 * Returns 0 when both counts are in (0, max], -EINVAL otherwise.
 */
static int cam_isp_validate_fcg_configs(
	struct cam_isp_generic_fcg_config *fcg_config_args,
	uint32_t                           max_fcg_ch_ctx,
	uint32_t                           max_fcg_predictions,
	struct cam_ife_hw_mgr_ctx         *ife_mgr_ctx)
{
	uint32_t num_ch_ctx = fcg_config_args->num_ch_ctx;
	uint32_t num_predictions = fcg_config_args->num_predictions;

	if ((num_ch_ctx == 0) || (num_ch_ctx > max_fcg_ch_ctx)) {
		CAM_ERR(CAM_ISP, "Invalid num of channels/contexts %u in FCG config, ctx_idx: %u",
			num_ch_ctx, ife_mgr_ctx->ctx_index);
		return -EINVAL;
	}

	if ((num_predictions == 0) || (num_predictions > max_fcg_predictions)) {
		CAM_ERR(CAM_ISP, "Invalid num of predictions %u in FCG config, ctx_idx: %u",
			num_predictions, ife_mgr_ctx->ctx_index);
		return -EINVAL;
	}

	return 0;
}
static int cam_isp_packet_generic_blob_handler(void *user_data, static int cam_isp_packet_generic_blob_handler(void *user_data,
uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data) uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{ {
@@ -11273,12 +11592,46 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
rc, ife_mgr_ctx->ctx_index); rc, ife_mgr_ctx->ctx_index);
} }
break; break;
case CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG: {
struct cam_isp_generic_fcg_config *fcg_config_args;
if (blob_size <
sizeof(struct cam_isp_generic_fcg_config)) {
CAM_ERR(CAM_ISP, "Invalid blob size %u, fcg config size: %u, ctx_idx: %u",
blob_size,
sizeof(struct cam_isp_generic_fcg_config),
ife_mgr_ctx->ctx_index);
return -EINVAL;
}
fcg_config_args =
(struct cam_isp_generic_fcg_config *)blob_data;
rc = cam_isp_validate_fcg_configs(fcg_config_args,
CAM_ISP_IFE_MAX_FCG_CH_CTXS,
CAM_ISP_IFE_MAX_FCG_PREDICTIONS,
ife_mgr_ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Failed in validating FCG configs, ctx_idx: %u",
ife_mgr_ctx->ctx_index);
return rc;
}
rc = cam_isp_blob_fcg_config_prepare(fcg_config_args,
prepare, CAM_ISP_HW_TYPE_VFE);
if (rc)
CAM_ERR(CAM_ISP,
"FCG configuration preparation failed, rc: %d, ctx_idx: %d",
rc, ife_mgr_ctx->ctx_index);
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CLOCK_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CORE_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_SFE_CORE_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_SFE_OUT_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_SFE_HFR_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FE_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG: case CAM_ISP_GENERIC_BLOB_TYPE_SFE_EXP_ORDER_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_FPS_CONFIG:
break; break;
case CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG: case CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG:
@@ -11574,6 +11927,8 @@ static int cam_csid_packet_generic_blob_handler(void *user_data,
case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_INIT_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_RDI_LCR_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_RDI_LCR_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_DRV_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_DRV_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG:
break; break;
case CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG: { case CAM_ISP_GENERIC_BLOB_TYPE_IRQ_COMP_CFG: {
struct cam_isp_irq_comp_cfg *irq_comp_cfg; struct cam_isp_irq_comp_cfg *irq_comp_cfg;
@@ -11959,6 +12314,40 @@ static int cam_sfe_packet_generic_blob_handler(void *user_data,
rc, ife_mgr_ctx->ctx_index); rc, ife_mgr_ctx->ctx_index);
} }
break; break;
case CAM_ISP_GENERIC_BLOB_TYPE_SFE_FCG_CFG: {
struct cam_isp_generic_fcg_config *fcg_config_args;
if (blob_size <
sizeof(struct cam_isp_generic_fcg_config)) {
CAM_ERR(CAM_ISP, "Invalid blob size %u, fcg config size: %u, ctx_idx: %u",
blob_size,
sizeof(struct cam_isp_generic_fcg_config),
ife_mgr_ctx->ctx_index);
return -EINVAL;
}
fcg_config_args =
(struct cam_isp_generic_fcg_config *)blob_data;
rc = cam_isp_validate_fcg_configs(fcg_config_args,
CAM_ISP_SFE_MAX_FCG_CHANNELS,
CAM_ISP_SFE_MAX_FCG_PREDICTIONS,
ife_mgr_ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Failed in validating FCG configs, ctx_idx: %u",
ife_mgr_ctx->ctx_index);
return rc;
}
rc = cam_isp_blob_fcg_config_prepare(fcg_config_args,
prepare, CAM_ISP_HW_TYPE_SFE);
if (rc)
CAM_ERR(CAM_ISP,
"FCG configuration preparation failed, rc: %d, ctx_idx: %d",
rc, ife_mgr_ctx->ctx_index);
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_IFE_FCG_CFG:
case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG:
case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG:
@@ -12554,6 +12943,84 @@ add_cmds:
return rc; return rc;
} }
/*
 * cam_ife_hw_mgr_add_fcg_update()
 *
 * Reserve a dummy hw update entry sized for the FCG register writes so
 * that config_hw can later fill it with the chosen prediction values.
 *
 * Queries the first valid hw resource in res_list_isp_src for the KMD
 * buffer size FCG needs (CAM_ISP_HW_CMD_FCG_CONFIG with get_size_flag;
 * process_cmd is expected to fill kmd_size and fcg_supported), then
 * appends an entry of that size via cam_ife_mgr_update_hw_entries_util()
 * and records its index in *fcg_entry_idx.
 *
 * When the hardware reports FCG as unsupported — or no valid resource is
 * found at all — *fcg_online is cleared and 0 is returned so the caller
 * proceeds without FCG.
 *
 * Fixes vs. original:
 *  - fcg_cmd and rc were read uninitialized when the resource list was
 *    empty or held no valid hw_res (UB on the fcg_supported check).
 *  - a zero kmd_size with rc == 0 returned 0 (success) to the caller;
 *    now returns -EINVAL.
 */
static int cam_ife_hw_mgr_add_fcg_update(
	struct cam_hw_prepare_update_args    *prepare,
	struct cam_kmd_buf_info              *kmd_buf_info,
	struct cam_isp_fcg_config_internal   *fcg_args_internal,
	bool                                 *fcg_online,
	uint32_t                             *fcg_entry_idx,
	struct list_head                     *res_list_isp_src)
{
	uint32_t fcg_kmd_size, num_ent, i;
	struct cam_isp_hw_mgr_res    *hw_mgr_res;
	struct cam_isp_resource_node *res;
	struct cam_isp_hw_fcg_cmd     fcg_cmd = {0};
	int rc = 0;

	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
		if (hw_mgr_res->res_type == CAM_ISP_RESOURCE_UNINT)
			continue;

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			res = hw_mgr_res->hw_res[i];
			fcg_cmd.res = res;
			fcg_cmd.cmd_type = CAM_ISP_HW_CMD_FCG_CONFIG;
			fcg_cmd.get_size_flag = true;
			fcg_cmd.u.fcg_get_size.num_types = fcg_args_internal->num_types;
			fcg_cmd.u.fcg_get_size.num_ctxs = fcg_args_internal->num_ch_ctx;
			fcg_cmd.u.fcg_get_size.kmd_size = 0;

			rc = res->hw_intf->hw_ops.process_cmd(
				res->hw_intf->hw_priv,
				CAM_ISP_HW_CMD_FCG_CONFIG, &fcg_cmd,
				sizeof(struct cam_isp_hw_fcg_cmd));
			if (rc || (fcg_cmd.u.fcg_get_size.kmd_size == 0)) {
				CAM_ERR(CAM_ISP,
					"Failed in retrieving KMD buf size requirement, rc: %d",
					rc);
				return rc ? rc : -EINVAL;
			}
		}
		/* Only the first valid node is queried for sizing */
		break;
	}

	if (!fcg_cmd.u.fcg_get_size.fcg_supported) {
		*fcg_online = false;
		CAM_WARN(CAM_ISP, "FCG is sent from userspace but not supported by the hardware");
		return 0;
	}

	/* kmd_size is in units of dwords */
	fcg_kmd_size = fcg_cmd.u.fcg_get_size.kmd_size * sizeof(uint32_t);
	CAM_DBG(CAM_ISP, "KMD buf usage for FCG config is %u", fcg_kmd_size);

	num_ent = prepare->num_hw_update_entries;
	if (num_ent + 1 >= prepare->max_hw_update_entries) {
		CAM_ERR(CAM_ISP, "Insufficient HW entries: %u, %u",
			num_ent, prepare->max_hw_update_entries);
		return -EINVAL;
	}

	if (fcg_kmd_size + kmd_buf_info->used_bytes >
		kmd_buf_info->size) {
		CAM_ERR(CAM_ISP, "Insufficient space in kmd buffer, used_bytes: %u, buf size: %u",
			kmd_buf_info->used_bytes, kmd_buf_info->size);
		return -ENOMEM;
	}

	*fcg_entry_idx = num_ent;
	cam_ife_mgr_update_hw_entries_util(CAM_ISP_IQ_BL, fcg_kmd_size,
		kmd_buf_info, prepare, false);

	CAM_DBG(CAM_ISP, "FCG dummy entry, num_ent: %u, entry_size: %u",
		num_ent, fcg_kmd_size);

	return 0;
}
static int cam_ife_hw_mgr_update_cmd_buffer( static int cam_ife_hw_mgr_update_cmd_buffer(
struct cam_ife_hw_mgr_ctx *ctx, struct cam_ife_hw_mgr_ctx *ctx,
struct cam_hw_prepare_update_args *prepare, struct cam_hw_prepare_update_args *prepare,
@@ -12561,10 +13028,11 @@ static int cam_ife_hw_mgr_update_cmd_buffer(
struct cam_isp_cmd_buf_count *cmd_buf_count, struct cam_isp_cmd_buf_count *cmd_buf_count,
uint32_t base_idx) uint32_t base_idx)
{ {
struct list_head *res_list = NULL; struct list_head *res_list = NULL;
struct cam_isp_change_base_args change_base_info = {0}; struct cam_isp_change_base_args change_base_info = {0};
int rc = 0; int rc = 0;
struct cam_isp_prepare_hw_update_data *prepare_hw_data; struct cam_isp_prepare_hw_update_data *prepare_hw_data;
struct cam_isp_fcg_config_info *fcg_info;
prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)prepare->priv; prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)prepare->priv;
@@ -12603,7 +13071,14 @@ static int cam_ife_hw_mgr_update_cmd_buffer(
ctx->cdm_id, ctx->ctx_index); ctx->cdm_id, ctx->ctx_index);
} }
if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_SFE) CAM_DBG(CAM_ISP,
"Add cmdbuf, i=%d, split_id=%d, hw_type=%d ctx_idx: %u",
base_idx, ctx->base[base_idx].split_id,
ctx->base[base_idx].hw_type, ctx->ctx_index);
fcg_info = &(prepare_hw_data->fcg_info);
if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_SFE) {
rc = cam_sfe_add_command_buffers( rc = cam_sfe_add_command_buffers(
prepare, kmd_buf, &ctx->base[base_idx], prepare, kmd_buf, &ctx->base[base_idx],
cam_sfe_packet_generic_blob_handler, cam_sfe_packet_generic_blob_handler,
@@ -12612,7 +13087,22 @@ static int cam_ife_hw_mgr_update_cmd_buffer(
CAM_ISP_SFE_OUT_RES_BASE, CAM_ISP_SFE_OUT_RES_BASE,
(CAM_ISP_SFE_OUT_RES_BASE + (CAM_ISP_SFE_OUT_RES_BASE +
max_sfe_out_res)); max_sfe_out_res));
else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_VFE) if (rc)
goto add_cmd_err;
/* No need to handle FCG entry if no valid fcg config from userspace */
if (!fcg_info->sfe_fcg_online)
goto end;
rc = cam_ife_hw_mgr_add_fcg_update(
prepare, kmd_buf,
&fcg_info->sfe_fcg_config,
&fcg_info->sfe_fcg_online,
&fcg_info->sfe_fcg_entry_idx,
res_list);
if (rc)
goto add_cmd_err;
} else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_VFE) {
rc = cam_isp_add_command_buffers( rc = cam_isp_add_command_buffers(
prepare, kmd_buf, &ctx->base[base_idx], prepare, kmd_buf, &ctx->base[base_idx],
cam_isp_packet_generic_blob_handler, cam_isp_packet_generic_blob_handler,
@@ -12621,21 +13111,37 @@ static int cam_ife_hw_mgr_update_cmd_buffer(
CAM_ISP_IFE_OUT_RES_BASE, CAM_ISP_IFE_OUT_RES_BASE,
(CAM_ISP_IFE_OUT_RES_BASE + (CAM_ISP_IFE_OUT_RES_BASE +
max_ife_out_res)); max_ife_out_res));
else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_CSID) if (rc)
goto add_cmd_err;
/* No need to handle FCG entry if no valid fcg config from userspace */
if (!fcg_info->ife_fcg_online)
goto end;
rc = cam_ife_hw_mgr_add_fcg_update(
prepare, kmd_buf,
&fcg_info->ife_fcg_config,
&fcg_info->ife_fcg_online,
&fcg_info->ife_fcg_entry_idx,
res_list);
if (rc)
goto add_cmd_err;
} else if (ctx->base[base_idx].hw_type == CAM_ISP_HW_TYPE_CSID) {
rc = cam_isp_add_csid_command_buffers(prepare, rc = cam_isp_add_csid_command_buffers(prepare,
kmd_buf, cam_csid_packet_generic_blob_handler, kmd_buf, cam_csid_packet_generic_blob_handler,
&ctx->base[base_idx]); &ctx->base[base_idx]);
if (rc)
goto add_cmd_err;
}
CAM_DBG(CAM_ISP, return rc;
"Add cmdbuf, i=%d, split_id=%d, hw_type=%d ctx_idx: %u",
base_idx, ctx->base[base_idx].split_id, add_cmd_err:
CAM_ERR(CAM_ISP,
"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d hw_type=%d ctx_idx: %u",
base_idx, ctx->base[base_idx].split_id, rc,
ctx->base[base_idx].hw_type, ctx->ctx_index); ctx->base[base_idx].hw_type, ctx->ctx_index);
end:
if (rc)
CAM_ERR(CAM_ISP,
"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d hw_type=%d ctx_idx: %u",
base_idx, ctx->base[base_idx].split_id, rc,
ctx->base[base_idx].hw_type, ctx->ctx_index);
return rc; return rc;
} }

查看文件

@@ -29,6 +29,8 @@ static void cam_isp_add_update_entry(
curr_update_entry->len = update_size; curr_update_entry->len = update_size;
curr_update_entry->offset = kmd_buf_info->offset; curr_update_entry->offset = kmd_buf_info->offset;
curr_update_entry->flags = cdm_bl_type; curr_update_entry->flags = cdm_bl_type;
curr_update_entry->addr = (uintptr_t) (kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4);
num_ent++; num_ent++;
kmd_buf_info->used_bytes += update_size; kmd_buf_info->used_bytes += update_size;

查看文件

@@ -62,6 +62,15 @@
*/ */
#define CAM_ISP_SFE_CTX_CFG_MAX 40 #define CAM_ISP_SFE_CTX_CFG_MAX 40
/* Maximum number of channels/contexts for FCG modules */
#define CAM_ISP_MAX_FCG_CH_CTXS 3
#define CAM_ISP_IFE_MAX_FCG_CH_CTXS 3
#define CAM_ISP_SFE_MAX_FCG_CHANNELS 2
/* Maximum number of predictions for FCG config */
#define CAM_ISP_MAX_FCG_PREDICTIONS 3
#define CAM_ISP_IFE_MAX_FCG_PREDICTIONS CAM_ISP_MAX_FCG_PREDICTIONS
#define CAM_ISP_SFE_MAX_FCG_PREDICTIONS CAM_ISP_MAX_FCG_PREDICTIONS
/** /**
* enum cam_isp_hw_event_type - Collection of the ISP hardware events * enum cam_isp_hw_event_type - Collection of the ISP hardware events
@@ -186,7 +195,7 @@ struct cam_isp_clock_config_internal {
* *
* @usage_type: ife hw index * @usage_type: ife hw index
* @num_paths: Number of data paths * @num_paths: Number of data paths
* @axi_path per path vote info * @axi_path: per path vote info
*/ */
struct cam_isp_bw_config_internal_v2 { struct cam_isp_bw_config_internal_v2 {
uint32_t usage_type; uint32_t usage_type;
@@ -235,6 +244,88 @@ struct cam_isp_bw_clk_config_info {
}; };
/**
* struct cam_isp_predict_fcg_config_internal - Internal FCG config in a single prediction
*
* @phase_index_g: Starting index of LUT for G channel in phase
* @phase_index_r: Starting index of LUT for R channel in phase
* @phase_index_b: Starting index of LUT for B channel in phase
* @stats_index_g: Starting index of LUT for G channel in stats
* @stats_index_r: Starting index of LUT for R channel in stats
* @stats_index_b: Starting index of LUT for B channel in stats
*/
struct cam_isp_predict_fcg_config_internal {
uint32_t phase_index_g;
uint32_t phase_index_r;
uint32_t phase_index_b;
uint32_t stats_index_g;
uint32_t stats_index_r;
uint32_t stats_index_b;
};
/**
* struct cam_isp_ch_ctx_fcg_config_internal - Internal FCG config in a single channel or context
*
* @fcg_ch_ctx_id: Index of the channel in SFE/IFE or context in TFE
* to be configured that FCG blocks reside on.
* For example, if one wants to config FCG block
* for IFE in ctx 0, this value will be CAM_ISP_FCG_MASK_CH0
* @fcg_enable_mask: Indicate which module will be enabled for
* FCG. For example, if one wants to config
* SFE FCG STATS module, CAM_ISP_FCG_ENABLE_STATS
* will be set in mask
* @predicted_fcg_configs: FCG config for each prediction of the channel
* in serial order
*/
struct cam_isp_ch_ctx_fcg_config_internal {
uint32_t fcg_ch_ctx_id;
uint32_t fcg_enable_mask;
struct cam_isp_predict_fcg_config_internal predicted_fcg_configs[
CAM_ISP_MAX_FCG_PREDICTIONS];
};
/**
* struct cam_isp_fcg_config_internal - Internal FCG config for a frame
*
* @num_ch_ctx: Number of channels for FCG config for SFE/IFE or
* number of contexts for FCG config for TFE
* @num_predictions: Number of predictions for each channel
* @num_types: Number of types(STATS/PHASE) for FCG config
* @ch_ctx_fcg_configs: FCG config for each channel or context
*/
struct cam_isp_fcg_config_internal {
uint32_t num_ch_ctx;
uint32_t num_predictions;
uint32_t num_types;
struct cam_isp_ch_ctx_fcg_config_internal ch_ctx_fcg_configs[
CAM_ISP_MAX_FCG_CH_CTXS];
};
/**
* struct cam_isp_fcg_config_info - Track FCG config for further usage in config stage
*
* @prediction_idx: Indicate which exact prediction to be used, decided
* during trying to apply the request
* @sfe_fcg_entry_idx: Index for SFE FCG config in hw update entries
* @sfe_fcg_config: Internal storage of SFE FCG configurations
* @ife_fcg_entry_idx: Index for IFE/MC_TFE FCG config in hw update entries
* @ife_fcg_config: Internal storage of IFE/MC_TFE FCG configurations
* @use_current_cfg: Indicate whether use current configuration or replace
* the value with FCG predicted ones.
* @sfe_fcg_online: Indicate whether SFE FCG handling is online or not
* @ife_fcg_online: Indicate whether IFE/MC_TFE FCG handling is online or not
*/
struct cam_isp_fcg_config_info {
uint32_t prediction_idx;
uint32_t sfe_fcg_entry_idx;
struct cam_isp_fcg_config_internal sfe_fcg_config;
uint32_t ife_fcg_entry_idx;
struct cam_isp_fcg_config_internal ife_fcg_config;
bool use_current_cfg;
bool sfe_fcg_online;
bool ife_fcg_online;
};
/** /**
* struct cam_isp_prepare_hw_update_data - hw prepare data * struct cam_isp_prepare_hw_update_data - hw prepare data
* *
@@ -256,6 +347,7 @@ struct cam_isp_bw_clk_config_info {
* @mup_val: MUP value if configured * @mup_val: MUP value if configured
* @num_exp: Num of exposures * @num_exp: Num of exposures
* @mup_en: Flag if dynamic sensor switch is enabled * @mup_en: Flag if dynamic sensor switch is enabled
* @fcg_info: Track FCG config for further usage in config stage
* *
*/ */
struct cam_isp_prepare_hw_update_data { struct cam_isp_prepare_hw_update_data {
@@ -277,6 +369,7 @@ struct cam_isp_prepare_hw_update_data {
uint32_t mup_val; uint32_t mup_val;
uint32_t num_exp; uint32_t num_exp;
bool mup_en; bool mup_en;
struct cam_isp_fcg_config_info fcg_info;
}; };

查看文件

@@ -190,6 +190,7 @@ enum cam_isp_hw_cmd_type {
CAM_ISP_HW_CMD_BW_UPDATE, CAM_ISP_HW_CMD_BW_UPDATE,
CAM_ISP_HW_CMD_BW_UPDATE_V2, CAM_ISP_HW_CMD_BW_UPDATE_V2,
CAM_ISP_HW_CMD_BW_CONTROL, CAM_ISP_HW_CMD_BW_CONTROL,
CAM_ISP_HW_CMD_FCG_CONFIG,
CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ, CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
CAM_ISP_HW_CMD_GET_REG_DUMP, CAM_ISP_HW_CMD_GET_REG_DUMP,
CAM_ISP_HW_CMD_UBWC_UPDATE, CAM_ISP_HW_CMD_UBWC_UPDATE,
@@ -443,10 +444,65 @@ struct cam_isp_hw_get_res_for_mid {
uint32_t out_res_id; uint32_t out_res_id;
}; };
/**
* struct cam_isp_hw_fcg_get_size:
*
* @Brief: Get the size of KMD buf FCG config needs
*
* @num_types: Num of types(STATS/PHASE) for each FCG config
* @num_ctxs: Num of contexts for each FCG config in MC_TFE
* @kmd_size: Size of KMD buffer that will be used for FCG
* @fcg_supported: Indicate whether FCG is supported by the hardware
*/
struct cam_isp_hw_fcg_get_size {
uint32_t num_types;
uint32_t num_ctxs;
uint32_t kmd_size;
bool fcg_supported;
};
/**
* struct cam_isp_hw_fcg_update:
*
* @Brief: Get FCG update and pass to lower level processing
*
 * @cmd_buf_addr:   Command buffer address that FCG configs are written into
* @cmd_size: Size of the command
* @prediction_idx: Indicate exact FCG predictions to be used
* @data: Exact FCG configs
*/
struct cam_isp_hw_fcg_update {
uintptr_t cmd_buf_addr;
uint32_t cmd_size;
uint32_t prediction_idx;
void *data;
};
/**
* struct cam_isp_hw_fcg_cmd
*
* @Brief: Union struct for fcg related cmd
*
* @res: Resource node
* @cmd_type: Command type
* @get_size_flag: Indicate to get kmd size for FCG or apply FCG update
* True - Get the size of KMD buffer to carry reg/val pairs
* False - Apply FCG update and pass it to SFE/IFE/MC_TFE
*/
struct cam_isp_hw_fcg_cmd {
struct cam_isp_resource_node *res;
enum cam_isp_hw_cmd_type cmd_type;
bool get_size_flag;
union {
struct cam_isp_hw_fcg_update fcg_update;
struct cam_isp_hw_fcg_get_size fcg_get_size;
} u;
};
/* /*
* struct cam_isp_hw_get_cmd_update: * struct cam_isp_hw_get_cmd_update:
* *
* @Brief: Get cmd buffer update for different CMD types * @Brief: Get cmd buffer update for different CMD types
* *
* @res: Resource node * @res: Resource node
* @cmd_type: Command type for which to get update * @cmd_type: Command type for which to get update

查看文件

@@ -621,13 +621,29 @@ static struct cam_sfe_top_common_reg_offset sfe880_top_commong_reg = {
}, },
}; };
static struct cam_sfe_fcg_module_info sfe880_fcg_module_info = {
.fcg_index_shift = 16,
.max_reg_val_pair_size = 16,
.fcg_type_size = 2,
.fcg_ch1_phase_index_cfg_0 = 0x00007270,
.fcg_ch1_phase_index_cfg_1 = 0x00007274,
.fcg_ch2_phase_index_cfg_0 = 0x00007470,
.fcg_ch2_phase_index_cfg_1 = 0x00007474,
.fcg_ch1_stats_phase_index_cfg_0 = 0x00007670,
.fcg_ch1_stats_phase_index_cfg_1 = 0x00007674,
.fcg_ch2_stats_phase_index_cfg_0 = 0x00007870,
.fcg_ch2_stats_phase_index_cfg_1 = 0x00007874,
};
static struct cam_sfe_modules_common_reg_offset sfe880_modules_common_reg = { static struct cam_sfe_modules_common_reg_offset sfe880_modules_common_reg = {
.demux_module_cfg = 0x00003060, .demux_module_cfg = 0x00003060,
.demux_xcfa_cfg = 0x00003064, .demux_xcfa_cfg = 0x00003064,
.demux_hdr_cfg = 0x00003074, .demux_hdr_cfg = 0x00003074,
.hdrc_remo_mod_cfg = 0x00005860, .hdrc_remo_mod_cfg = 0x00005860,
.xcfa_hdrc_remo_out_mux_cfg = 0x00005A74, .xcfa_hdrc_remo_out_mux_cfg = 0x00005A74,
.hdrc_remo_xcfa_bin_cfg = 0x00005A78, .hdrc_remo_xcfa_bin_cfg = 0x00005A78,
.fcg_module_info = &sfe880_fcg_module_info,
.fcg_supported = true,
}; };
static struct cam_sfe_top_common_reg_data sfe_880_top_common_reg_data = { static struct cam_sfe_top_common_reg_data sfe_880_top_common_reg_data = {

查看文件

@@ -395,6 +395,7 @@ int cam_sfe_process_cmd(void *hw_priv, uint32_t cmd_type,
case CAM_ISP_HW_CMD_CORE_CONFIG: case CAM_ISP_HW_CMD_CORE_CONFIG:
case CAM_ISP_HW_NOTIFY_OVERFLOW: case CAM_ISP_HW_NOTIFY_OVERFLOW:
case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE:
case CAM_ISP_HW_CMD_FCG_CONFIG:
rc = core_info->sfe_top->hw_ops.process_cmd( rc = core_info->sfe_top->hw_ops.process_cmd(
core_info->sfe_top->top_priv, cmd_type, core_info->sfe_top->top_priv, cmd_type,
cmd_args, arg_size); cmd_args, arg_size);

查看文件

@@ -1143,6 +1143,259 @@ end:
return rc; return rc;
} }
/*
 * cam_sfe_top_apply_fcg_update()
 *
 * @brief: Translate the selected FCG prediction into SFE PHASE/STATS
 *         index register writes and emit them as a single CDM reg-random
 *         command into the KMD buffer slot reserved at prepare time
 *         (fcg_update->cmd_buf_addr).
 *
 * @top_priv:     SFE top private data
 * @fcg_update:   FCG update args (cmd buf addr/size, 1-based prediction
 *                index, internal FCG config in @data)
 * @cdm_util_ops: CDM utility ops used to size and write the command
 *
 * @return: 0 on success, negative errno on failure
 */
static int cam_sfe_top_apply_fcg_update(
	struct cam_sfe_top_priv *top_priv,
	struct cam_isp_hw_fcg_update *fcg_update,
	struct cam_cdm_utils_ops *cdm_util_ops)
{
	struct cam_isp_fcg_config_internal *fcg_config;
	struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx;
	struct cam_isp_predict_fcg_config_internal *fcg_pr;
	struct cam_sfe_top_hw_info *hw_info;
	struct cam_sfe_fcg_module_info *fcg_module_info;
	uint32_t size, fcg_index_shift;
	uint32_t *reg_val_pair;
	uint32_t num_regval_pairs = 0;
	int rc = 0, i, j = 0;

	/* prediction_idx is 1-based; 0 means no valid prediction was picked */
	if (!top_priv || (fcg_update->prediction_idx == 0)) {
		CAM_ERR(CAM_SFE, "Invalid args");
		return -EINVAL;
	}

	hw_info = top_priv->hw_info;
	fcg_config = (struct cam_isp_fcg_config_internal *)fcg_update->data;
	if (!fcg_config || !hw_info) {
		CAM_ERR(CAM_SFE, "Invalid config params");
		return -EINVAL;
	}

	fcg_module_info = hw_info->modules_hw_info->fcg_module_info;
	if (!fcg_module_info) {
		CAM_ERR(CAM_SFE, "Invalid FCG common data");
		return -EINVAL;
	}

	reg_val_pair = kcalloc(fcg_module_info->max_reg_val_pair_size, sizeof(uint32_t),
		GFP_KERNEL);
	if (!reg_val_pair) {
		CAM_ERR(CAM_SFE, "Failed allocating memory for reg val pair");
		return -ENOMEM;
	}

	fcg_index_shift = fcg_module_info->fcg_index_shift;
	for (i = 0, j = 0; i < fcg_config->num_ch_ctx; i++) {
		/* j counts reg AND val slots; guard before each channel */
		if (j >= fcg_module_info->max_reg_val_pair_size) {
			CAM_ERR(CAM_SFE, "reg_val_pair %d exceeds the array limit %u",
				j, fcg_module_info->max_reg_val_pair_size);
			rc = -ENOMEM;
			goto free_mem;
		}

		fcg_ch_ctx = &fcg_config->ch_ctx_fcg_configs[i];
		/* Predictions are stored in serial order; index is 1-based */
		fcg_pr = &fcg_ch_ctx->predicted_fcg_configs[
			fcg_update->prediction_idx - 1];

		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_PHASE) {
			switch (fcg_ch_ctx->fcg_ch_ctx_id) {
			case CAM_ISP_FCG_MASK_CH1:
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch1_phase_index_cfg_0,
					(fcg_pr->phase_index_r |
					fcg_pr->phase_index_g << fcg_index_shift));
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch1_phase_index_cfg_1,
					fcg_pr->phase_index_b);
				CAM_DBG(CAM_SFE,
					"Program FCG registers for SFE channel 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_pr->phase_index_r |
					(fcg_pr->phase_index_g << fcg_index_shift)),
					fcg_pr->phase_index_b);
				break;
			case CAM_ISP_FCG_MASK_CH2:
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch2_phase_index_cfg_0,
					(fcg_pr->phase_index_r |
					fcg_pr->phase_index_g << fcg_index_shift));
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch2_phase_index_cfg_1,
					fcg_pr->phase_index_b);
				CAM_DBG(CAM_SFE,
					"Program FCG registers for SFE channel 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_pr->phase_index_r |
					(fcg_pr->phase_index_g << fcg_index_shift)),
					fcg_pr->phase_index_b);
				break;
			default:
				CAM_ERR(CAM_SFE, "Unsupported channel id: 0x%x",
					fcg_ch_ctx->fcg_ch_ctx_id);
				rc = -EINVAL;
				goto free_mem;
			}
		}

		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_STATS) {
			switch (fcg_ch_ctx->fcg_ch_ctx_id) {
			case CAM_ISP_FCG_MASK_CH1:
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch1_stats_phase_index_cfg_0,
					(fcg_pr->stats_index_r |
					fcg_pr->stats_index_g << fcg_index_shift));
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch1_stats_phase_index_cfg_1,
					fcg_pr->stats_index_b);
				/* Log stats_index values, not the phase ones */
				CAM_DBG(CAM_SFE,
					"Program FCG registers for SFE channel 0x%x, stats_index_cfg_0: %u, stats_index_cfg_1: %u",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_pr->stats_index_r |
					(fcg_pr->stats_index_g << fcg_index_shift)),
					fcg_pr->stats_index_b);
				break;
			case CAM_ISP_FCG_MASK_CH2:
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch2_stats_phase_index_cfg_0,
					(fcg_pr->stats_index_r |
					fcg_pr->stats_index_g << fcg_index_shift));
				CAM_SFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_ch2_stats_phase_index_cfg_1,
					fcg_pr->stats_index_b);
				/* Log stats_index values, not the phase ones */
				CAM_DBG(CAM_SFE,
					"Program FCG registers for SFE channel 0x%x, stats_index_cfg_0: %u, stats_index_cfg_1: %u",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_pr->stats_index_r |
					(fcg_pr->stats_index_g << fcg_index_shift)),
					fcg_pr->stats_index_b);
				break;
			default:
				CAM_ERR(CAM_SFE, "Unsupported channel id: 0x%x",
					fcg_ch_ctx->fcg_ch_ctx_id);
				rc = -EINVAL;
				goto free_mem;
			}
		}
	}

	num_regval_pairs = j / 2;
	if (num_regval_pairs) {
		size = cdm_util_ops->cdm_required_size_reg_random(
			num_regval_pairs);
		/* cmd_size was reserved at prepare time; sizes are in dwords */
		if ((size * 4) != fcg_update->cmd_size) {
			CAM_ERR(CAM_SFE,
				"Failed! Buf size:%d is wrong, expected size: %d",
				fcg_update->cmd_size, size * 4);
			rc = -ENOMEM;
			goto free_mem;
		}
		cdm_util_ops->cdm_write_regrandom(
			(uint32_t *)fcg_update->cmd_buf_addr,
			num_regval_pairs, reg_val_pair);
	} else {
		CAM_WARN(CAM_SFE, "No reg val pairs");
	}

free_mem:
	kfree(reg_val_pair);
	return rc;
}
/*
 * cam_sfe_top_get_fcg_buf_size()
 *
 * @brief: Report whether FCG is supported by this SFE and, if so, the
 *         KMD buffer size (in dwords) needed for the FCG reg-random
 *         command covering num_types (STATS/PHASE) register groups.
 *
 * @top_priv:     SFE top private data
 * @fcg_get_size: In/out args; fcg_supported and kmd_size are filled here
 * @cdm_util_ops: CDM utility ops used to compute the command size
 *
 * @return: 0 on success, negative errno on failure
 */
static int cam_sfe_top_get_fcg_buf_size(
	struct cam_sfe_top_priv *top_priv,
	struct cam_isp_hw_fcg_get_size *fcg_get_size,
	struct cam_cdm_utils_ops *cdm_util_ops)
{
	struct cam_sfe_top_hw_info *hw_info;
	struct cam_sfe_modules_common_reg_offset *modules_hw_info;
	uint32_t num_types;

	if (!top_priv) {
		CAM_ERR(CAM_SFE, "Invalid args");
		return -EINVAL;
	}

	hw_info = top_priv->hw_info;
	if (!hw_info) {
		CAM_ERR(CAM_SFE, "Invalid config params");
		return -EINVAL;
	}

	modules_hw_info = hw_info->modules_hw_info;
	if (!modules_hw_info) {
		CAM_ERR(CAM_SFE, "Invalid modules hw info");
		return -EINVAL;
	}

	if (!modules_hw_info->fcg_supported) {
		fcg_get_size->fcg_supported = false;
		CAM_DBG(CAM_SFE, "FCG is not supported by hardware");
		return 0;
	}

	/* Guard against hw info claiming FCG support without module info */
	if (!modules_hw_info->fcg_module_info) {
		CAM_ERR(CAM_SFE, "Invalid FCG module info");
		return -EINVAL;
	}

	fcg_get_size->fcg_supported = true;
	num_types = fcg_get_size->num_types;
	if (num_types == 0) {
		CAM_ERR(CAM_SFE, "Number of types(STATS/PHASE) requested is empty");
		return -EINVAL;
	}

	/* Each type needs fcg_type_size reg/val entries per channel group */
	fcg_get_size->kmd_size =
		cdm_util_ops->cdm_required_size_reg_random(
			num_types * modules_hw_info->fcg_module_info->fcg_type_size);
	return 0;
}
/*
 * cam_sfe_top_fcg_config()
 *
 * @brief: Dispatch an FCG command: either report the KMD buffer size
 *         FCG needs (get_size_flag set) or apply the FCG update into
 *         the reserved command buffer.
 *
 * @top_priv: SFE top private data
 * @cmd_args: Pointer to struct cam_isp_hw_fcg_cmd
 * @arg_size: Must equal sizeof(struct cam_isp_hw_fcg_cmd)
 *
 * @return: 0 on success, negative errno on failure
 */
static int cam_sfe_top_fcg_config(
	struct cam_sfe_top_priv *top_priv,
	void *cmd_args,
	uint32_t arg_size)
{
	struct cam_isp_hw_fcg_cmd *fcg_cmd;
	struct cam_cdm_utils_ops *cdm_util_ops;
	int rc;

	if (arg_size != sizeof(struct cam_isp_hw_fcg_cmd)) {
		/* %u matches uint32_t arg_size; %zu matches size_t sizeof */
		CAM_ERR(CAM_SFE, "Invalid cmd size, arg_size: %u, expected size: %zu",
			arg_size, sizeof(struct cam_isp_hw_fcg_cmd));
		return -EINVAL;
	}

	fcg_cmd = (struct cam_isp_hw_fcg_cmd *) cmd_args;
	if (!fcg_cmd || !fcg_cmd->res) {
		CAM_ERR(CAM_SFE, "Invalid cmd args");
		return -EINVAL;
	}

	cdm_util_ops =
		(struct cam_cdm_utils_ops *)fcg_cmd->res->cdm_ops;
	if (!cdm_util_ops) {
		CAM_ERR(CAM_SFE, "Invalid CDM ops");
		return -EINVAL;
	}

	if (fcg_cmd->get_size_flag) {
		struct cam_isp_hw_fcg_get_size *fcg_get_size;

		fcg_get_size = &fcg_cmd->u.fcg_get_size;
		rc = cam_sfe_top_get_fcg_buf_size(top_priv, fcg_get_size, cdm_util_ops);
	} else {
		struct cam_isp_hw_fcg_update *fcg_update;

		fcg_update = &fcg_cmd->u.fcg_update;
		rc = cam_sfe_top_apply_fcg_update(top_priv, fcg_update, cdm_util_ops);
	}

	return rc;
}
int cam_sfe_top_process_cmd(void *priv, uint32_t cmd_type, int cam_sfe_top_process_cmd(void *priv, uint32_t cmd_type,
void *cmd_args, uint32_t arg_size) void *cmd_args, uint32_t arg_size)
{ {
@@ -1194,6 +1447,9 @@ int cam_sfe_top_process_cmd(void *priv, uint32_t cmd_type,
case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE: case CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE:
rc = cam_sfe_top_apply_clk_bw_update(top_priv, cmd_args, arg_size); rc = cam_sfe_top_apply_clk_bw_update(top_priv, cmd_args, arg_size);
break; break;
case CAM_ISP_HW_CMD_FCG_CONFIG:
rc = cam_sfe_top_fcg_config(top_priv, cmd_args, arg_size);
break;
case CAM_ISP_HW_CMD_QUERY_CAP: { case CAM_ISP_HW_CMD_QUERY_CAP: {
struct cam_isp_hw_cap *sfe_cap; struct cam_isp_hw_cap *sfe_cap;

查看文件

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#ifndef _CAM_SFE_TOP_H_ #ifndef _CAM_SFE_TOP_H_
@@ -118,14 +118,30 @@ struct cam_sfe_top_common_reg_offset {
uint32_t top_debug[CAM_SFE_TOP_DBG_REG_MAX]; uint32_t top_debug[CAM_SFE_TOP_DBG_REG_MAX];
}; };
struct cam_sfe_fcg_module_info {
uint32_t fcg_index_shift;
uint32_t max_reg_val_pair_size;
uint32_t fcg_type_size;
uint32_t fcg_ch1_phase_index_cfg_0;
uint32_t fcg_ch1_phase_index_cfg_1;
uint32_t fcg_ch2_phase_index_cfg_0;
uint32_t fcg_ch2_phase_index_cfg_1;
uint32_t fcg_ch1_stats_phase_index_cfg_0;
uint32_t fcg_ch1_stats_phase_index_cfg_1;
uint32_t fcg_ch2_stats_phase_index_cfg_0;
uint32_t fcg_ch2_stats_phase_index_cfg_1;
};
struct cam_sfe_modules_common_reg_offset { struct cam_sfe_modules_common_reg_offset {
uint32_t demux_module_cfg; uint32_t demux_module_cfg;
uint32_t demux_xcfa_cfg; uint32_t demux_xcfa_cfg;
uint32_t demux_hdr_cfg; uint32_t demux_hdr_cfg;
uint32_t demux_lcr_sel; uint32_t demux_lcr_sel;
uint32_t hdrc_remo_mod_cfg; uint32_t hdrc_remo_mod_cfg;
uint32_t hdrc_remo_xcfa_bin_cfg; uint32_t hdrc_remo_xcfa_bin_cfg;
uint32_t xcfa_hdrc_remo_out_mux_cfg; uint32_t xcfa_hdrc_remo_out_mux_cfg;
struct cam_sfe_fcg_module_info *fcg_module_info;
bool fcg_supported;
}; };
struct cam_sfe_top_common_reg_data { struct cam_sfe_top_common_reg_data {

查看文件

@@ -516,6 +516,7 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
case CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE: case CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE:
case CAM_ISP_HW_CMD_RDI_LCR_CFG: case CAM_ISP_HW_CMD_RDI_LCR_CFG:
case CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR: case CAM_ISP_HW_CMD_GET_SET_PRIM_SOF_TS_ADDR:
case CAM_ISP_HW_CMD_FCG_CONFIG:
rc = core_info->vfe_top->hw_ops.process_cmd( rc = core_info->vfe_top->hw_ops.process_cmd(
core_info->vfe_top->top_priv, cmd_type, cmd_args, core_info->vfe_top->top_priv, cmd_type, cmd_args,
arg_size); arg_size);

查看文件

@@ -792,6 +792,17 @@ static struct cam_vfe_top_ver4_debug_reg_info tfe980_dbg_reg_info[CAM_TFE_980_NU
}, },
}; };
static struct cam_vfe_ver4_fcg_module_info tfe980_fcg_module_info = {
.fcg_index_shift = 16,
.max_reg_val_pair_size = 6,
.fcg_type_size = 2,
.fcg_phase_index_cfg_0 = 0x0000DE70,
.fcg_phase_index_cfg_1 = 0x0000DE74,
.fcg_reg_ctxt_shift = 0x0,
.fcg_reg_ctxt_sel = 0x0000DFF4,
.fcg_reg_ctxt_mask = 0x7,
};
static struct cam_vfe_top_ver4_hw_info tfe980_top_hw_info = { static struct cam_vfe_top_ver4_hw_info tfe980_top_hw_info = {
.common_reg = &tfe980_top_common_reg, .common_reg = &tfe980_top_common_reg,
.vfe_full_hw_info = { .vfe_full_hw_info = {
@@ -830,6 +841,8 @@ static struct cam_vfe_top_ver4_hw_info tfe980_top_hw_info = {
.debug_reg_info = &tfe980_dbg_reg_info, .debug_reg_info = &tfe980_dbg_reg_info,
.pdaf_lcr_res_mask = tfe980_pdaf_haf_res_mask, .pdaf_lcr_res_mask = tfe980_pdaf_haf_res_mask,
.num_pdaf_lcr_res = ARRAY_SIZE(tfe980_pdaf_haf_res_mask), .num_pdaf_lcr_res = ARRAY_SIZE(tfe980_pdaf_haf_res_mask),
.fcg_module_info = &tfe980_fcg_module_info,
.fcg_mc_supported = true,
}; };
static struct cam_irq_register_set tfe980_bus_irq_reg[2] = { static struct cam_irq_register_set tfe980_bus_irq_reg[2] = {

查看文件

@@ -872,6 +872,14 @@ static struct cam_vfe_top_ver4_debug_reg_info vfe880_dbg_reg_info[CAM_VFE_880_NU
}, },
}; };
static struct cam_vfe_ver4_fcg_module_info vfe880_fcg_module_info = {
.fcg_index_shift = 16,
.max_reg_val_pair_size = 4,
.fcg_type_size = 2,
.fcg_phase_index_cfg_0 = 0xC870,
.fcg_phase_index_cfg_1 = 0xC874,
};
static struct cam_vfe_top_ver4_hw_info vfe880_top_hw_info = { static struct cam_vfe_top_ver4_hw_info vfe880_top_hw_info = {
.common_reg = &vfe880_top_common_reg, .common_reg = &vfe880_top_common_reg,
.vfe_full_hw_info = { .vfe_full_hw_info = {
@@ -907,6 +915,8 @@ static struct cam_vfe_top_ver4_hw_info vfe880_top_hw_info = {
.debug_reg_info = &vfe880_dbg_reg_info, .debug_reg_info = &vfe880_dbg_reg_info,
.pdaf_lcr_res_mask = vfe880_pdaf_lcr_res_mask, .pdaf_lcr_res_mask = vfe880_pdaf_lcr_res_mask,
.num_pdaf_lcr_res = ARRAY_SIZE(vfe880_pdaf_lcr_res_mask), .num_pdaf_lcr_res = ARRAY_SIZE(vfe880_pdaf_lcr_res_mask),
.fcg_module_info = &vfe880_fcg_module_info,
.fcg_supported = true,
}; };
static struct cam_irq_register_set vfe880_bus_irq_reg[2] = { static struct cam_irq_register_set vfe880_bus_irq_reg[2] = {

查看文件

@@ -1096,6 +1096,257 @@ int cam_vfe_top_ver4_write(void *device_priv,
return -EPERM; return -EPERM;
} }
/*
 * cam_vfe_top_apply_fcg_update()
 *
 * @brief: Translate the selected FCG prediction into VFE (IFE/MC_TFE)
 *         PHASE index register writes and emit them as a CDM reg-random
 *         command into the KMD buffer slot reserved at prepare time
 *         (fcg_update->cmd_buf_addr). On MC_TFE, a context-select
 *         (wr_sel) write is emitted before each context's config.
 *
 * @top_priv:     VFE top private data
 * @fcg_update:   FCG update args (cmd buf addr/size, 1-based prediction
 *                index, internal FCG config in @data)
 * @cdm_util_ops: CDM utility ops used to size and write the command
 *
 * @return: 0 on success, negative errno on failure
 */
static int cam_vfe_top_apply_fcg_update(
	struct cam_vfe_top_ver4_priv *top_priv,
	struct cam_isp_hw_fcg_update *fcg_update,
	struct cam_cdm_utils_ops *cdm_util_ops)
{
	struct cam_isp_fcg_config_internal *fcg_config;
	struct cam_isp_ch_ctx_fcg_config_internal *fcg_ch_ctx;
	struct cam_isp_predict_fcg_config_internal *fcg_pr;
	struct cam_vfe_top_ver4_hw_info *hw_info;
	struct cam_vfe_ver4_fcg_module_info *fcg_module_info;
	uint32_t size, fcg_index_shift;
	uint32_t *reg_val_pair;
	uint32_t num_regval_pairs = 0;
	int rc = 0, i, j = 0;

	/* prediction_idx is 1-based; 0 means no valid prediction was picked */
	if (!top_priv || (fcg_update->prediction_idx == 0)) {
		CAM_ERR(CAM_ISP, "Invalid args");
		return -EINVAL;
	}

	hw_info = top_priv->common_data.hw_info;
	fcg_config = (struct cam_isp_fcg_config_internal *)fcg_update->data;
	if (!hw_info || !fcg_config) {
		CAM_ERR(CAM_ISP, "Invalid config params");
		return -EINVAL;
	}

	fcg_module_info = hw_info->fcg_module_info;
	if (!fcg_module_info) {
		CAM_ERR(CAM_ISP, "Invalid FCG common data");
		return -EINVAL;
	}

	reg_val_pair = kcalloc(fcg_module_info->max_reg_val_pair_size, sizeof(uint32_t),
		GFP_KERNEL);
	if (!reg_val_pair) {
		CAM_ERR(CAM_ISP, "Failed allocating memory for reg val pair");
		return -ENOMEM;
	}

	fcg_index_shift = fcg_module_info->fcg_index_shift;
	/* j counts reg AND val slots written into reg_val_pair */
	for (i = 0, j = 0; i < fcg_config->num_ch_ctx; i++) {
		if (j >= fcg_module_info->max_reg_val_pair_size) {
			CAM_ERR(CAM_ISP, "reg_val_pair %d exceeds the array limit %u",
				j, fcg_module_info->max_reg_val_pair_size);
			rc = -ENOMEM;
			goto kfree;
		}

		fcg_ch_ctx = &fcg_config->ch_ctx_fcg_configs[i];
		/* NOTE(review): address of an array element — this check can
		 * never be true; kept as-is, it is dead code.
		 */
		if (!fcg_ch_ctx) {
			CAM_ERR(CAM_ISP, "Failed in FCG channel/context dereference");
			rc = -EINVAL;
			goto kfree;
		}
		/* Predictions are stored in serial order; index is 1-based */
		fcg_pr = &fcg_ch_ctx->predicted_fcg_configs[
			fcg_update->prediction_idx - 1];

		/* For VFE/MC_TFE, only PHASE should be enabled */
		if (fcg_ch_ctx->fcg_enable_mask & CAM_ISP_FCG_ENABLE_PHASE) {
			switch (fcg_ch_ctx->fcg_ch_ctx_id) {
			/* Same value as CAM_ISP_FCG_MASK_CH0/1/2 to support both VFE and MC_TFE */
			case CAM_ISP_MULTI_CTXT0_MASK:
				/* On MC_TFE, select the target context via
				 * wr_sel before writing its phase config.
				 */
				if (hw_info->fcg_mc_supported) {
					CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
						fcg_module_info->fcg_reg_ctxt_sel,
						(fcg_module_info->fcg_reg_ctxt_mask &
						(fcg_ch_ctx->fcg_ch_ctx_id <<
						fcg_module_info->fcg_reg_ctxt_shift)));
					CAM_DBG(CAM_ISP,
						"Program FCG registers for MC_TFE, ch_ctx_id: 0x%x, sel_wr: 0x%x",
						fcg_ch_ctx->fcg_ch_ctx_id,
						(fcg_module_info->fcg_reg_ctxt_mask &
						(fcg_ch_ctx->fcg_ch_ctx_id <<
						fcg_module_info->fcg_reg_ctxt_shift)));
				}
				/* cfg_0 packs R (low bits) and G (shifted);
				 * cfg_1 carries B.
				 */
				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_phase_index_cfg_0,
					(fcg_pr->phase_index_r |
					(fcg_pr->phase_index_g << fcg_index_shift)));
				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_phase_index_cfg_1,
					fcg_pr->phase_index_b);
				CAM_DBG(CAM_ISP,
					"Program FCG registers for IFE/MC_TFE, ch_ctx_id: 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_pr->phase_index_r |
					(fcg_pr->phase_index_g << fcg_index_shift)),
					fcg_pr->phase_index_b);
				break;
			case CAM_ISP_MULTI_CTXT1_MASK:
			case CAM_ISP_MULTI_CTXT2_MASK:
				/* Contexts 1/2 only exist on multi-context
				 * (MC_TFE) hardware.
				 */
				if (!hw_info->fcg_mc_supported) {
					CAM_ERR(CAM_ISP,
						"No support for multi context for FCG on ch_ctx_id: 0x%x",
						fcg_ch_ctx->fcg_ch_ctx_id);
					rc = -EINVAL;
					goto kfree;
				}
				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_reg_ctxt_sel,
					(fcg_module_info->fcg_reg_ctxt_mask &
					(fcg_ch_ctx->fcg_ch_ctx_id <<
					fcg_module_info->fcg_reg_ctxt_shift)));
				CAM_DBG(CAM_ISP,
					"Program FCG registers for MC_TFE, ch_ctx_id: 0x%x, sel_wr: 0x%x",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_module_info->fcg_reg_ctxt_mask &
					(fcg_ch_ctx->fcg_ch_ctx_id <<
					fcg_module_info->fcg_reg_ctxt_shift)));
				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_phase_index_cfg_0,
					(fcg_pr->phase_index_r |
					(fcg_pr->phase_index_g << fcg_index_shift)));
				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
					fcg_module_info->fcg_phase_index_cfg_1,
					fcg_pr->phase_index_b);
				CAM_DBG(CAM_ISP,
					"Program FCG registers for MC_TFE, ch_ctx_id: 0x%x, phase_index_cfg_0: %u, phase_index_cfg_1: %u",
					fcg_ch_ctx->fcg_ch_ctx_id,
					(fcg_pr->phase_index_r |
					(fcg_pr->phase_index_g << fcg_index_shift)),
					fcg_pr->phase_index_b);
				break;
			default:
				CAM_ERR(CAM_ISP, "Unsupported ch_ctx_id: 0x%x",
					fcg_ch_ctx->fcg_ch_ctx_id);
				rc = -EINVAL;
				goto kfree;
			}
		}
	}

	/* Two slots (reg, val) per pair */
	num_regval_pairs = j / 2;

	if (num_regval_pairs) {
		size = cdm_util_ops->cdm_required_size_reg_random(
			num_regval_pairs);
		/* cmd_size was reserved at prepare time; sizes are in dwords */
		if ((size * 4) != fcg_update->cmd_size) {
			CAM_ERR(CAM_ISP,
				"Failed! Buf size:%d is wrong, expected size: %d",
				fcg_update->cmd_size, size * 4);
			rc = -ENOMEM;
			goto kfree;
		}
		cdm_util_ops->cdm_write_regrandom(
			(uint32_t *)fcg_update->cmd_buf_addr,
			num_regval_pairs, reg_val_pair);
	} else {
		CAM_WARN(CAM_ISP, "No reg val pairs");
	}

kfree:
	kfree(reg_val_pair);
	return rc;
}
/**
 * cam_vfe_top_get_fcg_buf_size()
 *
 * @brief Compute the KMD buffer size needed to hold the FCG register
 *        writes for the requested number of FCG types (and, for MC_TFE,
 *        the per-context wr_sel writes).
 *
 * @top_priv:      VFE top private data
 * @fcg_get_size:  In/out arguments; num_types (and num_ctxs for MC) in,
 *                 fcg_supported and kmd_size out
 * @cdm_util_ops:  CDM utility ops used to size a reg-random command
 *
 * @return 0 on success (including "FCG unsupported"), negative errno
 *         on invalid arguments.
 */
static int cam_vfe_top_get_fcg_buf_size(
	struct cam_vfe_top_ver4_priv *top_priv,
	struct cam_isp_hw_fcg_get_size *fcg_get_size,
	struct cam_cdm_utils_ops *cdm_util_ops)
{
	struct cam_vfe_top_ver4_hw_info *hw_info;
	struct cam_vfe_ver4_fcg_module_info *fcg_module_info;
	uint32_t num_types, num_reg_val;

	if (!top_priv) {
		CAM_ERR(CAM_ISP, "Invalid args");
		return -EINVAL;
	}

	hw_info = top_priv->common_data.hw_info;
	if (!hw_info) {
		CAM_ERR(CAM_ISP, "Invalid config params");
		return -EINVAL;
	}

	/* Not an error: report lack of support so UMD can skip FCG */
	if (!hw_info->fcg_supported &&
		!hw_info->fcg_mc_supported) {
		fcg_get_size->fcg_supported = false;
		CAM_DBG(CAM_ISP, "FCG is not supported by hardware");
		return 0;
	}

	fcg_module_info = hw_info->fcg_module_info;
	/*
	 * FCG advertised as supported but module info missing is a
	 * hw_info table bug; fail instead of dereferencing NULL below.
	 */
	if (!fcg_module_info) {
		CAM_ERR(CAM_ISP, "Invalid FCG module info");
		return -EINVAL;
	}

	fcg_get_size->fcg_supported = true;
	num_types = fcg_get_size->num_types;
	if (num_types == 0) {
		CAM_ERR(CAM_ISP, "Number of types(STATS/PHASE) requested is empty");
		return -EINVAL;
	}

	/* Each FCG type needs fcg_type_size reg/val entries */
	num_reg_val = num_types * fcg_module_info->fcg_type_size;

	/* Count for wr_sel register in MC_TFE */
	if (hw_info->fcg_mc_supported)
		num_reg_val += fcg_get_size->num_ctxs;

	fcg_get_size->kmd_size =
		cdm_util_ops->cdm_required_size_reg_random(num_reg_val);

	return 0;
}
/**
 * cam_vfe_top_fcg_config()
 *
 * @brief Dispatch an FCG command from the ISP context: either report
 *        the KMD buffer size required for FCG programming
 *        (get_size_flag set) or apply the FCG register updates into
 *        the provided command buffer.
 *
 * @top_priv:  VFE top private data
 * @cmd_args:  struct cam_isp_hw_fcg_cmd pointer
 * @arg_size:  Size of @cmd_args; must match the cmd struct exactly
 *
 * @return 0 on success, negative errno on invalid arguments or
 *         failure of the delegated get-size/update helper.
 */
static int cam_vfe_top_fcg_config(
	struct cam_vfe_top_ver4_priv *top_priv,
	void *cmd_args,
	uint32_t arg_size)
{
	struct cam_isp_hw_fcg_cmd *fcg_cmd;
	struct cam_cdm_utils_ops *cdm_util_ops;
	int rc;

	if (arg_size != sizeof(struct cam_isp_hw_fcg_cmd)) {
		/* %u/%zu: arg_size is uint32_t and sizeof() yields size_t */
		CAM_ERR(CAM_ISP, "Invalid cmd size, arg_size: %u, expected size: %zu",
			arg_size, sizeof(struct cam_isp_hw_fcg_cmd));
		return -EINVAL;
	}

	fcg_cmd = (struct cam_isp_hw_fcg_cmd *) cmd_args;
	if (!fcg_cmd || !fcg_cmd->res) {
		CAM_ERR(CAM_ISP, "Invalid cmd args");
		return -EINVAL;
	}

	/* CDM ops are attached to the resource by the owning context */
	cdm_util_ops =
		(struct cam_cdm_utils_ops *)fcg_cmd->res->cdm_ops;
	if (!cdm_util_ops) {
		CAM_ERR(CAM_ISP, "Invalid CDM ops");
		return -EINVAL;
	}

	if (fcg_cmd->get_size_flag) {
		struct cam_isp_hw_fcg_get_size *fcg_get_size;

		fcg_get_size = &fcg_cmd->u.fcg_get_size;
		rc = cam_vfe_top_get_fcg_buf_size(top_priv,
			fcg_get_size, cdm_util_ops);
	} else {
		struct cam_isp_hw_fcg_update *fcg_update;

		fcg_update = &fcg_cmd->u.fcg_update;
		rc = cam_vfe_top_apply_fcg_update(top_priv,
			fcg_update, cdm_util_ops);
	}

	return rc;
}
int cam_vfe_top_ver4_process_cmd(void *device_priv, uint32_t cmd_type, int cam_vfe_top_ver4_process_cmd(void *device_priv, uint32_t cmd_type,
void *cmd_args, uint32_t arg_size) void *cmd_args, uint32_t arg_size)
{ {
@@ -1207,6 +1458,9 @@ int cam_vfe_top_ver4_process_cmd(void *device_priv, uint32_t cmd_type,
sof_addr_args); sof_addr_args);
} }
break; break;
case CAM_ISP_HW_CMD_FCG_CONFIG:
rc = cam_vfe_top_fcg_config(top_priv, cmd_args, arg_size);
break;
default: default:
rc = -EINVAL; rc = -EINVAL;
CAM_ERR(CAM_ISP, "VFE:%u Error, Invalid cmd:%d", CAM_ERR(CAM_ISP, "VFE:%u Error, Invalid cmd:%d",

查看文件

@@ -153,28 +153,43 @@ struct cam_vfe_top_ver4_debug_reg_info {
char *clc_name; char *clc_name;
}; };
/**
 * struct cam_vfe_ver4_fcg_module_info - FCG register layout info for VFE top
 *
 * @fcg_index_shift:       Shift applied to the G phase index when packing
 *                         R and G into PHASE_INDEX_CFG_0
 * @max_reg_val_pair_size: Capacity of the reg/val pair scratch array —
 *                         presumably an upper bound used when allocating
 *                         it; TODO(review) confirm against the allocator
 * @fcg_type_size:         Number of reg/val entries emitted per FCG type,
 *                         used to size the KMD command buffer
 * @fcg_phase_index_cfg_0: Offset of the PHASE_INDEX_CFG_0 register
 *                         (packed R | G phase indices)
 * @fcg_phase_index_cfg_1: Offset of the PHASE_INDEX_CFG_1 register
 *                         (B phase index)
 * @fcg_reg_ctxt_shift:    Shift of the channel/context id field in the
 *                         context select register
 * @fcg_reg_ctxt_mask:     Mask of the channel/context id field in the
 *                         context select register
 * @fcg_reg_ctxt_sel:      Offset of the context select (wr_sel) register
 *                         programmed before each MC_TFE context's FCG regs
 */
struct cam_vfe_ver4_fcg_module_info {
	uint32_t fcg_index_shift;
	uint32_t max_reg_val_pair_size;
	uint32_t fcg_type_size;
	uint32_t fcg_phase_index_cfg_0;
	uint32_t fcg_phase_index_cfg_1;
	uint32_t fcg_reg_ctxt_shift;
	uint32_t fcg_reg_ctxt_mask;
	uint32_t fcg_reg_ctxt_sel;
};
struct cam_vfe_top_ver4_hw_info { struct cam_vfe_top_ver4_hw_info {
struct cam_vfe_top_ver4_reg_offset_common *common_reg; struct cam_vfe_top_ver4_reg_offset_common *common_reg;
struct cam_vfe_ver4_path_hw_info vfe_full_hw_info; struct cam_vfe_ver4_path_hw_info vfe_full_hw_info;
struct cam_vfe_ver4_path_hw_info pdlib_hw_info; struct cam_vfe_ver4_path_hw_info pdlib_hw_info;
struct cam_vfe_ver4_path_hw_info *rdi_hw_info; struct cam_vfe_ver4_path_hw_info *rdi_hw_info;
struct cam_vfe_ver4_path_reg_data *reg_data; struct cam_vfe_ver4_path_reg_data *reg_data;
struct cam_vfe_top_ver4_wr_client_desc *wr_client_desc; struct cam_vfe_top_ver4_wr_client_desc *wr_client_desc;
struct cam_vfe_top_ver4_module_desc *ipp_module_desc; struct cam_vfe_top_ver4_module_desc *ipp_module_desc;
struct cam_vfe_bayer_ver4_module_desc *bayer_module_desc; struct cam_vfe_bayer_ver4_module_desc *bayer_module_desc;
uint32_t num_reg; uint32_t num_reg;
struct cam_vfe_top_ver4_debug_reg_info (*debug_reg_info)[][8]; struct cam_vfe_top_ver4_debug_reg_info (*debug_reg_info)[][8];
uint32_t num_mux; uint32_t num_mux;
uint32_t num_path_port_map; uint32_t num_path_port_map;
uint32_t mux_type[CAM_VFE_TOP_MUX_MAX]; uint32_t mux_type[CAM_VFE_TOP_MUX_MAX];
uint32_t path_port_map[CAM_ISP_HW_PATH_PORT_MAP_MAX][2]; uint32_t path_port_map[CAM_ISP_HW_PATH_PORT_MAP_MAX][2];
uint32_t num_rdi; uint32_t num_rdi;
uint32_t num_top_errors; uint32_t num_top_errors;
struct cam_vfe_top_ver4_top_err_irq_desc *top_err_desc; struct cam_vfe_top_ver4_top_err_irq_desc *top_err_desc;
uint32_t num_pdaf_violation_errors; uint32_t num_pdaf_violation_errors;
struct cam_vfe_top_ver4_pdaf_violation_desc *pdaf_violation_desc; struct cam_vfe_top_ver4_pdaf_violation_desc *pdaf_violation_desc;
struct cam_vfe_top_ver4_pdaf_lcr_res_info *pdaf_lcr_res_mask; struct cam_vfe_top_ver4_pdaf_lcr_res_info *pdaf_lcr_res_mask;
uint32_t num_pdaf_lcr_res; uint32_t num_pdaf_lcr_res;
struct cam_vfe_ver4_fcg_module_info *fcg_module_info;
bool fcg_supported;
bool fcg_mc_supported;
}; };
struct cam_vfe_ver4_path_reg_data { struct cam_vfe_ver4_path_reg_data {
@@ -216,4 +231,10 @@ int cam_vfe_top_ver4_deinit(struct cam_vfe_top **vfe_top);
VFE_DBG_INFO(28, name8), \ VFE_DBG_INFO(28, name8), \
} }
/*
 * CAM_VFE_ADD_REG_VAL_PAIR() - append one register offset/value pair to a
 * flat u32 array, advancing @index by two.
 *
 * All arguments are parenthesized in the expansion so expression
 * arguments bind correctly; @index is evaluated (and incremented) twice,
 * so it must be a plain lvalue without side effects.
 */
#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)  \
	do {                                                     \
		(buf_array)[(index)++] = (offset);               \
		(buf_array)[(index)++] = (val);                  \
	} while (0)
#endif /* _CAM_VFE_TOP_VER4_H_ */ #endif /* _CAM_VFE_TOP_VER4_H_ */