msm: camera: common: Optimize cam context memory

In the current implementation, cam_ctx_req has fixed-size arrays of
hw_update_entries, in_map_entries and out_map_entries, and each
context has an array of N requests. Memory for all contexts is
allocated during probe, which leaves a large amount of memory
unutilized.
This commit moves the memory allocation to context acquire time
and frees it during context release. Dynamic allocation is now
used in place of the fixed-size arrays.
A top-level calculation shows a memory reduction of around 2M-2.5M
across all the camera drivers with this change.

CRs-Fixed: 2830502
Change-Id: Id63cf2a52272e2a419704dc95100694e384330c3
Signed-off-by: Gaurav Jindal <gjindal@codeaurora.org>
Gaurav Jindal
2020-10-13 00:37:41 +05:30
committed by Gerrit - the friendly Code Review server
parent c6b2796833
commit 1a5797c6a8
11 changed files with 254 additions and 32 deletions
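To put the 2M-2.5M figure in perspective, below is a back-of-the-envelope model of the before/after footprint. All constants are illustrative assumptions, not the driver's real CAM_CTX_CFG_MAX, request count, or entry sizes; the real savings compound across all eleven drivers touched here.

#include <stdio.h>

/* Illustrative assumptions only -- not the driver's real constants. */
#define NUM_CONTEXTS   8	/* contexts created at probe            */
#define NUM_REQUESTS  16	/* requests per context                 */
#define CFG_MAX       20	/* stand-in for CAM_CTX_CFG_MAX         */
#define ENTRY_SIZE    24	/* stand-in for sizeof(an entry struct) */

int main(void)
{
	/* Before: three CFG_MAX arrays embedded in every request of every
	 * context, all allocated at probe whether the context is used or not. */
	size_t before = (size_t)NUM_CONTEXTS * NUM_REQUESTS * 3 * CFG_MAX * ENTRY_SIZE;

	/* After: the same three tables exist only for acquired contexts
	 * (assume two are active at a time). */
	size_t after = (size_t)2 * NUM_REQUESTS * 3 * CFG_MAX * ENTRY_SIZE;

	printf("probe-time: %zu KiB, acquire-time: %zu KiB\n",
		before / 1024, after / 1024);
	return 0;
}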

View File

@@ -663,6 +663,7 @@ int cam_context_init(struct cam_context *ctx,
 		INIT_LIST_HEAD(&ctx->req_list[i].list);
 		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
 		ctx->req_list[i].ctx = ctx;
+		ctx->req_list[i].index = i;
 	}
 	ctx->state = CAM_CTX_AVAILABLE;
 	ctx->state_machine = NULL;

View File

@@ -60,6 +60,7 @@ enum cam_context_state {
  * @num_out_map_entries: Number of out map entries
  * @num_in_acked: Number of in fence acked
  * @num_out_acked: Number of out fence acked
+ * @index: Index of request in the list
  * @flushed: Request is flushed
  * @ctx: The context to which this request belongs
  * @pf_data: Page fault debug data
@@ -70,14 +71,15 @@ struct cam_ctx_request {
 	uint32_t status;
 	uint64_t request_id;
 	void *req_priv;
-	struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX];
+	struct cam_hw_update_entry *hw_update_entries;
 	uint32_t num_hw_update_entries;
-	struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
+	struct cam_hw_fence_map_entry *in_map_entries;
 	uint32_t num_in_map_entries;
-	struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
+	struct cam_hw_fence_map_entry *out_map_entries;
 	uint32_t num_out_map_entries;
 	atomic_t num_in_acked;
 	uint32_t num_out_acked;
+	uint32_t index;
 	int flushed;
 	struct cam_context *ctx;
 	struct cam_hw_mgr_dump_pf_data pf_data;
@@ -198,6 +200,12 @@ struct cam_ctx_ops {
  * @node: The main node to which this context belongs
  * @sync_mutex: mutex to sync with sync cb thread
  * @last_flush_req: Last request to flush
+ * @max_hw_update_entries: Max hw update entries
+ * @max_in_map_entries: Max in map entries
+ * @max_out_map_entries: Max out map entries
+ * @hw_update_entry: Hw update entries
+ * @in_map_entries: In map entries
+ * @out_map_entries: Out map entries
  *
  */
struct cam_context {
@@ -234,6 +242,12 @@ struct cam_context {
 	void *node;
 	struct mutex sync_mutex;
 	uint32_t last_flush_req;
+	uint32_t max_hw_update_entries;
+	uint32_t max_in_map_entries;
+	uint32_t max_out_map_entries;
+	struct cam_hw_update_entry *hw_update_entry;
+	struct cam_hw_fence_map_entry *in_map_entries;
+	struct cam_hw_fence_map_entry *out_map_entries;
 };
 
 /**
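The three new pointers above are one flat table each per context; every request then owns a fixed-size window of each table, addressed by the new index field. A minimal standalone sketch of that slicing scheme (hypothetical entry/request/context types, not the driver's code):

#include <stdlib.h>

struct entry { int data; };

struct request {
	unsigned int index;
	struct entry *hw_update_entries;	/* window into the ctx table */
};

struct context {
	unsigned int max_hw_update_entries;
	struct entry *hw_update_entry;		/* one flat per-context table */
};

/* Give request i entries [i * max, (i + 1) * max) of the shared table. */
static int slice_table(struct context *ctx, struct request *reqs,
	unsigned int num_reqs)
{
	ctx->hw_update_entry = calloc(
		(size_t)num_reqs * ctx->max_hw_update_entries,
		sizeof(struct entry));
	if (!ctx->hw_update_entry)
		return -1;

	for (unsigned int i = 0; i < num_reqs; i++) {
		reqs[i].index = i;
		reqs[i].hw_update_entries =
			&ctx->hw_update_entry[i * ctx->max_hw_update_entries];
	}
	return 0;
}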

View File

@@ -31,6 +31,82 @@ static inline int cam_context_validate_thread(void)
 	return 0;
 }
+
+static void cam_context_free_mem_hw_entries(struct cam_context *ctx)
+{
+	kfree(ctx->out_map_entries);
+	kfree(ctx->in_map_entries);
+	kfree(ctx->hw_update_entry);
+
+	ctx->out_map_entries = NULL;
+	ctx->in_map_entries = NULL;
+	ctx->hw_update_entry = NULL;
+}
+
+static int cam_context_allocate_mem_hw_entries(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_ctx_request *temp_req;
+	size_t num_entries = 0;
+
+	CAM_DBG(CAM_CTXT,
+		"%s[%d] num: max_hw %u in_map %u out_map %u req %u",
+		ctx->dev_name, ctx->ctx_id,
+		ctx->max_hw_update_entries,
+		ctx->max_in_map_entries,
+		ctx->max_out_map_entries,
+		ctx->req_size);
+
+	num_entries = ctx->max_hw_update_entries * ctx->req_size;
+	ctx->hw_update_entry = kcalloc(num_entries,
+		sizeof(struct cam_hw_update_entry), GFP_KERNEL);
+	if (!ctx->hw_update_entry) {
+		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
+		return -ENOMEM;
+	}
+
+	num_entries = ctx->max_in_map_entries * ctx->req_size;
+	ctx->in_map_entries = kcalloc(num_entries,
+		sizeof(struct cam_hw_fence_map_entry), GFP_KERNEL);
+	if (!ctx->in_map_entries) {
+		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto free_mem;
+	}
+
+	num_entries = ctx->max_out_map_entries * ctx->req_size;
+	ctx->out_map_entries = kcalloc(num_entries,
+		sizeof(struct cam_hw_fence_map_entry), GFP_KERNEL);
+	if (!ctx->out_map_entries) {
+		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto free_mem;
+	}
+
+	/* hand each free request its fixed-size window of the tables */
+	list_for_each_entry_safe(req, temp_req, &ctx->free_req_list, list) {
+		req->hw_update_entries =
+			&ctx->hw_update_entry[req->index * ctx->max_hw_update_entries];
+		req->in_map_entries =
+			&ctx->in_map_entries[req->index * ctx->max_in_map_entries];
+		req->out_map_entries =
+			&ctx->out_map_entries[req->index * ctx->max_out_map_entries];
+	}
+
+	return rc;
+
+free_mem:
+	cam_context_free_mem_hw_entries(ctx);
+	return rc;
+}
 int cam_context_buf_done_from_hw(struct cam_context *ctx,
 	void *done_event_data, uint32_t evt_id)
 {
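Two details worth noting in the helper pair above: allocating one flat table per entry type, rather than one kcalloc() per request, keeps the acquire path down to three allocations per context; and because kfree(NULL) is a no-op, cam_context_free_mem_hw_entries() is safe to use both as the normal release path and as the goto free_mem cleanup path when only some of the tables were allocated.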
@@ -252,6 +328,7 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
 	arg.active_req = false;
 	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	cam_context_free_mem_hw_entries(ctx);
 	ctx->ctxt_to_hw_map = NULL;
 	ctx->session_hdl = -1;
@@ -359,9 +436,15 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
 		return -ENOMEM;
 	}
-	memset(req, 0, sizeof(*req));
 	INIT_LIST_HEAD(&req->list);
 	req->ctx = ctx;
+	req->num_hw_update_entries = 0;
+	req->num_in_map_entries = 0;
+	req->num_out_map_entries = 0;
+	req->num_out_acked = 0;
+	req->flushed = 0;
+	atomic_set(&req->num_in_acked, 0);
+	memset(&req->pf_data, 0, sizeof(struct cam_hw_mgr_dump_pf_data));
 
 	/* for config dev, only memory handle is supported */
 	/* map packet from the memhandle */
@@ -408,12 +491,12 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
 	cfg.packet = packet;
 	cfg.remain_len = remain_len;
 	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
-	cfg.max_hw_update_entries = CAM_CTX_CFG_MAX;
+	cfg.max_hw_update_entries = ctx->max_hw_update_entries;
 	cfg.num_hw_update_entries = req->num_hw_update_entries;
 	cfg.hw_update_entries = req->hw_update_entries;
-	cfg.max_out_map_entries = CAM_CTX_CFG_MAX;
+	cfg.max_out_map_entries = ctx->max_out_map_entries;
 	cfg.out_map_entries = req->out_map_entries;
-	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
+	cfg.max_in_map_entries = ctx->max_in_map_entries;
 	cfg.in_map_entries = req->in_map_entries;
 	cfg.pf_data = &(req->pf_data);
@@ -555,6 +638,15 @@ int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
 	param.num_acq = cmd->num_resources;
 	param.acquire_info = cmd->resource_hdl;
+
+	/* Allocate memory for hw and map entries */
+	rc = cam_context_allocate_mem_hw_entries(ctx);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Alloc entries failed",
+			ctx->dev_name, ctx->ctx_id);
+		goto end;
+	}
+
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
 		&param);
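The acquire/release pairing above is the heart of the change: the tables are allocated before the HW manager reserves resources, and freed only after it releases them. A simplified sketch of the ordering, assuming the driver's cam_context and cam_hw_mgr_intf headers are in scope; logging, locking, and the per-device error paths are omitted, and the rollback branch on hw_acquire failure is illustrative rather than taken from this commit:

static int ctx_acquire_sketch(struct cam_context *ctx,
	struct cam_hw_acquire_args *param)
{
	int rc;

	/* 1. size and slice the per-request tables for this context */
	rc = cam_context_allocate_mem_hw_entries(ctx);
	if (rc)
		return rc;	/* nothing reserved yet, nothing to undo */

	/* 2. only then reserve the hardware */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, param);
	if (rc)
		cam_context_free_mem_hw_entries(ctx);	/* roll back step 1 */
	return rc;
}

static void ctx_release_sketch(struct cam_context *ctx,
	struct cam_hw_release_args *arg)
{
	/* mirror image: hardware first, then the tables */
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, arg);
	cam_context_free_mem_hw_entries(ctx);
}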

View File

@@ -1729,6 +1729,9 @@ int cam_custom_dev_context_init(struct cam_custom_context *ctx,
 		return rc;
 	}
+	ctx_base->max_hw_update_entries = CAM_CTX_CFG_MAX;
+	ctx_base->max_in_map_entries = CAM_CTX_CFG_MAX;
+	ctx_base->max_out_map_entries = CAM_CTX_CFG_MAX;
 
 	/* link camera context with custom HW context */
 	ctx_base->state_machine = cam_custom_dev_ctx_top_state_machine;
 	ctx_base->ctx_priv = ctx;

View File

@@ -242,6 +242,9 @@ int cam_fd_context_init(struct cam_fd_context *fd_ctx,
 	fd_ctx->base = base_ctx;
 	base_ctx->ctx_priv = fd_ctx;
 	base_ctx->state_machine = cam_fd_ctx_state_machine;
+	base_ctx->max_hw_update_entries = CAM_CTX_CFG_MAX;
+	base_ctx->max_in_map_entries = CAM_CTX_CFG_MAX;
+	base_ctx->max_out_map_entries = CAM_CTX_CFG_MAX;
 
 	return rc;
 }

View File

@@ -294,6 +294,9 @@ int cam_icp_context_init(struct cam_icp_context *ctx,
 	ctx->base->state_machine = cam_icp_ctx_state_machine;
 	ctx->base->ctx_priv = ctx;
+	ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX;
+	ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX;
+	ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX;
 	ctx->ctxt_to_hw_map = NULL;
 
 err:

View File

@@ -4274,6 +4274,16 @@ static struct cam_ctx_ops
 	},
 };
 
+static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
+{
+	kfree(ctx->out_map_entries);
+	kfree(ctx->in_map_entries);
+	kfree(ctx->hw_update_entry);
+
+	ctx->out_map_entries = NULL;
+	ctx->in_map_entries = NULL;
+	ctx->hw_update_entry = NULL;
+}
+
 static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
 	void *cmd)
 {
@@ -4325,6 +4335,7 @@ static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
 	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
 	spin_unlock_bh(&ctx->lock);
+	__cam_isp_ctx_free_mem_hw_entries(ctx);
 	ctx->state = CAM_CTX_ACQUIRED;
 	trace_cam_context_state("ISP", ctx);
@@ -4390,6 +4401,8 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
 	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
 	spin_unlock_bh(&ctx->lock);
+
+	__cam_isp_ctx_free_mem_hw_entries(ctx);
 	ctx->state = CAM_CTX_AVAILABLE;
 	trace_cam_context_state("ISP", ctx);
@@ -4491,10 +4504,10 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	cfg.packet = packet;
 	cfg.remain_len = remain_len;
 	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
-	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	cfg.max_hw_update_entries = ctx->max_hw_update_entries;
 	cfg.hw_update_entries = req_isp->cfg;
-	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
-	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.max_out_map_entries = ctx->max_out_map_entries;
+	cfg.max_in_map_entries = ctx->max_in_map_entries;
 	cfg.out_map_entries = req_isp->fence_map_out;
 	cfg.in_map_entries = req_isp->fence_map_in;
 	cfg.priv = &req_isp->hw_update_data;
@@ -4606,6 +4619,65 @@ free_req:
 	return rc;
 }
 
+static int __cam_isp_ctx_allocate_mem_hw_entries(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_ctx_request *temp_req;
+	struct cam_isp_ctx_req *req_isp;
+	size_t num_entries = 0;
+
+	num_entries = ctx->max_hw_update_entries * CAM_ISP_CTX_REQ_MAX;
+	ctx->hw_update_entry = kcalloc(num_entries,
+		sizeof(struct cam_hw_update_entry), GFP_KERNEL);
+	if (!ctx->hw_update_entry) {
+		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
+		return -ENOMEM;
+	}
+
+	num_entries = ctx->max_in_map_entries * CAM_ISP_CTX_REQ_MAX;
+	ctx->in_map_entries = kcalloc(num_entries,
+		sizeof(struct cam_hw_fence_map_entry), GFP_KERNEL);
+	if (!ctx->in_map_entries) {
+		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	num_entries = ctx->max_out_map_entries * CAM_ISP_CTX_REQ_MAX;
+	ctx->out_map_entries = kcalloc(num_entries,
+		sizeof(struct cam_hw_fence_map_entry), GFP_KERNEL);
+	if (!ctx->out_map_entries) {
+		CAM_ERR(CAM_CTXT, "%s[%d] no memory", ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* point each ISP request at its window of the per-context tables */
+	list_for_each_entry_safe(req, temp_req, &ctx->free_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		req_isp->cfg =
+			&ctx->hw_update_entry[req->index * ctx->max_hw_update_entries];
+		req_isp->fence_map_in =
+			&ctx->in_map_entries[req->index * ctx->max_in_map_entries];
+		req_isp->fence_map_out =
+			&ctx->out_map_entries[req->index * ctx->max_out_map_entries];
+	}
+
+	return rc;
+
+end:
+	__cam_isp_ctx_free_mem_hw_entries(ctx);
+	return rc;
+}
 static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
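The ISP context keeps its own copy of the allocate/free helpers because its per-request entries live in struct cam_isp_ctx_req (cfg, fence_map_in, fence_map_out) rather than in struct cam_ctx_request, and because it sizes the tables by CAM_ISP_CTX_REQ_MAX instead of the generic ctx->req_size. The slicing arithmetic is otherwise identical to the common helper.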
@@ -4671,6 +4743,13 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	param.num_acq = cmd->num_resources;
 	param.acquire_info = (uintptr_t) isp_res;
 
+	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
+			ctx->ctx_id);
+		goto free_res;
+	}
+
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
 		&param);
@@ -4833,6 +4912,13 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
 	param.acquire_info_size = cmd->data_size;
 	param.acquire_info = (uint64_t) acquire_hw_info;
 
+	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
+			ctx->ctx_id);
+		goto free_res;
+	}
+
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
 		&param);
@@ -4976,6 +5062,13 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
 	param.acquire_info_size = cmd->data_size;
 	param.acquire_info = (uint64_t) acquire_hw_info;
 
+	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
+			ctx->ctx_id);
+		goto free_res;
+	}
+
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
 		&param);
@@ -6014,6 +6107,10 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
 	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
 	ctx_base->ctx_priv = ctx;
+	ctx_base->max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	ctx_base->max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	ctx_base->max_out_map_entries = CAM_ISP_CTX_RES_MAX;
+
 	/* initializing current state for error logging */
 	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
 		ctx->cam_isp_ctx_state_monitor[i].curr_state =

View File

@@ -166,12 +166,11 @@ struct cam_isp_ctx_irq_ops {
  */
 struct cam_isp_ctx_req {
 	struct cam_ctx_request *base;
-	struct cam_hw_update_entry cfg[CAM_ISP_CTX_CFG_MAX];
+	struct cam_hw_update_entry *cfg;
 	uint32_t num_cfg;
-	struct cam_hw_fence_map_entry fence_map_out
-		[CAM_ISP_CTX_RES_MAX];
+	struct cam_hw_fence_map_entry *fence_map_out;
 	uint32_t num_fence_map_out;
-	struct cam_hw_fence_map_entry fence_map_in[CAM_ISP_CTX_RES_MAX];
+	struct cam_hw_fence_map_entry *fence_map_in;
 	uint32_t num_fence_map_in;
 	uint32_t num_acked;
 	uint32_t num_deferred_acks;

View File

@@ -198,6 +198,9 @@ int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
 	ctx_base->state_machine = cam_jpeg_ctx_state_machine;
 	ctx_base->ctx_priv = ctx;
+	ctx_base->max_hw_update_entries = CAM_CTX_CFG_MAX;
+	ctx_base->max_in_map_entries = CAM_CTX_CFG_MAX;
+	ctx_base->max_out_map_entries = CAM_CTX_CFG_MAX;
 
 err:
 	return rc;
 }

View File

@@ -248,6 +248,10 @@ int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
 	base_ctx->ctx_priv = lrme_ctx;
 	base_ctx->state_machine = cam_lrme_ctx_state_machine;
+	base_ctx->max_hw_update_entries = CAM_CTX_CFG_MAX;
+	base_ctx->max_in_map_entries = CAM_CTX_CFG_MAX;
+	base_ctx->max_out_map_entries = CAM_CTX_CFG_MAX;
+
 	return rc;
 }

View File

@@ -268,6 +268,9 @@ int cam_ope_context_init(struct cam_ope_context *ctx,
 	ctx->base->ctx_priv = ctx;
 	ctx->ctxt_to_hw_map = NULL;
+	ctx->base->max_hw_update_entries = CAM_CTX_CFG_MAX;
+	ctx->base->max_in_map_entries = CAM_CTX_CFG_MAX;
+	ctx->base->max_out_map_entries = CAM_CTX_CFG_MAX;
 
 err:
 	return rc;
 }
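For any context-based camera driver added later, the opt-in recipe is the same as in the eleven files above: publish the three limits on the base context at init time so the acquire-time allocator can size the tables. A sketch with a hypothetical cam_foo device (names are illustrative; the usual init boilerplate is elided):

static int cam_foo_context_init(struct cam_foo_context *foo_ctx,
	struct cam_context *ctx_base)
{
	int rc = 0;

	/* ... cam_context_init(), state machine and ctx_priv hookup ... */

	/* publish per-request table limits before the first acquire */
	ctx_base->max_hw_update_entries = CAM_CTX_CFG_MAX;
	ctx_base->max_in_map_entries = CAM_CTX_CFG_MAX;
	ctx_base->max_out_map_entries = CAM_CTX_CFG_MAX;

	return rc;
}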