msm: camera: sensor: Add dynamic switch support in TPG

- Add support for dynamic mode switch.
- Store requests in, and apply them from, the waiting queue.
- Add support for reapplying requests from the active queue.
- Handle TPG interrupts.

CRs-Fixed: 3403974
Change-Id: I5dfe146b30631a94059f72d482610e04ba8e4e2c
Signed-off-by: Rishab Garg <quic_rishabg@quicinc.com>
This commit is contained in:
Rishab Garg
2023-02-15 14:51:12 +05:30
committed by Camera Software Integration
parent 85fbc65dfa
commit 3be797a964
10 changed files with 933 additions and 306 deletions

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "cam_tpg_core.h"
@@ -40,7 +40,7 @@ int cam_tpg_publish_dev_info(
info->dev_id = CAM_REQ_MGR_DEVICE_TPG;
strlcpy(info->name, CAM_TPG_NAME, sizeof(info->name));
/* Hard code for now */
info->p_delay = 2;
info->p_delay = 1;
info->trigger = CAM_TRIGGER_POINT_SOF;
return rc;
@@ -78,18 +78,49 @@ int cam_tpg_setup_link(
/**
 * cam_tpg_notify_frame_skip() - CRM callback notifying TPG of a skipped frame.
 * @apply: CRM apply payload carrying dev_hdl and request_id.
 *
 * TPG has no per-frame skip processing; this only logs the skipped request.
 *
 * Return: 0 on success, -EINVAL on invalid payload or device handle.
 */
static int cam_tpg_notify_frame_skip(
	struct cam_req_mgr_apply_request *apply)
{
	struct cam_tpg_device *tpg_dev = NULL;

	if (apply == NULL) {
		CAM_ERR(CAM_TPG, "invalid parameters");
		return -EINVAL;
	}

	tpg_dev = (struct cam_tpg_device *)cam_get_device_priv(apply->dev_hdl);
	if (tpg_dev == NULL) {
		CAM_ERR(CAM_TPG, "Device data is NULL");
		return -EINVAL;
	}

	/* Fix: dropped the stale duplicate "Got Skip frame" log that sat
	 * before the local declarations (leftover of the previous revision).
	 */
	CAM_DBG(CAM_TPG, "TPG[%d] Got Skip frame from crm for request %lld",
		tpg_dev->soc_info.index,
		apply->request_id);
	return 0;
}
/**
 * cam_tpg_apply_req() - CRM callback to apply a request on the TPG hardware.
 * @apply: CRM apply payload carrying dev_hdl and request_id.
 *
 * Looks up the device from the handle and hands the request id to the HW
 * layer under the device mutex.
 *
 * Return: 0 on success; -EINVAL on invalid payload/handle; the tpg_hw_apply()
 * error (e.g. -EAGAIN) if the HW apply fails, so CRM can react to it.
 */
static int cam_tpg_apply_req(
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_tpg_device *tpg_dev = NULL;

	if (!apply) {
		CAM_ERR(CAM_TPG, "invalid parameters");
		return -EINVAL;
	}

	tpg_dev = (struct cam_tpg_device *)
		cam_get_device_priv(apply->dev_hdl);
	if (tpg_dev == NULL) {
		CAM_ERR(CAM_TPG, "Device data is NULL");
		return -EINVAL;
	}

	CAM_DBG(CAM_TPG, "TPG[%d] Got Apply request from crm %lld",
		tpg_dev->soc_info.index,
		apply->request_id);
	mutex_lock(&tpg_dev->mutex);
	/* Fix: propagate the HW apply result instead of discarding it */
	rc = tpg_hw_apply(&tpg_dev->tpg_hw,
			apply->request_id);
	mutex_unlock(&tpg_dev->mutex);
	if (rc)
		CAM_ERR(CAM_TPG, "TPG[%d] HW apply failed for request %lld",
			tpg_dev->soc_info.index,
			apply->request_id);
	return rc;
}
@@ -335,8 +366,6 @@ static int __cam_tpg_handle_stop_dev(
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] STOP_DEV failed", tpg_dev->soc_info.index);
} else {
/* Free all allocated streams during stop dev */
tpg_hw_free_streams(&tpg_dev->tpg_hw);
tpg_dev->state = CAM_TPG_STATE_ACQUIRE;
CAM_INFO(CAM_TPG, "TPG[%d] STOP_DEV done.", tpg_dev->soc_info.index);
}
@@ -460,10 +489,27 @@ static int cam_tpg_cmd_buf_parse(
{
int rc = 0, i = 0;
struct cam_cmd_buf_desc *cmd_desc = NULL;
struct tpg_hw_request *req = NULL;
if (!tpg_dev || !packet)
return -EINVAL;
/* Allocate space for TPG requests */
req = tpg_hw_create_request(&tpg_dev->tpg_hw,
packet->header.request_id);
if (req == NULL) {
CAM_ERR(CAM_TPG, "TPG[%d] Unable to create hw request ",
tpg_dev->soc_info.index);
return -EINVAL;
}
rc = tpg_hw_request_set_opcode(req,
packet->header.op_code & 0xFF);
if (rc < 0)
goto free_request;
CAM_DBG(CAM_TPG, "Queue TPG requests for request id %lld", req->request_id);
for (i = 0; i < packet->num_cmd_buf; i++) {
uint32_t cmd_type = TPG_CMD_TYPE_INVALID;
uintptr_t cmd_addr;
@@ -483,20 +529,27 @@ static int cam_tpg_cmd_buf_parse(
switch (cmd_type) {
case TPG_CMD_TYPE_GLOBAL_CONFIG:
rc = tpg_hw_copy_global_config(&tpg_dev->tpg_hw,
(struct tpg_global_config_t *)cmd_addr);
/* Copy the global config to request */
memcpy(&req->global_config,
(struct tpg_global_config_t *)cmd_addr,
sizeof(struct tpg_global_config_t));
tpg_dev->tpg_hw.tpg_clock = req->global_config.tpg_clock;
break;
case TPG_CMD_TYPE_STREAM_CONFIG: {
if (cmd_header->cmd_version == 3) {
rc = tpg_hw_add_stream_v3(&tpg_dev->tpg_hw,
req,
(struct tpg_stream_config_v3_t *)cmd_addr);
CAM_DBG(CAM_TPG, "Stream config v3");
} else if (cmd_header->cmd_version == 1 ||
cmd_header->cmd_version == 2) {
rc = tpg_hw_add_stream(&tpg_dev->tpg_hw,
req,
(struct tpg_stream_config_t *)cmd_addr);
CAM_DBG(CAM_TPG, "Stream config");
}
if (rc < 0)
goto free_request;
break;
}
case TPG_CMD_TYPE_SETTINGS_CONFIG: {
@@ -515,11 +568,17 @@ static int cam_tpg_cmd_buf_parse(
tpg_dev->soc_info.index,
cmd_type);
rc = -EINVAL;
goto free_request;
break;
}
}
tpg_hw_add_request(&tpg_dev->tpg_hw, req);
end:
return rc;
free_request:
/* free the request and return the failure */
tpg_hw_free_request(&tpg_dev->tpg_hw, req);
return rc;
}
static int cam_tpg_packet_parse(
@@ -581,10 +640,17 @@ static int cam_tpg_packet_parse(
tpg_hw_config(&tpg_dev->tpg_hw, TPG_HW_CMD_INIT_CONFIG, NULL);
break;
}
case CAM_TPG_PACKET_OPCODE_NOP: {
case CAM_TPG_PACKET_OPCODE_NOP:
case CAM_TPG_PACKET_OPCODE_UPDATE:
{
struct cam_req_mgr_add_request add_req = {0};
rc = cam_tpg_cmd_buf_parse(tpg_dev, csl_packet);
if (rc < 0) {
CAM_ERR(CAM_TPG, "CMD buffer parse failed");
goto end;
}
CAM_DBG(CAM_TPG, "TPG[%d] NOP packet request id : %llu",
CAM_DBG(CAM_TPG, "TPG[%d] packet request id : %llu",
tpg_dev->soc_info.index,
csl_packet->header.request_id);
if ((tpg_dev->crm_intf.link_hdl != -1) &&
@@ -595,7 +661,7 @@ static int cam_tpg_packet_parse(
add_req.req_id = csl_packet->header.request_id;
tpg_dev->crm_intf.crm_cb->add_req(&add_req);
} else {
CAM_ERR(CAM_TPG, "TPG[%d] invalid link req: %llu",
CAM_ERR(CAM_TPG, "TPG[%d] add request %d failed",
tpg_dev->soc_info.index,
csl_packet->header.request_id);
}

View File

@@ -132,12 +132,23 @@ static const struct v4l2_subdev_internal_ops tpg_subdev_intern_ops = {
.close = cam_tpg_subdev_close,
};
irqreturn_t cam_tpg_irq_handler(int irq_num, void *data)
static irqreturn_t cam_tpg_irq_handler(int irq_num, void *data)
{
CAM_DBG(CAM_TPG, "tpg irq handler");
return IRQ_HANDLED;
struct cam_tpg_device *tpg_dev =
(struct cam_tpg_device *)data;
struct tpg_hw *hw = &tpg_dev->tpg_hw;
irqreturn_t rc = IRQ_NONE;
/* handle the registered irq handler */
if (hw &&
hw->hw_info &&
hw->hw_info->ops &&
hw->hw_info->ops->handle_irq) {
rc = hw->hw_info->ops->handle_irq(hw);
}
return rc;
}
static int tpg_subdev_init(struct cam_tpg_device *tpg_dev,
struct device *dev)
@@ -237,7 +248,6 @@ static int tpg_register_cpas_client(struct cam_tpg_device *tpg_dev,
static int cam_tpg_hw_layer_init(struct cam_tpg_device *tpg_dev,
struct device *dev)
{
int i = 0;
/* get top tpg hw information */
const struct of_device_id *match_dev = NULL;
struct platform_device *pdev = to_platform_device(dev);
@@ -253,26 +263,18 @@ static int cam_tpg_hw_layer_init(struct cam_tpg_device *tpg_dev,
tpg_dev->tpg_hw.hw_info = (struct tpg_hw_info *)match_dev->data;
tpg_dev->tpg_hw.soc_info = &tpg_dev->soc_info;
tpg_dev->tpg_hw.cpas_handle = tpg_dev->cpas_handle;
spin_lock_init(&tpg_dev->tpg_hw.hw_state_lock);
tpg_dev->tpg_hw.state = TPG_HW_STATE_HW_DISABLED;
mutex_init(&tpg_dev->tpg_hw.mutex);
tpg_dev->tpg_hw.vc_slots = devm_kzalloc(&pdev->dev,
sizeof(struct tpg_vc_slot_info) * tpg_dev->tpg_hw.hw_info->max_vc_channels,
GFP_KERNEL);
if (!tpg_dev->tpg_hw.vc_slots) {
CAM_ERR(CAM_TPG, "TPG VC slot allocation failed");
mutex_destroy(&tpg_dev->tpg_hw.mutex);
return -ENOMEM;
}
for(i = 0; i < tpg_dev->tpg_hw.hw_info->max_vc_channels; i++) {
tpg_dev->tpg_hw.vc_slots[i].slot_id = i;
tpg_dev->tpg_hw.vc_slots[i].vc = -1;
tpg_dev->tpg_hw.vc_slots[i].stream_count = 0;
INIT_LIST_HEAD(&(tpg_dev->tpg_hw.vc_slots[i].head));
}
init_completion(&tpg_dev->tpg_hw.complete_rup);
/*Initialize the waiting queue and active queues*/
INIT_LIST_HEAD(&(tpg_dev->tpg_hw.waiting_request_q));
INIT_LIST_HEAD(&(tpg_dev->tpg_hw.active_request_q));
tpg_dev->tpg_hw.waiting_request_q_depth = 0;
tpg_dev->tpg_hw.active_request_q_depth = 0;
tpg_dev->tpg_hw.settings_update = 0;
tpg_dev->tpg_hw.tpg_clock = 0;
tpg_dev->tpg_hw.hw_info->layer_init(&tpg_dev->tpg_hw);
return 0;
}

View File

@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __CAM_TPG_DEV_H__
@@ -111,6 +112,9 @@ struct tpg_crm_intf_params {
* @state : state machine states
* @slot_id : slot index of this tpg
* @phy_id : phy index mapped to this tpg
* @waiting_request_q : waiting request queue
* @active_request_q : active request queue
* @tpg_irq_state : tpg irq state
*/
struct cam_tpg_device {
char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
@@ -124,6 +128,9 @@ struct cam_tpg_device {
int state;
int slot_id;
int phy_id;
atomic_t tpg_irq_state;
struct list_head waiting_request_q;
struct list_head active_request_q;
};

View File

@@ -9,6 +9,11 @@
#define BYTES_PER_REGISTER 4
#define NUM_REGISTER_PER_LINE 4
#define REG_OFFSET(__start, __i) ((__start) + ((__i) * BYTES_PER_REGISTER))
#define CAM_TPG_HW_WAIT_TIMEOUT msecs_to_jiffies(100)
#define MAX_ACTIVE_QUEUE_DEPTH 2
#define MAX_WAITING_QUEUE_DEPTH 32
#define TIMEOUT_MULTIPLIER_PRESIL 5
#define TIMEOUT_MULTIPLIER 1
static int cam_io_tpg_dump(void __iomem *base_addr,
uint32_t start_offset, int size)
@@ -296,11 +301,99 @@ int dump_stream_configs_v3(int hw_idx,
#endif
return 0;
}
/*
 * tpg_hw_release_vc_slots_locked() - free a request and all streams it owns.
 *
 * Walks every VC slot of @req, frees each queued stream (v1 or v3 layout,
 * selected by hw->stream_version), then frees the slot array and @req
 * itself. Caller must hold hw->mutex and must not touch @req afterwards.
 *
 * NOTE(review): the slot fields are re-initialized right before req is
 * kfree'd, so those stores are dead; also hw->vc_count is reset here even
 * though the slots now live per-request — confirm this is intentional.
 */
static int tpg_hw_release_vc_slots_locked(struct tpg_hw *hw,
struct tpg_hw_request *req)
{
struct list_head *pos = NULL, *pos_next = NULL;
struct tpg_hw_stream *entry;
struct tpg_hw_stream_v3 *entry_v3;
int i = 0;
if (!hw || !hw->hw_info || !req) {
CAM_ERR(CAM_TPG, "Invalid Params");
return -EINVAL;
}
CAM_DBG(CAM_TPG, "TPG[%d] req[%lld] Freeing all the streams",
hw->hw_idx, req->request_id);
for (i = 0; i < hw->hw_info->max_vc_channels; i++) {
/* reset the slot bookkeeping before draining its stream list */
req->vc_slots[i].slot_id = i;
req->vc_slots[i].vc = -1;
req->vc_slots[i].stream_count = 0;
if (hw->stream_version == 1) {
list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) {
entry = list_entry(pos, struct tpg_hw_stream, list);
list_del(pos);
kfree(entry);
}
} else if (hw->stream_version == 3) {
list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) {
entry_v3 = list_entry(pos, struct tpg_hw_stream_v3, list);
list_del(pos);
kfree(entry_v3);
}
}
INIT_LIST_HEAD(&(req->vc_slots[i].head));
}
hw->vc_count = 0;
/* the request owns its slot array; release both */
kfree(req->vc_slots);
kfree(req);
return 0;
}
/**
 * tpg_hw_free_waiting_requests_locked() - drop every queued (not yet applied)
 * request.
 * @hw: tpg hw instance; caller must hold hw->mutex.
 *
 * Unlinks and frees each entry of waiting_request_q via
 * tpg_hw_release_vc_slots_locked().
 * Fix: reset waiting_request_q_depth once the queue is drained — it was
 * previously left at its old value, making later depth checks lie.
 *
 * Return: 0 on success, -EINVAL on NULL @hw.
 */
static int tpg_hw_free_waiting_requests_locked(struct tpg_hw *hw)
{
	struct list_head *pos = NULL, *pos_next = NULL;
	struct tpg_hw_request *req = NULL;

	if (!hw) {
		CAM_ERR(CAM_TPG, "Invalid Params");
		return -EINVAL;
	}

	/* free up the pending requests */
	CAM_DBG(CAM_TPG, "TPG[%d] freeing all waiting requests",
		hw->hw_idx);
	list_for_each_safe(pos, pos_next, &hw->waiting_request_q) {
		req = list_entry(pos, struct tpg_hw_request, list);
		CAM_DBG(CAM_TPG, "TPG[%d] freeing request[%lld] ",
			hw->hw_idx, req->request_id);
		list_del(pos);
		tpg_hw_release_vc_slots_locked(hw, req);
	}
	hw->waiting_request_q_depth = 0;

	return 0;
}
/**
 * tpg_hw_free_active_requests_locked() - drop every already-applied request.
 * @hw: tpg hw instance; caller must hold hw->mutex.
 *
 * Unlinks and frees each entry of active_request_q via
 * tpg_hw_release_vc_slots_locked().
 * Fix: reset active_request_q_depth once the queue is drained — it was
 * previously left stale, corrupting the MAX_ACTIVE_QUEUE_DEPTH accounting.
 *
 * Return: 0 on success, -EINVAL on NULL @hw.
 */
static int tpg_hw_free_active_requests_locked(struct tpg_hw *hw)
{
	struct list_head *pos = NULL, *pos_next = NULL;
	struct tpg_hw_request *req = NULL;

	if (!hw) {
		CAM_ERR(CAM_TPG, "Invalid Params");
		return -EINVAL;
	}

	/* free up the active requests */
	CAM_DBG(CAM_TPG, "TPG[%d] freeing all active requests",
		hw->hw_idx);
	list_for_each_safe(pos, pos_next, &hw->active_request_q) {
		req = list_entry(pos, struct tpg_hw_request, list);
		CAM_DBG(CAM_TPG, "TPG[%d] freeing request[%lld] ",
			hw->hw_idx, req->request_id);
		list_del(pos);
		tpg_hw_release_vc_slots_locked(hw, req);
	}
	hw->active_request_q_depth = 0;

	return 0;
}
static int tpg_hw_soc_disable(struct tpg_hw *hw)
{
int rc = 0;
unsigned long flags;
if (!hw || !hw->soc_info) {
CAM_ERR(CAM_TPG, "Error Invalid params");
@@ -308,7 +401,8 @@ static int tpg_hw_soc_disable(struct tpg_hw *hw)
}
rc = cam_soc_util_disable_platform_resource(hw->soc_info, CAM_CLK_SW_CLIENT_IDX, true,
false);
true);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] Disable platform failed %d",
hw->hw_idx, rc);
@@ -319,7 +413,9 @@ static int tpg_hw_soc_disable(struct tpg_hw *hw)
hw->hw_idx);
return rc;
} else {
spin_lock_irqsave(&hw->hw_state_lock, flags);
hw->state = TPG_HW_STATE_HW_DISABLED;
spin_unlock_irqrestore(&hw->hw_state_lock, flags);
}
return rc;
@@ -327,11 +423,13 @@ static int tpg_hw_soc_disable(struct tpg_hw *hw)
static int tpg_hw_soc_enable(
struct tpg_hw *hw,
enum cam_vote_level clk_level)
enum cam_vote_level clk_level,
uint32_t enable_irq)
{
int rc = 0;
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote = {0};
unsigned long flags;
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SVS_VOTE;
@@ -358,13 +456,18 @@ static int tpg_hw_soc_enable(
}
rc = cam_soc_util_enable_platform_resource(hw->soc_info, CAM_CLK_SW_CLIENT_IDX, true,
clk_level, false);
clk_level, enable_irq);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] enable platform failed",
hw->hw_idx);
goto stop_cpas;
}
hw->state = TPG_HW_STATE_HW_ENABLED;
spin_lock_irqsave(&hw->hw_state_lock, flags);
/* might need to wait if in busy state */
hw->state = TPG_HW_STATE_READY;
spin_unlock_irqrestore(&hw->hw_state_lock, flags);
return rc;
stop_cpas:
@@ -373,7 +476,8 @@ end:
return rc;
}
static int tpg_hw_start_default_new(struct tpg_hw *hw)
static int tpg_hw_apply_settings_to_hw_locked(struct tpg_hw *hw,
struct tpg_hw_request *req)
{
int i = 0;
uint32_t stream_idx = 0;
@@ -382,31 +486,38 @@ static int tpg_hw_start_default_new(struct tpg_hw *hw)
if (!hw ||
!hw->hw_info ||
!hw->hw_info->ops ||
!hw->hw_info->ops->process_cmd) {
!hw->hw_info->ops->process_cmd ||
!hw->soc_info ||
!req) {
CAM_ERR(CAM_TPG, "Invalid argument");
return -EINVAL;
}
/* If nop then skip applying and return success*/
if (req->request_type == TPG_HW_REQ_TYPE_NOP)
goto end;
dump_global_configs(hw->hw_idx, &hw->global_config);
dump_global_configs(hw->hw_idx, &req->global_config);
if (hw->stream_version == 1) {
for (i = 0; i < hw->hw_info->max_vc_channels; i++) {
int dt_slot = 0;
struct vc_config_args vc_config = {0};
struct list_head *pos = NULL, *pos_next = NULL;
struct tpg_hw_stream *entry = NULL, *vc_stream_entry = NULL;
if (hw->vc_slots[i].vc == -1)
if (req->vc_slots[i].vc == -1)
break;
num_vcs++;
vc_config.vc_slot = i;
vc_config.num_dts = hw->vc_slots[i].stream_count;
vc_stream_entry = list_first_entry(&hw->vc_slots[i].head,
vc_config.num_dts = req->vc_slots[i].stream_count;
vc_stream_entry = list_first_entry(&req->vc_slots[i].head,
struct tpg_hw_stream, list);
vc_config.stream = &vc_stream_entry->stream;
hw->hw_info->ops->process_cmd(hw,
TPG_CONFIG_VC, &vc_config);
list_for_each_safe(pos, pos_next, &hw->vc_slots[i].head) {
list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) {
struct dt_config_args dt_config = {0};
entry = list_entry(pos, struct tpg_hw_stream, list);
dump_stream_configs(hw->hw_idx,
stream_idx++,
@@ -417,47 +528,25 @@ static int tpg_hw_start_default_new(struct tpg_hw *hw)
hw->hw_info->ops->process_cmd(hw, TPG_CONFIG_DT, &dt_config);
}
}
globalargs.num_vcs = num_vcs;
globalargs.globalconfig = &hw->global_config;
hw->hw_info->ops->process_cmd(hw,
TPG_CONFIG_CTRL, &globalargs);
return 0;
}
static int tpg_hw_start_default_new_v3(struct tpg_hw *hw)
{
int i = 0;
uint32_t stream_idx = 0;
int num_vcs = 0;
struct global_config_args globalargs = {0};
if (!hw || !hw->hw_info ||
!hw->hw_info->ops || !hw->hw_info->ops->process_cmd) {
CAM_ERR(CAM_TPG, "Invalid argument");
return -EINVAL;
}
dump_global_configs(hw->hw_idx, &hw->global_config);
} else if (hw->stream_version == 3) {
for (i = 0; i < hw->hw_info->max_vc_channels; i++) {
int dt_slot = 0;
struct vc_config_args_v3 vc_config = {0};
struct list_head *pos = NULL, *pos_next = NULL;
struct tpg_hw_stream_v3 *entry = NULL, *vc_stream_entry = NULL;
if (hw->vc_slots[i].vc == -1)
if (req->vc_slots[i].vc == -1)
break;
num_vcs++;
vc_config.vc_slot = i;
vc_config.num_dts = hw->vc_slots[i].stream_count;
vc_stream_entry = list_first_entry(&hw->vc_slots[i].head,
vc_config.num_dts = req->vc_slots[i].stream_count;
vc_stream_entry = list_first_entry(&req->vc_slots[i].head,
struct tpg_hw_stream_v3, list);
vc_config.stream = &vc_stream_entry->stream;
hw->hw_info->ops->process_cmd(hw,
TPG_CONFIG_VC, &vc_config);
list_for_each_safe(pos, pos_next, &hw->vc_slots[i].head) {
list_for_each_safe(pos, pos_next, &req->vc_slots[i].head) {
struct dt_config_args_v3 dt_config = {0};
entry = list_entry(pos, struct tpg_hw_stream_v3, list);
@@ -470,16 +559,17 @@ static int tpg_hw_start_default_new_v3(struct tpg_hw *hw)
hw->hw_info->ops->process_cmd(hw, TPG_CONFIG_DT, &dt_config);
}
}
}
globalargs.num_vcs = num_vcs;
globalargs.globalconfig = &hw->global_config;
globalargs.globalconfig = &req->global_config;
hw->hw_info->ops->process_cmd(hw,
TPG_CONFIG_CTRL, &globalargs);
if (!cam_presil_mode_enabled())
cam_tpg_mem_dmp(hw->soc_info);
end:
return 0;
}
int tpg_hw_dump_status(struct tpg_hw *hw)
{
if (!hw || !hw->hw_info || !hw->hw_info->ops)
@@ -498,6 +588,182 @@ int tpg_hw_dump_status(struct tpg_hw *hw)
return 0;
}
/**
 * tpg_hw_check_hw_state_and_apply_settings_locked() - gate a request on HW
 * state, then program it.
 * @hw:  tpg hw instance; caller must hold hw->mutex.
 * @req: request to apply.
 *
 * For non-NOP requests: if the HW is BUSY, waits (bounded, presil gets a
 * larger multiplier) for the RUP completion; once READY, marks the HW BUSY
 * and flags settings_update before programming the request.
 *
 * NOTE(review): hw->state is read here without hw_state_lock (only the
 * write is locked), and settings_update is set outside the lock — presumably
 * safe because hw->mutex serializes appliers; confirm against the irq path.
 *
 * Return: 0 on success, -EINVAL on bad params, -EBUSY on wait timeout.
 */
static int tpg_hw_check_hw_state_and_apply_settings_locked(
	struct tpg_hw *hw,
	struct tpg_hw_request *req)
{
	int rc = 0;
	unsigned long wait_jiffies = 0;
	unsigned long rem_jiffies = 0;
	unsigned long flags;
	int32_t timeout_multiplier = 0;

	if (!hw || !req) {
		CAM_ERR(CAM_TPG, "Invalid param");
		rc = -EINVAL;
		goto end;
	}

	if (req->request_type != TPG_HW_REQ_TYPE_NOP) {
		if (cam_presil_mode_enabled())
			timeout_multiplier = TIMEOUT_MULTIPLIER_PRESIL;
		else
			timeout_multiplier = TIMEOUT_MULTIPLIER;

		/* if hw state is not ready, wait for timeout.
		 * if hw state becomes ready handle in follow up if.
		 */
		if (hw->state == TPG_HW_STATE_BUSY) {
			wait_jiffies = CAM_TPG_HW_WAIT_TIMEOUT * timeout_multiplier;
			reinit_completion(&hw->complete_rup);
			rem_jiffies =
				cam_common_wait_for_completion_timeout(&hw->complete_rup,
					wait_jiffies);
			if (!rem_jiffies) {
				/* Fix: rem_jiffies is unsigned long — %lu, not %llu */
				CAM_ERR(CAM_TPG, "TPG[%d] hw timeout %lu",
					hw->hw_idx, rem_jiffies);
				rc = -EBUSY;
				goto end;
			}
		}

		if (hw->state == TPG_HW_STATE_READY) {
			hw->settings_update = 1;
			spin_lock_irqsave(&hw->hw_state_lock, flags);
			hw->state = TPG_HW_STATE_BUSY;
			spin_unlock_irqrestore(&hw->hw_state_lock, flags);
			CAM_DBG(CAM_TPG, "HW State ready to busy");
		}
	}
	rc = tpg_hw_apply_settings_to_hw_locked(hw, req);
end:
	return rc;
}
/**
 * tpg_hw_reapply_from_active_queue_locked() - re-program an already-applied
 * request.
 * @hw:         tpg hw instance; caller must hold hw->mutex.
 * @request_id: id CRM asked to (re)apply.
 *
 * Scans active_request_q for the id and, if found, runs the normal
 * state-check + apply path on it. The request stays in the active queue.
 * Fix: request_id is u64 — log with %llu instead of %d.
 *
 * Return: 0 on success, -EINVAL if @hw is NULL or the id is not active.
 */
static int tpg_hw_reapply_from_active_queue_locked(
	struct tpg_hw *hw,
	uint64_t request_id)
{
	int rc = 0;
	struct tpg_hw_request *reapply_req = NULL;
	struct tpg_hw_request *entry = NULL;
	struct list_head *pos = NULL, *pos_next = NULL;

	if (!hw) {
		CAM_ERR(CAM_TPG, "Invalid argument");
		return -EINVAL;
	}

	list_for_each_safe(pos, pos_next, &hw->active_request_q) {
		entry = list_entry(pos, struct tpg_hw_request, list);
		if (entry->request_id == request_id) {
			reapply_req = entry;
			break;
		}
	}

	if (reapply_req) {
		CAM_DBG(CAM_TPG, "TPG[%d] got reapply request %llu",
			hw->hw_idx, request_id);
		rc = tpg_hw_check_hw_state_and_apply_settings_locked(hw, reapply_req);
	} else {
		CAM_ERR(CAM_TPG, "Could not find reapply request in active queue %llu",
			request_id);
		return -EINVAL;
	}

	return rc;
}
/**
 * tpg_hw_lookup_queues_and_apply_req_locked() - find @request_id in the
 * waiting or active queue and apply it.
 * @hw:         tpg hw instance; caller must hold hw->mutex.
 * @request_id: id CRM asked to apply.
 *
 * If the head of waiting_request_q matches, it is applied and promoted to
 * active_request_q; when the active queue is at MAX_ACTIVE_QUEUE_DEPTH the
 * oldest active request is retired and freed first. Any other id (or an
 * empty waiting queue) is treated as a reapply from the active queue.
 * Fix: request_id is u64 — log with %llu instead of %d.
 *
 * Return: 0 on success, -EINVAL on NULL @hw, -EAGAIN on any apply failure
 * (the underlying error code is folded into -EAGAIN for CRM).
 */
static int tpg_hw_lookup_queues_and_apply_req_locked(
	struct tpg_hw *hw,
	uint64_t request_id)
{
	int rc = 0;
	struct tpg_hw_request *req = NULL, *active_req = NULL;

	if (!hw) {
		CAM_ERR(CAM_TPG, "Invalid argument");
		rc = -EINVAL;
		goto end;
	}

	if (!list_empty(&hw->waiting_request_q)) {
		req = list_first_entry(&hw->waiting_request_q,
			struct tpg_hw_request, list);
		if (req->request_id == request_id) {
			CAM_DBG(CAM_TPG, "TPG[%d] request %llu got matched",
				hw->hw_idx, request_id);
			rc = tpg_hw_check_hw_state_and_apply_settings_locked(hw, req);
			if (rc == 0) {
				/* delete the request from waiting_q */
				list_del(&req->list);
				hw->waiting_request_q_depth--;
				/* retire the oldest active request if at capacity */
				if (!list_empty(&hw->active_request_q) &&
					(hw->active_request_q_depth >= MAX_ACTIVE_QUEUE_DEPTH)) {
					active_req = list_first_entry(&hw->active_request_q,
						struct tpg_hw_request, list);
					list_del(&active_req->list);
					hw->active_request_q_depth--;
					/* free the previously active request */
					tpg_hw_release_vc_slots_locked(hw, active_req);
				}
				/* Add the currently applied request to active queue */
				list_add_tail(&req->list, &hw->active_request_q);
				hw->active_request_q_depth++;
			}
		} else {
			rc = tpg_hw_reapply_from_active_queue_locked(hw, request_id);
		}
	} else {
		CAM_DBG(CAM_TPG, "TPG[%d] got apply for request %llu when waiting queue is empty",
			hw->hw_idx,
			request_id);
		rc = tpg_hw_reapply_from_active_queue_locked(hw, request_id);
	}

	if (rc == 0) {
		CAM_DBG(CAM_TPG, "TPG[%d] Hw Apply done for req %lld", hw->hw_idx, request_id);
	} else {
		CAM_ERR(CAM_TPG, "TPG[%d] Hw Apply Failed for req %lld", hw->hw_idx, request_id);
		rc = -EAGAIN;
	}
end:
	return rc;
}
/*
 * tpg_hw_apply_req_from_waiting_queue_locked() - apply all pending requests
 * sequentially (used at stream start to flush the initial batch).
 *
 * Each waiting request is programmed in queue order; only the most recently
 * applied request survives (moved to active_request_q), each predecessor is
 * freed as soon as its successor has been applied. Caller holds hw->mutex.
 *
 * NOTE(review): the return value of tpg_hw_apply_settings_to_hw_locked()
 * is ignored, so rc is always 0 — presumably best-effort at start; confirm.
 */
static int tpg_hw_apply_req_from_waiting_queue_locked(
struct tpg_hw *hw)
{
int rc = 0;
struct list_head *pos = NULL, *pos_next = NULL;
struct tpg_hw_request *req = NULL;
struct tpg_hw_request *prev_req = NULL;
if (!hw) {
CAM_ERR(CAM_TPG, "Invalid Params");
return -EINVAL;
}
list_for_each_safe(pos, pos_next, &hw->waiting_request_q) {
req = list_entry(pos, struct tpg_hw_request, list);
tpg_hw_apply_settings_to_hw_locked(hw, req);
list_del(pos);
hw->waiting_request_q_depth--;
/* previous request has been superseded; release it now */
if (prev_req)
tpg_hw_release_vc_slots_locked(hw, prev_req);
prev_req = req;
}
/* the last request applied becomes the (only) active request */
if (prev_req != NULL) {
list_add_tail(&prev_req->list, &hw->active_request_q);
hw->active_request_q_depth++;
}
return rc;
}
int tpg_hw_start(struct tpg_hw *hw)
{
int rc = 0;
@@ -523,18 +789,17 @@ int tpg_hw_start(struct tpg_hw *hw)
case TPG_HW_VERSION_1_3:
case TPG_HW_VERSION_1_3_1:
case TPG_HW_VERSION_1_4:
/* apply all initial requests */
if (hw->hw_info->ops->start)
hw->hw_info->ops->start(hw, NULL);
if (settings_count != 0) {
hw->hw_info->ops->write_settings(hw, config,
reg_settings);
} else {
if (hw->stream_version == 1)
tpg_hw_start_default_new(hw);
else if (hw->stream_version == 3)
tpg_hw_start_default_new_v3(hw);
tpg_hw_apply_req_from_waiting_queue_locked(hw);
}
cam_tpg_mem_dmp(hw->soc_info);
break;
default:
CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version",
@@ -574,6 +839,8 @@ int tpg_hw_stop(struct tpg_hw *hw)
hw->hw_idx, rc);
return rc;
}
tpg_hw_free_waiting_requests_locked(hw);
tpg_hw_free_active_requests_locked(hw);
break;
default:
CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version",
@@ -651,7 +918,7 @@ enum cam_vote_level get_tpg_clk_level(
return -EINVAL;
soc_info = hw->soc_info;
clk = hw->global_config.tpg_clock;
clk = hw->tpg_clock;
for (cam_vote_level = 0;
cam_vote_level < CAM_MAX_VOTE; cam_vote_level++) {
@@ -691,21 +958,25 @@ static int tpg_hw_configure_init_settings(
case TPG_HW_VERSION_1_3:
case TPG_HW_VERSION_1_3_1:
case TPG_HW_VERSION_1_4:
/* Need to handle the clock from the init config */
/* Need to handle this latter */
clk_level = get_tpg_clk_level(hw);
rc = tpg_hw_soc_enable(hw, clk_level);
rc = tpg_hw_soc_enable(hw, clk_level, true);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] hw soc enable failed %d",
hw->hw_idx, rc);
return rc;
}
if (hw->hw_info->ops->init) {
if (hw->hw_info->ops->init)
rc = hw->hw_info->ops->init(hw, settings);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] hw soc enable failed %d",
CAM_ERR(CAM_TPG, "TPG[%d] hw init failed %d",
hw->hw_idx, rc);
return rc;
}
}
break;
default:
CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version",
@@ -735,20 +1006,22 @@ static int tpg_hw_configure_init_settings_v3(
case TPG_HW_VERSION_1_3_1:
case TPG_HW_VERSION_1_4:
clk_level = get_tpg_clk_level(hw);
rc = tpg_hw_soc_enable(hw, clk_level);
rc = tpg_hw_soc_enable(hw, clk_level, true);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] hw soc enable failed %d",
hw->hw_idx, rc);
return rc;
}
if (hw->hw_info->ops->init) {
if (hw->hw_info->ops->init)
rc = hw->hw_info->ops->init(hw, settings);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] hw soc enable failed %d",
CAM_ERR(CAM_TPG, "TPG[%d] hw init failed %d",
hw->hw_idx, rc);
return rc;
}
}
break;
default:
CAM_ERR(CAM_TPG, "TPG[%d] Unsupported HW Version",
@@ -795,64 +1068,6 @@ int tpg_hw_config(
return rc;
}
/*
 * tpg_hw_free_streams() - release every stream attached to the hw-global
 * VC slots and reset the slot table. (Legacy path: slots moved per-request
 * in the dynamic-switch rework; this variant operates on hw->vc_slots.)
 *
 * Takes hw->mutex itself; callers must not hold it.
 */
int tpg_hw_free_streams(struct tpg_hw *hw)
{
struct list_head *pos = NULL, *pos_next = NULL;
struct tpg_hw_stream *entry;
struct tpg_hw_stream_v3 *entry_v3;
int i = 0;
if (!hw)
return -EINVAL;
mutex_lock(&hw->mutex);
/* free up the streams */
CAM_DBG(CAM_TPG, "TPG[%d] Freeing all the streams", hw->hw_idx);
/* reset the slots */
for(i = 0; i < hw->hw_info->max_vc_channels; i++) {
hw->vc_slots[i].slot_id = i;
hw->vc_slots[i].vc = -1;
hw->vc_slots[i].stream_count = 0;
if (hw->stream_version == 1) {
list_for_each_safe(pos, pos_next, &hw->vc_slots[i].head) {
entry = list_entry(pos, struct tpg_hw_stream, list);
list_del(pos);
kfree(entry);
}
} else if (hw->stream_version == 3) {
list_for_each_safe(pos, pos_next, &hw->vc_slots[i].head) {
entry_v3 = list_entry(pos, struct tpg_hw_stream_v3, list);
list_del(pos);
kfree(entry_v3);
}
}
INIT_LIST_HEAD(&(hw->vc_slots[i].head));
}
hw->vc_count = 0;
mutex_unlock(&hw->mutex);
return 0;
}
/*
 * tpg_hw_copy_global_config() - cache the global TPG configuration from a
 * parsed command buffer into hw->global_config, under hw->mutex.
 * (Legacy path: the dynamic-switch rework copies this into the request.)
 */
int tpg_hw_copy_global_config(
struct tpg_hw *hw,
struct tpg_global_config_t *global)
{
if (!hw || !global) {
CAM_ERR(CAM_TPG, "invalid parameter");
return -EINVAL;
}
mutex_lock(&hw->mutex);
memcpy(&hw->global_config,
global,
sizeof(struct tpg_global_config_t));
mutex_unlock(&hw->mutex);
return 0;
}
int tpg_hw_copy_settings_config(
struct tpg_hw *hw,
struct tpg_settings_config_t *settings)
@@ -891,24 +1106,25 @@ int tpg_hw_copy_settings_config(
static int assign_vc_slot(
struct tpg_hw *hw,
int vc,
struct tpg_hw_request *req,
struct tpg_hw_stream *stream
)
{
int rc = -EINVAL, i = 0, slot_matched = 0;
if (!hw || !stream) {
if (!hw || !stream || !req) {
return -EINVAL;
}
for (i = 0; i < hw->hw_info->max_vc_channels; i++) {
/* Found a matching slot */
if(hw->vc_slots[i].vc == vc) {
if (req->vc_slots[i].vc == vc) {
slot_matched = 1;
if (hw->vc_slots[i].stream_count
if (req->vc_slots[i].stream_count
< hw->hw_info->max_dt_channels_per_vc) {
list_add_tail(&stream->list, &hw->vc_slots[i].head);
hw->vc_slots[i].stream_count++;
hw->vc_slots[i].vc = vc;
list_add_tail(&stream->list, &req->vc_slots[i].head);
req->vc_slots[i].stream_count++;
req->vc_slots[i].vc = vc;
rc = 0;
CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", vc, stream->stream.dt, i);
break;
@@ -930,11 +1146,11 @@ static int assign_vc_slot(
* none of the above slots matched, and now found an empty slot
* so assigning stream to that slot
*/
if (hw->vc_slots[i].vc == -1) {
list_add_tail(&stream->list, &hw->vc_slots[i].head);
hw->vc_slots[i].stream_count++;
hw->vc_slots[i].vc = vc;
hw->vc_count++;
if (req->vc_slots[i].vc == -1) {
list_add_tail(&stream->list, &req->vc_slots[i].head);
req->vc_slots[i].stream_count++;
req->vc_slots[i].vc = vc;
req->vc_count++;
rc = 0;
CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", vc, stream->stream.dt, i);
break;
@@ -949,23 +1165,24 @@ static int assign_vc_slot(
static int assign_vc_slot_v3(
struct tpg_hw *hw,
int vc,
struct tpg_hw_request *req,
struct tpg_hw_stream_v3 *stream
)
{
int rc = -EINVAL, i = 0, slot_matched = 0;
if (!hw || !stream)
if (!hw || !stream || !req)
return -EINVAL;
for (i = 0; i < hw->hw_info->max_vc_channels; i++) {
/* Found a matching slot */
if (hw->vc_slots[i].vc == vc) {
if (req->vc_slots[i].vc == vc) {
slot_matched = 1;
if (hw->vc_slots[i].stream_count
if (req->vc_slots[i].stream_count
< hw->hw_info->max_dt_channels_per_vc) {
list_add_tail(&stream->list, &hw->vc_slots[i].head);
hw->vc_slots[i].stream_count++;
hw->vc_slots[i].vc = vc;
list_add_tail(&stream->list, &req->vc_slots[i].head);
req->vc_slots[i].stream_count++;
req->vc_slots[i].vc = vc;
rc = 0;
CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]",
vc,
@@ -989,11 +1206,11 @@ static int assign_vc_slot_v3(
* none of the above slots matched, and now found an empty slot
* so assigning stream to that slot
*/
if (hw->vc_slots[i].vc == -1) {
list_add_tail(&stream->list, &hw->vc_slots[i].head);
hw->vc_slots[i].stream_count++;
hw->vc_slots[i].vc = vc;
hw->vc_count++;
if (req->vc_slots[i].vc == -1) {
list_add_tail(&stream->list, &req->vc_slots[i].head);
req->vc_slots[i].stream_count++;
req->vc_slots[i].vc = vc;
req->vc_count++;
rc = 0;
CAM_DBG(CAM_TPG, "vc[%d]dt[%d]=>slot[%d]", vc, stream->stream.dt, i);
break;
@@ -1001,36 +1218,59 @@ static int assign_vc_slot_v3(
}
if ((slot_matched == 0) && (rc != 0))
CAM_ERR(CAM_TPG, "No slot matched");
return rc;
}
int tpg_hw_free_request(
struct tpg_hw *hw,
struct tpg_hw_request *req)
{
int rc = 0;
if (!hw || !req)
return -EINVAL;
mutex_lock(&hw->mutex);
rc = tpg_hw_release_vc_slots_locked(hw, req);
mutex_unlock(&hw->mutex);
return rc;
}
int tpg_hw_reset(struct tpg_hw *hw)
{
int rc = 0;
unsigned long flags;
if (!hw)
return -EINVAL;
/* free up the streams if any*/
rc = tpg_hw_free_streams(hw);
if (rc)
CAM_ERR(CAM_TPG, "TPG[%d] Unable to free up the streams", hw->hw_idx);
CAM_DBG(CAM_TPG, "TPG HW reset");
/* disable the hw */
mutex_lock(&hw->mutex);
rc = tpg_hw_free_waiting_requests_locked(hw);
if (rc)
CAM_ERR(CAM_TPG, "TPG[%d] unable to free up the pending requests",
hw->hw_idx);
if (hw->state != TPG_HW_STATE_HW_DISABLED) {
rc = cam_soc_util_disable_platform_resource(hw->soc_info, CAM_CLK_SW_CLIENT_IDX,
true, false);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] Disable platform failed %d", hw->hw_idx, rc);
return rc;
}
rc = cam_cpas_stop(hw->cpas_handle);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] CPAS stop failed", hw->hw_idx);
return rc;
}
spin_lock_irqsave(&hw->hw_state_lock, flags);
hw->state = TPG_HW_STATE_HW_DISABLED;
spin_unlock_irqrestore(&hw->hw_state_lock, flags);
}
rc = tpg_hw_free_active_requests_locked(hw);
if (rc) {
CAM_ERR(CAM_TPG, "TPG[%d] unable to free up the active requests",
hw->hw_idx);
}
mutex_unlock(&hw->mutex);
@@ -1039,11 +1279,12 @@ int tpg_hw_reset(struct tpg_hw *hw)
int tpg_hw_add_stream(
struct tpg_hw *hw,
struct tpg_hw_request *req,
struct tpg_stream_config_t *cmd)
{
int rc = 0;
struct tpg_hw_stream *stream = NULL;
if (!hw || !cmd) {
if (!hw || !req || !cmd) {
CAM_ERR(CAM_TPG, "Invalid params");
return -EINVAL;
}
@@ -1061,18 +1302,134 @@ int tpg_hw_add_stream(
cmd,
sizeof(struct tpg_stream_config_t));
rc = assign_vc_slot(hw, stream->stream.vc, stream);
rc = assign_vc_slot(hw, stream->stream.vc, req, stream);
mutex_unlock(&hw->mutex);
return rc;
}
int tpg_hw_add_request(struct tpg_hw *hw,
struct tpg_hw_request *req)
{
int rc = 0;
if (!hw || !req) {
CAM_ERR(CAM_TPG, "Invalid params");
return -EINVAL;
}
mutex_lock(&hw->mutex);
if (hw->waiting_request_q_depth <= MAX_WAITING_QUEUE_DEPTH) {
list_add_tail(&req->list, &hw->waiting_request_q);
hw->waiting_request_q_depth++;
} else {
CAM_ERR(CAM_TPG, "waiting queue size is exceed");
}
mutex_unlock(&hw->mutex);
return rc;
}
/**
 * tpg_hw_create_request() - allocate a request with its per-request VC slot
 * table.
 * @hw:         tpg hw instance (provides max_vc_channels).
 * @request_id: CSL request id stored in the new request.
 *
 * Fix: hw->hw_info was dereferenced to size the slot array *before* the
 * NULL check on hw; the dereference now happens only after validation.
 *
 * Return: the new request (caller owns it; free with tpg_hw_free_request()
 * or hand over via tpg_hw_add_request()), or NULL on failure.
 */
struct tpg_hw_request *tpg_hw_create_request(
	struct tpg_hw *hw,
	uint64_t request_id)
{
	struct tpg_hw_request *req = NULL;
	uint32_t num_vc_channels = 0;
	uint32_t i = 0;

	if (!hw || !hw->hw_info) {
		CAM_ERR(CAM_TPG, "Invalid params");
		return NULL;
	}
	num_vc_channels = hw->hw_info->max_vc_channels;

	/* Allocate request */
	req = kzalloc(sizeof(struct tpg_hw_request),
			GFP_KERNEL);
	if (!req) {
		CAM_ERR(CAM_TPG, "TPG[%d] request allocation failed",
			hw->hw_idx);
		return NULL;
	}

	req->request_id = request_id;

	/* Allocate Vc slots in request */
	req->vc_slots = kcalloc(num_vc_channels, sizeof(struct tpg_vc_slot_info),
			GFP_KERNEL);
	req->vc_count = 0;
	if (!req->vc_slots) {
		CAM_ERR(CAM_TPG, "TPG[%d] vc slot allocation failed",
			hw->hw_idx);
		goto err_exit_1;
	}
	INIT_LIST_HEAD(&req->list);

	/* Initialize the slot variables */
	for (i = 0; i < num_vc_channels; i++) {
		req->vc_slots[i].slot_id = i;
		req->vc_slots[i].vc = -1;
		req->vc_slots[i].stream_count = 0;
		INIT_LIST_HEAD(&(req->vc_slots[i].head));
	}
	CAM_DBG(CAM_TPG, "TPG[%d] request(%lld) allocated success",
		hw->hw_idx, request_id);
	return req;

err_exit_1:
	kfree(req);
	return NULL;
}
/**
 * tpg_hw_request_set_opcode() - map a CSL packet opcode to the request type.
 * @req:    request whose request_type is set.
 * @opcode: low byte of the packet header opcode.
 *
 * INITIAL_CONFIG -> INIT, NOP -> NOP, UPDATE -> UPDATE; any unknown opcode
 * deliberately falls back to NOP so an unrecognized packet is harmless.
 * Fix: request_id is u64 — the log used %d; now %llu.
 *
 * Return: 0 on success, -EINVAL on NULL @req.
 */
int tpg_hw_request_set_opcode(
	struct tpg_hw_request *req,
	uint32_t opcode)
{
	int rc = 0;

	if (!req) {
		CAM_ERR(CAM_TPG, "Invalid params");
		return -EINVAL;
	}

	switch (opcode) {
	case CAM_TPG_PACKET_OPCODE_INITIAL_CONFIG:
		req->request_type = TPG_HW_REQ_TYPE_INIT;
		break;
	case CAM_TPG_PACKET_OPCODE_NOP:
		req->request_type = TPG_HW_REQ_TYPE_NOP;
		break;
	case CAM_TPG_PACKET_OPCODE_UPDATE:
		req->request_type = TPG_HW_REQ_TYPE_UPDATE;
		break;
	default:
		/* unknown opcodes degrade to a harmless NOP */
		req->request_type = TPG_HW_REQ_TYPE_NOP;
		break;
	}
	CAM_INFO(CAM_TPG, "req[%llu] type = %d",
		req->request_id,
		req->request_type);
	return rc;
}
/**
 * tpg_hw_apply() - apply the request identified by @request_id on the HW.
 * @hw:         tpg hw instance.
 * @request_id: CRM request id to look up in the waiting/active queues.
 *
 * Thin locking wrapper around tpg_hw_lookup_queues_and_apply_req_locked().
 *
 * Return: 0 on success, -EINVAL on NULL @hw, otherwise the lookup/apply
 * error code.
 */
int tpg_hw_apply(
	struct tpg_hw *hw,
	uint64_t request_id)
{
	int ret;

	if (hw == NULL) {
		CAM_ERR(CAM_TPG, "Invalid params");
		return -EINVAL;
	}

	mutex_lock(&hw->mutex);
	ret = tpg_hw_lookup_queues_and_apply_req_locked(hw, request_id);
	mutex_unlock(&hw->mutex);

	return ret;
}
int tpg_hw_add_stream_v3(
struct tpg_hw *hw,
struct tpg_hw_request *req,
struct tpg_stream_config_v3_t *cmd)
{
int rc = 0;
struct tpg_hw_stream_v3 *stream = NULL;
if (!hw || !cmd) {
if (!hw || !req || !cmd) {
CAM_ERR(CAM_TPG, "Invalid params");
return -EINVAL;
}
@@ -1090,7 +1447,7 @@ int tpg_hw_add_stream_v3(
cmd,
sizeof(struct tpg_stream_config_v3_t));
rc = assign_vc_slot_v3(hw, stream->stream.vc, stream);
rc = assign_vc_slot_v3(hw, stream->stream.vc, req, stream);
mutex_unlock(&hw->mutex);
return rc;
}

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __TPG_HW_H__
@@ -12,6 +12,9 @@
#include "cam_soc_util.h"
#include <cam_cpas_api.h>
#include <media/cam_sensor.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#define TPG_HW_VERSION_1_0 0x10000000
#define TPG_HW_VERSION_1_1 0x10000001
#define TPG_HW_VERSION_1_2 0x10000002
@@ -45,6 +48,7 @@ struct tpg_hw_ops {
int (*dump_status)(struct tpg_hw *hw, void *data);
int (*write_settings)(struct tpg_hw *hw,
struct tpg_settings_config_t *config, struct tpg_reg_settings *settings);
irqreturn_t (*handle_irq)(struct tpg_hw *hw);
};
/**
@@ -52,10 +56,40 @@ struct tpg_hw_ops {
*
* TPG_HW_STATE_HW_DISABLED: tpg hw is not enabled yet
* TPG_HW_STATE_HW_ENABLED : tpg hw is enabled
* TPG_HW_STATE_READY : tpg hw is ready
* TPG_HW_STATE_BUSY : tpg hw is busy
 * TPG_HW_STATE_MAX : number of tpg hw states (sentinel)
*/
enum tpg_hw_state {
TPG_HW_STATE_HW_DISABLED,
TPG_HW_STATE_HW_ENABLED,
TPG_HW_STATE_READY,
TPG_HW_STATE_BUSY,
TPG_HW_STATE_MAX
};
#define TPG_HW_REQ_TYPE_NOP 0x00
#define TPG_HW_REQ_TYPE_INIT 0x01
#define TPG_HW_REQ_TYPE_UPDATE 0x02
#define TPG_HW_REQ_TYPE_BYPASS 0x03
/**
* tpg_hw_request : tpg hw request info
*
* @request_id : request id
* @request_type : request type
* @global_config : global configuration
* @vc_slots : vc slots
* @vc_count : vc count
* @list : list
*/
struct tpg_hw_request {
uint64_t request_id;
int request_type;
struct tpg_global_config_t global_config;
struct tpg_vc_slot_info *vc_slots;
uint32_t vc_count;
struct list_head list;
};
/**
@@ -132,11 +166,21 @@ struct tpg_hw_stream_v3 {
* @stream_version : stream version
* @state : tpg hw state
* @cpas_handle : handle to cpas
* @vc_count : vc count
* @tpg_clock : tpg clock
 * @hw_info : tpg hw info
* @soc_info : soc info
* @mutex : lock
* @stream_list : list of tpg stream
* @global_config : global configuration
* @complete_rup : rup done completion
* @vc_slots : vc slots info
* @settings_config : settings configuration
* @register_settings : register settings info
* @tpg_hw_irq_handler : handle hw irq
* @waiting_request_q : queue of waiting requests to be applied
 * @active_request_q : queue of active requests being applied
* @active_request_q_depth : active request queue depth
* @waiting_request_q_depth : waiting request queue depth
 * @settings_update : settings update flag
*/
struct tpg_hw {
uint32_t hw_idx;
@@ -144,13 +188,22 @@ struct tpg_hw {
uint32_t state;
uint32_t cpas_handle;
uint32_t vc_count;
uint64_t tpg_clock;
struct tpg_hw_info *hw_info;
struct cam_hw_soc_info *soc_info;
struct mutex mutex;
spinlock_t hw_state_lock;
struct completion complete_rup;
struct tpg_vc_slot_info *vc_slots;
struct tpg_global_config_t global_config;
struct tpg_settings_config_t settings_config;
struct tpg_reg_settings *register_settings;
int (*tpg_hw_irq_handler)(struct tpg_hw *hw);
/* Waiting and active request Queues */
struct list_head waiting_request_q;
struct list_head active_request_q;
uint32_t active_request_q_depth;
uint32_t waiting_request_q_depth;
uint32_t settings_update;
};
/**
@@ -291,7 +344,8 @@ int32_t cam_tpg_mem_dmp(struct cam_hw_soc_info *soc_info);
*
* @return : 0 on success
*/
int dump_global_configs(int idx, struct tpg_global_config_t *global);
int dump_global_configs(int idx,
struct tpg_global_config_t *global);
/**
* @brief : dump stream config command
@@ -302,7 +356,9 @@ int dump_global_configs(int idx, struct tpg_global_config_t *global);
*
* @return : 0 on success
*/
int dump_stream_configs(int hw_idx, int stream_idx, struct tpg_stream_config_t *stream);
int dump_stream_configs(int hw_idx,
int stream_idx,
struct tpg_stream_config_t *stream);
/**
* @brief : dump stream config command version 2
@@ -372,15 +428,6 @@ int tpg_hw_release(struct tpg_hw *hw);
*/
int tpg_hw_config(struct tpg_hw *hw, enum tpg_hw_cmd_t cmd, void *arg);
/**
* @brief : tpg free streams
*
* @param hw: tpg hw instance
*
* @return : 0 on success
*/
int tpg_hw_free_streams(struct tpg_hw *hw);
/**
* @brief : reset the tpg hw instance
*
@@ -394,32 +441,87 @@ int tpg_hw_reset(struct tpg_hw *hw);
* @brief : tp hw add stream
*
* @param hw: tpg hw instance
* @param req: req instance
* @param cmd: tpg hw command
*
* @return : 0 on success
*/
int tpg_hw_add_stream(struct tpg_hw *hw, struct tpg_stream_config_t *cmd);
int tpg_hw_add_stream(
struct tpg_hw *hw,
struct tpg_hw_request *req,
struct tpg_stream_config_t *cmd);
/**
 * @brief : tpg hw add stream version 3
*
* @param hw: tpg hw instance
* @param req: req instance
 * @param cmd: tpg hw command version 3
*
* @return : 0 on success
*/
int tpg_hw_add_stream_v3(struct tpg_hw *hw, struct tpg_stream_config_v3_t *cmd);
int tpg_hw_add_stream_v3(
struct tpg_hw *hw,
struct tpg_hw_request *req,
struct tpg_stream_config_v3_t *cmd);
/**
* @brief : copy global config command
* @brief : hw apply
*
* @param hw: tpg hw instance
* @param global: global config command
* @param request_id: request id
*
* @return : 0 on success
*/
int tpg_hw_copy_global_config(struct tpg_hw *hw, struct tpg_global_config_t *global);
int tpg_hw_apply(
struct tpg_hw *hw,
uint64_t request_id);
/**
* @brief : create hw request
*
* @param hw: tpg hw instance
* @param request_id: request id
*
* @return : req on success
*/
struct tpg_hw_request *tpg_hw_create_request(
struct tpg_hw *hw,
uint64_t request_id);
/**
* @brief : free hw request
*
* @param hw: tpg hw instance
* @param req: request instance
*
* @return : 0 on success
*/
int tpg_hw_free_request(struct tpg_hw *hw,
struct tpg_hw_request *req);
/**
* @brief : add hw request
*
* @param hw: tpg hw instance
* @param req: request instance
*
* @return : 0 on success
*/
int tpg_hw_add_request(struct tpg_hw *hw,
struct tpg_hw_request *req);
/**
* @brief : set opcode of request
*
* @param hw: tpg hw instance
* @param opcode: op code
*
* @return : 0 on success
*/
int tpg_hw_request_set_opcode(
struct tpg_hw_request *req,
uint32_t opcode);
/**
* @brief : copy settings config command

View File

@@ -14,18 +14,16 @@ enum tpg_hw_v_1_4_encode_fomat_t {
RAW_16_BIT
};
#define CAM_TPG_HW_WAIT_TIMEOUT msecs_to_jiffies(100)
#define FRAME_INTERLEAVE 0x0
#define LINE_INTERLEAVE 0x1
#define SHDR_INTERLEAVE 0x2
#define SPARSE_PD_INTERLEAVE 0x3
#define CFA_PATTERN_ROW_WIDTH 8
#define CFA_PATTERN_BITS_PER_INDEX 2
#define Invalid 0x0
#define Red 0x0
#define Green 0x1
#define Blue 0x2
#define IR 0x3
#define Mono 0x3
#define TIMEOUT_MULTIPLIER 1
#define TIMEOUT_MULTIPLIER_PRESIL 5
#define TPG_DISABLE 0
static int get_tpg_vc_dt_pattern_id(
enum tpg_interleaving_format_t vc_dt_pattern)
@@ -155,6 +153,10 @@ static int configure_global_configs(
struct tpg_global_config_t *configs)
{
uint32_t val, phy_type = 0;
uint32_t timeout_multiplier = -1;
int rc = 0;
unsigned long wait_jiffies = 0;
unsigned long rem_jiffies = 0;
struct cam_hw_soc_info *soc_info = NULL;
struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL;
@@ -174,9 +176,10 @@ static int configure_global_configs(
return -EINVAL;
}
/* throttle moved to vc config
* Tpg neable bit is moved to TPG_CMD register
**/
val = (1 << tpg_reg->rup_done_mask_vec_shift) | (1 << tpg_reg->tpg_done_mask_vec_shift);
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
tpg_reg->top_irq_mask);
val = (num_vcs - 1) <<
(tpg_reg->num_active_vc_shift) |
(configs->lane_count - 1) << (tpg_reg->num_active_lanes_shift) |
@@ -192,7 +195,29 @@ static int configure_global_configs(
cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl_cmd);
CAM_DBG(CAM_TPG, "tpg[%d] tpg_ctrl_cmd=0x%x", hw->hw_idx, val);
return 0;
if (hw->settings_update) {
cam_io_w_mb(TPG_DISABLE, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl_cmd);
if (cam_presil_mode_enabled())
timeout_multiplier = TIMEOUT_MULTIPLIER_PRESIL;
else
timeout_multiplier = TIMEOUT_MULTIPLIER;
CAM_DBG(CAM_TPG, "wait for TPG done and RUP done Interrupt");
wait_jiffies = CAM_TPG_HW_WAIT_TIMEOUT * timeout_multiplier;
reinit_completion(&hw->complete_rup);
rem_jiffies =
cam_common_wait_for_completion_timeout(&hw->complete_rup, wait_jiffies);
if (rem_jiffies) {
val = (1 << tpg_reg->test_en_cmd_shift);
cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->tpg_ctrl_cmd);
hw->settings_update = 0;
} else {
CAM_ERR(CAM_TPG, "TPG[%d] hw timeout %llu",
hw->hw_idx, rem_jiffies);
rc = -EBUSY;
}
}
return rc;
}
static int get_pixel_coordinate(
@@ -733,6 +758,48 @@ int tpg_hw_v_1_4_stop(struct tpg_hw *hw, void *data)
return 0;
}
/**
 * tpg_hw_v_1_4_handle_irq() - top-half interrupt handler for TPG v1.4
 *
 * @hw: tpg hw instance whose top_irq_status is to be serviced
 *
 * Reads top_irq_status; on RUP-done / TPG-done, marks the hw READY
 * (under hw_state_lock, since apply paths read the state concurrently),
 * clears the latched status, and completes complete_rup so the waiter
 * in the global-config path can proceed.
 *
 * Return: IRQ_NONE on invalid params, IRQ_HANDLED otherwise.
 */
irqreturn_t tpg_hw_v_1_4_handle_irq(struct tpg_hw *hw)
{
	struct cam_hw_soc_info *soc_info = NULL;
	uint32_t val;
	unsigned long flags;
	struct cam_tpg_ver_1_4_reg_offset *tpg_reg = NULL;
	bool rup_done = false;
	if (!hw || !hw->hw_info || !hw->hw_info->hw_data || !hw->soc_info) {
		CAM_ERR(CAM_TPG, "Invalid Params");
		return IRQ_NONE;
	}
	tpg_reg = hw->hw_info->hw_data;
	soc_info = hw->soc_info;
	/* Latch the current top-level irq status */
	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		tpg_reg->top_irq_status);
	if (val & ((1 << tpg_reg->rup_done_status_shift) |
		(1 << tpg_reg->tpg_done_status_shift))) {
		CAM_DBG(CAM_TPG, "Got TPG Interrupt val = 0x%x", val);
		if (val & (1 << tpg_reg->rup_done_status_shift)) {
			rup_done = true;
			/* hw_state_lock: state is also read outside irq context */
			spin_lock_irqsave(&hw->hw_state_lock, flags);
			hw->state = TPG_HW_STATE_READY;
			spin_unlock_irqrestore(&hw->hw_state_lock, flags);
		}
		/* Ack: write status back to clear, then issue the clear command */
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->top_irq_clear);
		val = (1 << tpg_reg->clear_shift);
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base + tpg_reg->irq_cmd);
		/* Signal only after the irq has been fully acked above */
		if (rup_done)
			complete(&hw->complete_rup);
	} else {
		CAM_ERR(CAM_TPG, "Not a valid event 0x%x", val);
	}
	/* NOTE(review): returns IRQ_HANDLED even for unrecognized status;
	 * if this line were shared, IRQ_NONE may be more correct — confirm.
	 */
	return IRQ_HANDLED;
}
int tpg_hw_v_1_4_dump_status(struct tpg_hw *hw, void *data)
{

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __TPG_HW_V_1_4_H__
@@ -255,9 +255,13 @@ struct cam_tpg_ver_1_4_reg_offset {
int shdr_offset_num_batch_shift;
int shdr_line_offset1_shift;
int shdr_line_offset0_shift;
int tpg_done_status_shift;
int rup_done_status_shift;
int status_vec_shift;
int mask_vec_shift;
int clear_vec_shift;
int rup_done_mask_vec_shift;
int tpg_done_mask_vec_shift;
int rup_done_clear_vec_shift;
int tpg_done_clear_vec_shift;
int set_vec_shift;
int set_shift;
int clear_shift;
@@ -317,10 +321,26 @@ int tpg_hw_v_1_4_process_cmd(struct tpg_hw *hw,
* @param hw : tpg hw instance
* @param data : argument for status dump
*
* @return : 0 on sucdess
* @return : 0 on success
*/
int tpg_hw_v_1_4_dump_status(struct tpg_hw *hw, void *data);
/**
* @brief hw layer initialization
*
* @param hw : tpg hw instance
*
* @return : 0 on success
*/
int tpg_1_4_layer_init(struct tpg_hw *hw);
/**
* @brief handle tpg irq
*
* @param hw : tpg hw instance
*
* @return : IRQ_HANDLED on success
*/
irqreturn_t tpg_hw_v_1_4_handle_irq(struct tpg_hw *hw);
#endif /* __TPG_HW_V_1_4_H__ */

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __TPG_HW_V_1_4_0_H__
@@ -253,9 +253,13 @@ struct cam_tpg_ver_1_4_reg_offset cam_tpg104_reg = {
.shdr_offset_num_batch_shift = 0x10,
.shdr_line_offset1_shift = 0x10,
.shdr_line_offset0_shift = 0x0,
.tpg_done_status_shift = 0x0,
.rup_done_status_shift = 0x1,
.status_vec_shift = 0x0,
.mask_vec_shift = 0x0,
.clear_vec_shift = 0x0,
.rup_done_mask_vec_shift = 0x1,
.tpg_done_mask_vec_shift = 0x0,
.rup_done_clear_vec_shift = 0x1,
.tpg_done_clear_vec_shift = 0x0,
.set_vec_shift = 0x0,
.set_shift = 0x4,
.clear_shift = 0x0,

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __TPG_HW_V_1_4_DATA_H__
@@ -19,6 +19,7 @@ struct tpg_hw_ops tpg_hw_v_1_4_ops = {
.process_cmd = tpg_hw_v_1_4_process_cmd,
.dump_status = tpg_hw_v_1_4_dump_status,
.write_settings = tpg_hw_write_settings,
.handle_irq = tpg_hw_v_1_4_handle_irq,
};
struct tpg_hw_info tpg_v_1_4_hw_info = {

View File

@@ -106,6 +106,7 @@ enum cam_tpg_packet_opcodes {
CAM_TPG_PACKET_OPCODE_INVALID = 0,
CAM_TPG_PACKET_OPCODE_INITIAL_CONFIG,
CAM_TPG_PACKET_OPCODE_NOP,
CAM_TPG_PACKET_OPCODE_UPDATE,
CAM_TPG_PACKET_OPCODE_MAX,
};