dataipa: Add ipa_rtp generic netlink interface support

Enable the ipa-rtp generic netlink interface for the XR use-case, so that
the IPA driver can receive all commands from the IPA Codec2 component when
setting up the control path of IPA HW RTP de-packetization on Niobe.

Change-Id: I498204f7cd37675f24e9db6a10fd5668416ef45b
Signed-off-by: Jagadeesh Ponduru <quic_jponduru@quicinc.com>
This commit is contained in:
Jagadeesh Ponduru
2024-02-23 12:00:36 +05:30
parent 024163aed6
commit 90e8adec6c
5 changed files with 898 additions and 1 deletions

View File

@@ -207,6 +207,12 @@ def define_modules(target, variant):
"drivers/platform/msm/ipa/test/ipa_test_ntn.c",
],
},
"CONFIG_ARCH_NIOBE": {
True: [
"drivers/platform/msm/ipa/ipa_v3/ipa_rtp_genl.h",
"drivers/platform/msm/ipa/ipa_v3/ipa_rtp_genl.c",
],
},
},
local_defines = [
"GSI_TRACE_INCLUDE_PATH={}/drivers/platform/msm/gsi".format(include_base),

View File

@@ -12466,6 +12466,9 @@ static int __init ipa_module_init(void)
/* Register as a PCI device driver */
return pci_register_driver(&ipa_pci_driver);
}
#ifdef CONFIG_IPA_RTP
ipa_rtp_genl_init();
#endif
register_pm_notifier(&ipa_pm_notifier);
/* Register as a platform device driver */
@@ -12482,6 +12485,9 @@ static void __exit ipa_module_exit(void)
kfree(ipa3_ctx->hw_stats);
ipa3_ctx->hw_stats = NULL;
}
#ifdef CONFIG_IPA_RTP
ipa_rtp_genl_deinit();
#endif
unregister_pm_notifier(&ipa_pm_notifier);
kfree(ipa3_ctx);
ipa3_ctx = NULL;

View File

@@ -2,7 +2,7 @@
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA3_I_H_
@@ -43,6 +43,9 @@
#include <linux/rmnet_ipa_fd_ioctl.h>
#include "ipa_uc_holb_monitor.h"
#include <soc/qcom/minidump.h>
#ifdef CONFIG_IPA_RTP
#include "ipa_rtp_genl.h"
#endif
#define IPA_DEV_NAME_MAX_LEN 15
#define DRV_NAME "ipa"
@@ -2423,6 +2426,7 @@ struct ipa3_context {
bool ipa_wdi2_over_gsi;
bool ipa_wdi3_over_gsi;
bool ipa_wdi_opt_dpath;
u8 rtp_stream_id_cnt;
bool ipa_endp_delay_wa;
bool lan_coal_enable;
bool ipa_fltrt_not_hashable;

View File

@@ -0,0 +1,567 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "ipa_rtp_genl.h"
#include "ipa_i.h"
#include <net/sock.h>
#include <linux/skbuff.h>
#include <uapi/linux/in.h>
#define MAX_OPEN_FRAMES 3
/* Single-NAL:0, FU-A Type: 1 */
#define MAX_STREAM_TYPES 2
#define MAX_IP_TYPES 2

/* Declare a generic-netlink op with a doit handler only (no dump support) */
#define IPA_RTP_GENL_OP(_cmd, _func) \
	{ \
		.cmd = _cmd, \
		.doit = _func, \
		.dumpit = NULL, \
		.flags = 0, \
	}

/* Stream-id allocation map: si[i] != 0 means stream-id i is in use.
 * NOTE(review): MAX_STREAMS is not defined in this file — presumably it comes
 * from ipa_i.h; confirm. Accessed from several genl handlers without an
 * explicit lock; this relies on genl serializing ops of one family — verify
 * no other context touches it.
 */
static u8 si[MAX_STREAMS];

/* Attribute validation policy: fixed-size binary attributes must match the
 * corresponding request struct exactly (NLA_POLICY_EXACT_LEN).
 */
static struct nla_policy ipa_rtp_genl_attr_policy[IPA_RTP_GENL_ATTR_MAX + 1] = {
	[IPA_RTP_GENL_ATTR_STR] = { .type = NLA_NUL_STRING, .len = IPA_RTP_GENL_MAX_STR_LEN },
	[IPA_RTP_GENL_ATTR_INT] = { .type = NLA_S32 },
	[IPA_RTP_GENL_ATTR_TUPLE_INFO] = NLA_POLICY_EXACT_LEN(sizeof(struct traffic_tuple_info)),
	[IPA_RTP_GENL_ATTR_ASSIGN_STREAM_ID] =
		NLA_POLICY_EXACT_LEN(sizeof(struct assign_stream_id)),
	[IPA_RTP_GENL_ATTR_ADD_BITSTREAM_BUFF] =
		NLA_POLICY_EXACT_LEN(sizeof(struct bitstream_buffers)),
	[IPA_RTP_GENL_ATTR_SMMU_MAP_BUFF] = NLA_POLICY_EXACT_LEN(sizeof(struct map_buffer)),
	[IPA_RTP_GENL_ATTR_SMMU_UNMAP_BUFF] = NLA_POLICY_EXACT_LEN(sizeof(struct unmap_buffer)),
	[IPA_RTP_GENL_ATTR_REMOVE_STREAM_ID] =
		NLA_POLICY_EXACT_LEN(sizeof(struct remove_bitstream_buffers)),
};

/* Command -> handler dispatch table for the ipa_rtp family */
static const struct genl_ops ipa_rtp_genl_ops[] = {
	IPA_RTP_GENL_OP(IPA_RTP_GENL_CMD_TUPLE_INFO,
			ipa_rtp_tuple_info_req_hdlr),
	IPA_RTP_GENL_OP(IPA_RTP_GENL_CMD_ADD_BITSTREAM_BUFF,
			ipa_rtp_add_bitstream_buff_req_hdlr),
	IPA_RTP_GENL_OP(IPA_RTP_GENL_CMD_SMMU_MAP_BUFF,
			ipa_rtp_smmu_map_buff_req_hdlr),
	IPA_RTP_GENL_OP(IPA_RTP_GENL_CMD_SMMU_UNMAP_BUFF,
			ipa_rtp_smmu_unmap_buff_req_hdlr),
	IPA_RTP_GENL_OP(IPA_RTP_GENL_CMD_REMOVE_STREAM_ID,
			ipa_rtp_rmv_stream_id_req_hdlr),
};

/* Family definition; .id = 0 requests a dynamically assigned family id */
struct genl_family ipa_rtp_genl_family = {
	.id = 0,
	.hdrsize = 0,
	.name = IPA_RTP_GENL_FAMILY_NAME,
	.version = IPA_RTP_GENL_VERSION,
	.maxattr = IPA_RTP_GENL_ATTR_MAX,
	.policy = ipa_rtp_genl_attr_policy,
	.ops = ipa_rtp_genl_ops,
	.n_ops = ARRAY_SIZE(ipa_rtp_genl_ops),
};
/**
 * ipa_rtp_send_tuple_info_resp() - unicast the assigned stream-id to userspace
 * @info: genl request info of the TUPLE_INFO command being answered
 * @tuple_info_resp: stream-id assignment to send back to IPA C2
 *
 * Builds an IPA_RTP_GENL_CMD_ASSIGN_STREAM_ID message and unicasts it to the
 * requesting port. On success increments ipa3_ctx->rtp_stream_id_cnt.
 *
 * Return: 0 on success, negative/err value otherwise.
 */
int ipa_rtp_send_tuple_info_resp(struct genl_info *info,
				 struct assign_stream_id *tuple_info_resp)
{
	struct sk_buff *skb;
	void *msg_head;
	int rc = -1;

	IPADBG_LOW("Entry\n");

	if (!info || !tuple_info_resp) {
		IPAERR("Invalid params\n");
		return rc;
	}

	skb = genlmsg_new(sizeof(struct assign_stream_id), GFP_KERNEL);
	if (!skb) {
		IPAERR("failed to alloc genmsg_new\n");
		return rc;
	}

	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
			       &ipa_rtp_genl_family,
			       0, IPA_RTP_GENL_CMD_ASSIGN_STREAM_ID);
	if (!msg_head) {
		IPAERR("failed at genlmsg_put\n");
		goto free_skb;
	}

	rc = nla_put(skb, IPA_RTP_GENL_ATTR_ASSIGN_STREAM_ID,
		     sizeof(struct assign_stream_id),
		     tuple_info_resp);
	if (rc != 0) {
		IPAERR("failed at nla_put skb\n");
		goto free_skb;
	}

	genlmsg_end(skb, msg_head);

	/* genlmsg_unicast() consumes the skb on both success and failure,
	 * so it must not be freed here afterwards (the original code freed
	 * it unconditionally, causing a double free / use-after-free).
	 */
	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
	if (rc != 0) {
		IPAERR("failed in doing genlmsg_unicast\n");
		return rc;
	}

	ipa3_ctx->rtp_stream_id_cnt++;
	IPADBG("assigned stream-id is %u\n", tuple_info_resp->stream_id);
	IPADBG_LOW("Exit\n");
	return rc;

free_skb:
	/* netlink buffers are freed with nlmsg_free(), not kfree() */
	nlmsg_free(skb);
	return rc;
}
/**
 * ipa_rtp_tuple_info_req_hdlr() - handle IPA_RTP_GENL_CMD_TUPLE_INFO
 * @skb_2: unused (genl doit signature)
 * @info: genl request info carrying IPA_RTP_GENL_ATTR_TUPLE_INFO
 *
 * Validates the traffic-tuple request, allocates the first free stream-id
 * from the si[] map and unicasts it back via ipa_rtp_send_tuple_info_resp().
 * On response failure the allocated stream-id is released again.
 *
 * Return: 0 on success, -1 otherwise.
 */
int ipa_rtp_tuple_info_req_hdlr(struct sk_buff *skb_2,
				struct genl_info *info)
{
	struct nlattr *na;
	struct traffic_tuple_info tuple_info_req;
	struct assign_stream_id tuple_info_resp;
	int is_req_valid = 0, i = 0;
	int stream_id_available = 0, rc = -1;

	IPADBG("Entry\n");

	if (!info) {
		IPAERR("error genl info is null\n");
		return rc;
	}

	/* Copy the fixed-size tuple attribute out of the netlink message */
	na = info->attrs[IPA_RTP_GENL_ATTR_TUPLE_INFO];
	if (na) {
		if (nla_memcpy(&tuple_info_req, na,
			       sizeof(tuple_info_req)) > 0) {
			is_req_valid = 1;
		} else {
			IPAERR("nla_memcpy failed %d\n",
			       IPA_RTP_GENL_ATTR_TUPLE_INFO);
			return rc;
		}
	} else {
		IPAERR("no info->attrs %d\n",
		       IPA_RTP_GENL_ATTR_TUPLE_INFO);
		return rc;
	}

	/* Range-check the traffic-selector fields before using them */
	if (tuple_info_req.ts_info.no_of_openframe <= 0 ||
	    tuple_info_req.ts_info.no_of_openframe > MAX_OPEN_FRAMES ||
	    tuple_info_req.ts_info.stream_type >= MAX_STREAM_TYPES ||
	    !tuple_info_req.ts_info.max_pkt_frame ||
	    tuple_info_req.ip_type >= MAX_IP_TYPES) {
		IPAERR("invalid no-of-open-frames %u or stream_type %u\n",
		       tuple_info_req.ts_info.no_of_openframe,
		       tuple_info_req.ts_info.stream_type);
		IPAERR("or max_pkt_frames %u or ip_type %u params\n",
		       tuple_info_req.ts_info.max_pkt_frame,
		       tuple_info_req.ip_type);
		return rc;
	}

	/* IPv4 Type: ip_type == 0 selects the ipv4 member of the union */
	if (!tuple_info_req.ip_type) {
		if (tuple_info_req.ip_info.ipv4.protocol != IPPROTO_UDP ||
		    !tuple_info_req.ip_info.ipv4.src_ip ||
		    !tuple_info_req.ip_info.ipv4.dst_ip) {
			IPAERR("invalid src_ip %u or dst_ip %u or protocol %u params\n",
			       tuple_info_req.ip_info.ipv4.src_ip, tuple_info_req.ip_info.ipv4.dst_ip,
			       tuple_info_req.ip_info.ipv4.protocol);
			return rc;
		}
	} else {
		/* IPv6: only the protocol is validated here */
		if (tuple_info_req.ip_info.ipv6.protocol != IPPROTO_UDP) {
			IPAERR("invalid ipv6 protocol %u params\n",
			       tuple_info_req.ip_info.ipv6.protocol);
			return rc;
		}
	}

	IPADBG_LOW("no_of_openframes are %u\n", tuple_info_req.ts_info.no_of_openframe);
	IPADBG_LOW("max_pkt_frame is %u\n", tuple_info_req.ts_info.max_pkt_frame);
	IPADBG_LOW("stream_type is %u\n", tuple_info_req.ts_info.stream_type);
	IPADBG_LOW("reorder_timeout is %u\n", tuple_info_req.ts_info.reorder_timeout);
	IPADBG_LOW("num_slices_per_frame are %u\n", tuple_info_req.ts_info.num_slices_per_frame);
	IPADBG_LOW("ip_type is %u\n", tuple_info_req.ip_type);
	IPADBG_LOW("src_port_number is %u\n", tuple_info_req.ip_info.ipv4.src_port_number);
	IPADBG_LOW("dst_port_number is %u\n", tuple_info_req.ip_info.ipv4.dst_port_number);
	IPADBG_LOW("src_ip is %u\n", tuple_info_req.ip_info.ipv4.src_ip);
	IPADBG_LOW("dst_ip is %u\n", tuple_info_req.ip_info.ipv4.dst_ip);
	IPADBG_LOW("protocol is %u\n", tuple_info_req.ip_info.ipv4.protocol);

	/* Call IPA driver/uC tuple info API's here */
	memset(&tuple_info_resp, 0, sizeof(tuple_info_resp));

	/* Grab the first free stream-id slot (si[i] == 0 means free).
	 * NOTE(review): no explicit locking on si[]; relies on genl
	 * serializing the ops of this family — confirm.
	 */
	for (i = 0; i < MAX_STREAMS; i++) {
		if (si[i] == 0) {
			tuple_info_resp.stream_id = i;
			si[i] = 1;
			stream_id_available = 1;
			break;
		}
	}

	if (!stream_id_available) {
		IPAERR("max stream-ids supported are four only\n");
		return rc;
	}

	/* On response failure, roll the stream-id allocation back */
	if (is_req_valid &&
	    ipa_rtp_send_tuple_info_resp(info, &tuple_info_resp))
		si[tuple_info_resp.stream_id] = 0;
	else
		rc = 0;

	IPADBG("Exit\n");
	return rc;
}
/**
 * ipa_rtp_smmu_map_buff_req_hdlr() - handle IPA_RTP_GENL_CMD_SMMU_MAP_BUFF
 * @skb_2: unused (genl doit signature)
 * @info: genl request info carrying IPA_RTP_GENL_ATTR_SMMU_MAP_BUFF
 *
 * Validates the map_buffer request (fd count, stream-id, per-fd sizes) and
 * forwards it to ipa3_map_buff_to_device_addr().
 *
 * Return: 0 on success, -1 (or driver error code) otherwise.
 */
int ipa_rtp_smmu_map_buff_req_hdlr(struct sk_buff *skb_2,
				   struct genl_info *info)
{
	struct nlattr *na;
	struct map_buffer map_buffer_req;
	int i = 0, is_req_valid = 0;
	int rc = -1;

	IPADBG("Entry\n");

	if (!info) {
		IPAERR("error genl info is null\n");
		return rc;
	}

	na = info->attrs[IPA_RTP_GENL_ATTR_SMMU_MAP_BUFF];
	if (na) {
		if (nla_memcpy(&map_buffer_req, na,
			       sizeof(map_buffer_req)) > 0) {
			is_req_valid = 1;
		} else {
			IPAERR("nla_memcpy failed %d\n",
			       IPA_RTP_GENL_ATTR_SMMU_MAP_BUFF);
			return rc;
		}
	} else {
		IPAERR("no info->attrs %d\n",
		       IPA_RTP_GENL_ATTR_SMMU_MAP_BUFF);
		return rc;
	}

	/* Valid stream-ids are 0..MAX_STREAMS-1, hence >= (the original
	 * ">" check let the out-of-range id MAX_STREAMS through; the other
	 * handlers already use ">=").
	 */
	if (map_buffer_req.nfd <= 0 || map_buffer_req.nfd > MAX_FDS
	    || map_buffer_req.stream_id >= MAX_STREAMS) {
		IPAERR("invalid nfd %u or stream_id %u params\n",
		       map_buffer_req.nfd, map_buffer_req.stream_id);
		return rc;
	}

	IPADBG_LOW("number of fd's are %u\n", map_buffer_req.nfd);
	IPADBG_LOW("stream_id is %u\n", map_buffer_req.stream_id);

	/* If IPA C2 component is providing two fd's for meta fd and bitstream buff fd then
	 * sizes need to be filled. If it is a single fd for both meta data and bitstream buff
	 * then meta_buff_fd and bitstream_buffer_fd will be the same. And they need to fill
	 * bitstream_buffer_size as actual size and meta_buff_size to zero.
	 */
	for (i = 0; i < map_buffer_req.nfd; i++) {
		if (map_buffer_req.buff_info[i].bitstream_buffer_fd ==
		    map_buffer_req.buff_info[i].meta_buff_fd) {
			/* single shared fd: meta size must be zero */
			if (!map_buffer_req.buff_info[i].bitstream_buffer_size ||
			    map_buffer_req.buff_info[i].meta_buff_size) {
				IPAERR("invalid bitstream_buff_size %u\n",
				       map_buffer_req.buff_info[i].bitstream_buffer_size);
				IPAERR("or meta_buff_size %u params\n",
				       map_buffer_req.buff_info[i].meta_buff_size);
				return rc;
			}
		} else {
			/* separate fds: both sizes must be non-zero */
			if (!map_buffer_req.buff_info[i].bitstream_buffer_size ||
			    !map_buffer_req.buff_info[i].meta_buff_size) {
				IPAERR("invalid bitstream_buff_size %u\n",
				       map_buffer_req.buff_info[i].bitstream_buffer_size);
				IPAERR("or meta_buff_size %u params\n",
				       map_buffer_req.buff_info[i].meta_buff_size);
				return rc;
			}
		}

		IPADBG_LOW("bitstream_buffer_fd is %u\n",
			   map_buffer_req.buff_info[i].bitstream_buffer_fd);
		IPADBG_LOW("meta_buff_fd is %u\n",
			   map_buffer_req.buff_info[i].meta_buff_fd);
		IPADBG_LOW("bitstream_buffer_size is %u\n",
			   map_buffer_req.buff_info[i].bitstream_buffer_size);
		IPADBG_LOW("meta_buff_size is %u\n",
			   map_buffer_req.buff_info[i].meta_buff_size);
	}

	/* Call IPA driver/uC API's here */
	if (is_req_valid)
		rc = ipa3_map_buff_to_device_addr(&map_buffer_req);

	IPADBG("Exit\n");
	return rc;
}
/**
 * ipa_rtp_smmu_unmap_buff_req_hdlr() - handle IPA_RTP_GENL_CMD_SMMU_UNMAP_BUFF
 * @skb_2: unused (genl doit signature)
 * @info: genl request info carrying IPA_RTP_GENL_ATTR_SMMU_UNMAP_BUFF
 *
 * Validates the unmap_buffer request (fd count, stream-id, per-fd sizes) and
 * forwards it to ipa3_unmap_buff_from_device_addr().
 *
 * Return: 0 on success, -1 (or driver error code) otherwise.
 */
int ipa_rtp_smmu_unmap_buff_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info)
{
	struct nlattr *na;
	struct unmap_buffer unmap_buffer_req;
	int i = 0, is_req_valid = 0, rc = -1;

	IPADBG("Entry\n");

	if (!info) {
		IPAERR("error genl info is null\n");
		return rc;
	}

	na = info->attrs[IPA_RTP_GENL_ATTR_SMMU_UNMAP_BUFF];
	if (na) {
		if (nla_memcpy(&unmap_buffer_req, na,
			       sizeof(unmap_buffer_req)) > 0) {
			is_req_valid = 1;
		} else {
			IPAERR("nla_memcpy failed %d\n",
			       IPA_RTP_GENL_ATTR_SMMU_UNMAP_BUFF);
			return rc;
		}
	} else {
		IPAERR("no info->attrs %d\n",
		       IPA_RTP_GENL_ATTR_SMMU_UNMAP_BUFF);
		return rc;
	}

	/* Valid stream-ids are 0..MAX_STREAMS-1, hence >= (the original
	 * ">" check let the out-of-range id MAX_STREAMS through; the other
	 * handlers already use ">=").
	 */
	if (unmap_buffer_req.nfd <= 0 || unmap_buffer_req.nfd > MAX_FDS
	    || unmap_buffer_req.stream_id >= MAX_STREAMS) {
		IPAERR("invalid nfd %u or stream_id %u params\n",
		       unmap_buffer_req.nfd, unmap_buffer_req.stream_id);
		return rc;
	}

	IPADBG_LOW("number of fd's are %u\n", unmap_buffer_req.nfd);
	IPADBG_LOW("stream_id is %u\n", unmap_buffer_req.stream_id);

	/* If IPA C2 component is providing two fd's for meta fd and bitstream buff fd then
	 * sizes need to be filled. If it is a single fd for both meta data and bitstream buff
	 * then meta_buff_fd and bitstream_buffer_fd will be the same. And they need to fill
	 * bitstream_buffer_size as actual size and meta_buff_size to zero.
	 */
	for (i = 0; i < unmap_buffer_req.nfd; i++) {
		if (unmap_buffer_req.buff_info[i].bitstream_buffer_fd ==
		    unmap_buffer_req.buff_info[i].meta_buff_fd) {
			/* single shared fd: meta size must be zero */
			if (!unmap_buffer_req.buff_info[i].bitstream_buffer_size ||
			    unmap_buffer_req.buff_info[i].meta_buff_size) {
				IPAERR("invalid bitstream_buff_size %u\n",
				       unmap_buffer_req.buff_info[i].bitstream_buffer_size);
				IPAERR("or meta_buff_size %u params\n",
				       unmap_buffer_req.buff_info[i].meta_buff_size);
				return rc;
			}
		} else {
			/* separate fds: both sizes must be non-zero */
			if (!unmap_buffer_req.buff_info[i].bitstream_buffer_size ||
			    !unmap_buffer_req.buff_info[i].meta_buff_size) {
				IPAERR("invalid bitstream_buff_size %u\n",
				       unmap_buffer_req.buff_info[i].bitstream_buffer_size);
				IPAERR("or meta_buff_size %u params\n",
				       unmap_buffer_req.buff_info[i].meta_buff_size);
				return rc;
			}
		}

		IPADBG_LOW("bitstream_buffer_fd is %u\n",
			   unmap_buffer_req.buff_info[i].bitstream_buffer_fd);
		IPADBG_LOW("meta_buff_fd is %u\n",
			   unmap_buffer_req.buff_info[i].meta_buff_fd);
		IPADBG_LOW("bitstream_buffer_size is %u\n",
			   unmap_buffer_req.buff_info[i].bitstream_buffer_size);
		IPADBG_LOW("meta_buff_size is %u\n",
			   unmap_buffer_req.buff_info[i].meta_buff_size);
	}

	/* Call IPA driver/uC tuple info API's here */
	if (is_req_valid)
		rc = ipa3_unmap_buff_from_device_addr(&unmap_buffer_req);

	IPADBG("Exit\n");
	return rc;
}
/**
 * ipa_rtp_add_bitstream_buff_req_hdlr() - handle IPA_RTP_GENL_CMD_ADD_BITSTREAM_BUFF
 * @skb_2: unused (genl doit signature)
 * @info: genl request info carrying IPA_RTP_GENL_ATTR_ADD_BITSTREAM_BUFF
 *
 * Validates the bitstream_buffers request (count, cookie, per-buffer
 * stream-id, offsets and sizes) and forwards it to
 * ipa3_send_bitstream_buff_info().
 *
 * Return: 0 on success, -1 (or driver error code) otherwise.
 */
int ipa_rtp_add_bitstream_buff_req_hdlr(struct sk_buff *skb_2,
					struct genl_info *info)
{
	struct nlattr *na;
	struct bitstream_buffers bs_buffer_req;
	int i = 0, is_req_valid = 0, rc = -1;

	IPADBG("Entry\n");

	if (!info) {
		IPAERR("error genl info is null\n");
		return rc;
	}

	na = info->attrs[IPA_RTP_GENL_ATTR_ADD_BITSTREAM_BUFF];
	if (na) {
		if (nla_memcpy(&bs_buffer_req, na,
			       sizeof(bs_buffer_req)) > 0) {
			is_req_valid = 1;
		} else {
			IPAERR("nla_memcpy failed %d\n",
			       IPA_RTP_GENL_ATTR_ADD_BITSTREAM_BUFF);
			return rc;
		}
	} else {
		IPAERR("no info->attrs %d\n",
		       IPA_RTP_GENL_ATTR_ADD_BITSTREAM_BUFF);
		return rc;
	}

	if (bs_buffer_req.buff_cnt <= 0 || bs_buffer_req.buff_cnt > MAX_BUFF ||
	    bs_buffer_req.cookie != IPA_BS_BUFF_COOKIE) {
		IPAERR("invalid buff_cnt %u or buff_cookie 0x%x params\n",
		       bs_buffer_req.buff_cnt, bs_buffer_req.cookie);
		return rc;
	}

	IPADBG_LOW("buff_cnt is %u\n", bs_buffer_req.buff_cnt);
	IPADBG_LOW("cookie is 0x%x\n", bs_buffer_req.cookie);

	/* If IPA C2 component is providing two buffers for meta data and bitstream buff,
	 * they need to fill meta_buff_offset and buff_offset as zero.
	 * If it is a single buffer for meta data and bitstream buff, then meta_buff_fd
	 * and buff_fd will be the same. And they need to fill meta_buff_offset as zero
	 * and fill the bitstream buff offset in buff_offset and it should be 4 byte aligned.
	 */
	for (i = 0; i < bs_buffer_req.buff_cnt; i++) {
		if (bs_buffer_req.bs_info[i].stream_id >= MAX_STREAMS) {
			IPAERR("invalid stream_id in buffer %u params\n",
			       bs_buffer_req.bs_info[i].stream_id);
			return rc;
		}

		if (bs_buffer_req.bs_info[i].meta_buff_fd == bs_buffer_req.bs_info[i].buff_fd) {
			/* Single shared fd: buff_offset must be non-zero and
			 * 4-byte aligned per the contract above (the alignment
			 * requirement was documented but previously unchecked).
			 */
			if (bs_buffer_req.bs_info[i].meta_buff_offset ||
			    !bs_buffer_req.bs_info[i].buff_offset ||
			    (bs_buffer_req.bs_info[i].buff_offset & 0x3) ||
			    bs_buffer_req.bs_info[i].meta_buff_size ||
			    !bs_buffer_req.bs_info[i].buff_size) {
				IPAERR("invalid meta_buff_offset %u or bs_buff_offset %u\n",
				       bs_buffer_req.bs_info[i].meta_buff_offset,
				       bs_buffer_req.bs_info[i].buff_offset);
				IPAERR("or meta_buff_size %u or bs_buff_size %u params\n",
				       bs_buffer_req.bs_info[i].meta_buff_size,
				       bs_buffer_req.bs_info[i].buff_size);
				return rc;
			}
		} else {
			/* Separate buffers: both offsets zero, both sizes set */
			if (bs_buffer_req.bs_info[i].meta_buff_offset ||
			    bs_buffer_req.bs_info[i].buff_offset ||
			    !bs_buffer_req.bs_info[i].meta_buff_size ||
			    !bs_buffer_req.bs_info[i].buff_size) {
				IPAERR("invalid meta_buff_offset %u or bs_buff_offset %u\n",
				       bs_buffer_req.bs_info[i].meta_buff_offset,
				       bs_buffer_req.bs_info[i].buff_offset);
				IPAERR("or meta_buff_size %u or bs_buff_size %u params\n",
				       bs_buffer_req.bs_info[i].meta_buff_size,
				       bs_buffer_req.bs_info[i].buff_size);
				return rc;
			}
		}

		IPADBG_LOW("stream_id is %u\n", bs_buffer_req.bs_info[i].stream_id);
		IPADBG_LOW("fence_id is %u\n", bs_buffer_req.bs_info[i].fence_id);
		IPADBG_LOW("buff_offset is %u\n", bs_buffer_req.bs_info[i].buff_offset);
		IPADBG_LOW("buff_fd is %u\n", bs_buffer_req.bs_info[i].buff_fd);
		IPADBG_LOW("buff_size is %u\n", bs_buffer_req.bs_info[i].buff_size);
		IPADBG_LOW("meta_buff_offset is %u\n", bs_buffer_req.bs_info[i].meta_buff_offset);
		IPADBG_LOW("meta_buff_fd is %u\n", bs_buffer_req.bs_info[i].meta_buff_fd);
		IPADBG_LOW("meta_buff_size is %u\n", bs_buffer_req.bs_info[i].meta_buff_size);
	}

	/* Call IPA driver/uC API's here */
	if (is_req_valid)
		rc = ipa3_send_bitstream_buff_info(&bs_buffer_req);

	IPADBG("Exit\n");
	return rc;
}
int ipa_rtp_rmv_stream_id_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info)
{
struct nlattr *na;
struct remove_bitstream_buffers rmv_sid_req;
int is_req_valid = 0, rc = -1;
IPADBG("Entry\n");
if (!info) {
IPAERR("error genl info is null\n");
return rc;
}
na = info->attrs[IPA_RTP_GENL_CMD_REMOVE_STREAM_ID];
if (na) {
if (nla_memcpy(&rmv_sid_req, na,
sizeof(rmv_sid_req)) > 0) {
is_req_valid = 1;
} else {
IPAERR("nla_memcpy failed %d\n",
IPA_RTP_GENL_CMD_REMOVE_STREAM_ID);
return rc;
}
} else {
IPAERR("no info->attrs %d\n",
IPA_RTP_GENL_CMD_REMOVE_STREAM_ID);
return rc;
}
if (rmv_sid_req.stream_id >= MAX_STREAMS) {
IPAERR("invalid stream_id %u params\n", rmv_sid_req.stream_id);
return rc;
}
/* Call IPA driver/uC tuple info API's here */
if (is_req_valid)
rc = ipa3_uc_send_remove_stream_cmd(&rmv_sid_req);
si[rmv_sid_req.stream_id] = 0;
ipa3_ctx->rtp_stream_id_cnt--;
IPADBG("Exit\n");
return rc;
}
/**
 * ipa_rtp_genl_init() - register the ipa_rtp family with generic netlink
 *
 * Called from the IPA driver init path. IPA C2 userspace later connects to
 * this family via the pre-defined name IPA_RTP_GENL_FAMILY_NAME.
 *
 * Return: 0 on success, the genl_register_family() error code otherwise.
 */
int ipa_rtp_genl_init(void)
{
	int rc;

	rc = genl_register_family(&ipa_rtp_genl_family);
	if (rc != 0) {
		/* The family was never registered on failure; calling
		 * genl_unregister_family() here (as the original did) is
		 * wrong and triggers a kernel warning.
		 */
		IPAERR("ipa_rtp genl register family failed: %d", rc);
		return rc;
	}

	/* success is not an error condition: log at debug level */
	IPADBG("successfully registered ipa_rtp genl family: %s",
	       IPA_RTP_GENL_FAMILY_NAME);
	return rc;
}
/**
 * ipa_rtp_genl_deinit() - unregister the ipa_rtp generic netlink family
 *
 * Return: 0 on success, the genl_unregister_family() error code otherwise.
 */
int ipa_rtp_genl_deinit(void)
{
	int ret = genl_unregister_family(&ipa_rtp_genl_family);

	if (ret)
		IPAERR("unregister ipa_rtp genl family failed: %d", ret);

	return ret;
}

View File

@@ -0,0 +1,314 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_RTP_GENL_H_
#define _IPA_RTP_GENL_H_
#include <net/genetlink.h>
/* Generic Netlink Definitions */
#define IPA_RTP_GENL_VERSION 1
#define IPA_RTP_GENL_FAMILY_NAME "ipa_rtp"
#define IPA_RTP_GENL_MAX_STR_LEN 255
#define MAX_BUFF 10
#define MAX_FDS 10
#define IPA_BS_BUFF_COOKIE 0x45670198
/* XR IPAC2 <-> IPA Commands */
/**
 * struct buffer_info - buffer information of map and unmap buffers.
 * @bitstream_buffer_fd: bit stream buffer file descriptor.
 * @meta_buff_fd: meta buffer file descriptor.
 * @bitstream_buffer_size: bit stream buffer fd size.
 * @meta_buff_size: meta buffer fd size.
 */
struct buffer_info {
	uint64_t bitstream_buffer_fd;
	uint64_t meta_buff_fd;
	uint64_t bitstream_buffer_size;
	uint64_t meta_buff_size;
};

/**
 * struct map_buffer - SMMU map buffers.
 * @nfd: number of fd's.
 * @stream_id: receiving stream ID.
 * @buff_info: buffer information to map buffers.
 */
struct map_buffer {
	uint32_t nfd;
	uint32_t stream_id;
	struct buffer_info buff_info[MAX_BUFF];
};

/**
 * struct unmap_buffer - SMMU unmap buffers.
 * @nfd: number of fd's.
 * @stream_id: receiving stream ID.
 * @buff_info: buffer information to unmap buffers.
 */
struct unmap_buffer {
	uint32_t nfd;
	uint32_t stream_id;
	struct buffer_info buff_info[MAX_BUFF];
};
/**
 * struct remove_bitstream_buffers - remove bitstream buffers.
 * @stream_id: stream ID to stop using bitstream buffers of the specific stream.
 */
struct remove_bitstream_buffers {
	uint32_t stream_id;
};

/**
 * struct traffic_selector_info - traffic selector information.
 * @no_of_openframe: no. of openframes in a stream.
 * @max_pkt_frame: maximum packets per frame.
 * @stream_type: type of stream.
 * @reorder_timeout: RTP packets reordering timeout.
 * @num_slices_per_frame: no. of slices per frame.
 */
struct traffic_selector_info {
	uint32_t no_of_openframe;
	uint32_t max_pkt_frame;
	uint32_t stream_type;
	uint64_t reorder_timeout;
	uint32_t num_slices_per_frame;
};
/**
 * struct ipv6_tuple_info - ipv6 tuple information.
 * @src_port_number: source port number.
 * @dst_port_number: dst port number.
 * @src_ip: source IP.
 * @dst_ip: dst IP.
 * @protocol: protocol type.
 */
struct ipv6_tuple_info {
	uint32_t src_port_number;
	uint32_t dst_port_number;
	uint8_t src_ip[16];
	uint8_t dst_ip[16];
	uint32_t protocol;
};

/**
 * struct ipv4_tuple_info - ipv4 tuple information.
 * @src_port_number: source port number.
 * @dst_port_number: dst port number.
 * @src_ip: source IP.
 * @dst_ip: dst IP.
 * @protocol: protocol type.
 */
struct ipv4_tuple_info {
	uint32_t src_port_number;
	uint32_t dst_port_number;
	uint32_t src_ip;
	uint32_t dst_ip;
	uint32_t protocol;
};

/**
 * union ip_tuple_info - ip tuple information (ipv4 or ipv6 view).
 * @ipv4: ipv4 tuple information.
 * @ipv6: ipv6 tuple information.
 */
union ip_tuple_info {
	struct ipv4_tuple_info ipv4;
	struct ipv6_tuple_info ipv6;
};
/**
 * struct traffic_tuple_info - traffic tuple information.
 * @ts_info: traffic selector information.
 * @ip_type: ip type (0 = ipv4, 1 = ipv6; selects the union member).
 * @ip_info: ip tuple information.
 */
struct traffic_tuple_info {
	struct traffic_selector_info ts_info;
	uint8_t ip_type;
	union ip_tuple_info ip_info;
};

/**
 * struct assign_stream_id - assign stream id for a stream.
 * @stream_id: assigned stream id.
 */
struct assign_stream_id {
	uint32_t stream_id;
};

/**
 * struct bitstream_buffer_info_to_ipa - bitstream buffer info to ipa.
 * @stream_id: stream Identifier.
 * @fence_id: fence Identifier.
 * @buff_offset: bit stream buffer offset.
 * @buff_fd: bit stream file descriptor.
 * @buff_size: bit stream buffer size.
 * @meta_buff_offset: bit stream metadata buffer offset.
 * @meta_buff_fd: bit stream metadata buffer file descriptor.
 * @meta_buff_size: bit stream metadata buffer size.
 */
struct bitstream_buffer_info_to_ipa {
	uint32_t stream_id;
	uint32_t fence_id;
	uint32_t buff_offset;
	uint32_t buff_fd;
	uint32_t buff_size;
	uint32_t meta_buff_offset;
	uint32_t meta_buff_fd;
	uint32_t meta_buff_size;
};

/**
 * struct bitstream_buffers - bitstream buffers.
 * @buff_cnt: number of buffers per stream.
 * @cookie: pre-defined macro per stream (must equal IPA_BS_BUFF_COOKIE).
 * @bs_info: bitstream buffer info to ipa, one entry per buffer.
 */
struct bitstream_buffers {
	uint32_t buff_cnt;
	uint32_t cookie;
	struct bitstream_buffer_info_to_ipa bs_info[MAX_BUFF];
};
/**
 * struct bitstream_buffer_info_to_uspace - bitstream buffer info to IPA C2.
 * @frame_id: frame Identifier.
 * @stream_id: stream Identifier.
 * @fence_id: fence Identifier.
 * @buff_offset: bit stream buffer offset.
 * @buff_fd: bit stream file descriptor.
 * @buff_size: bit stream buffer size.
 * @meta_buff_offset: bit stream metadata buffer offset.
 * @meta_buff_fd: bit stream metadata buffer file descriptor.
 * @meta_buff_size: bit stream metadata buffer size.
 * @reason_failure: reason for failure.
 * @qtime_first_pkt_processed: qtime of first packet processed.
 * @qtime_last_pkt_processed: qtime of last packet processed.
 */
struct bitstream_buffer_info_to_uspace {
	uint32_t frame_id;
	uint32_t stream_id;
	uint32_t fence_id;
	uint64_t buff_offset;
	uint32_t buff_fd;
	uint32_t buff_size;
	uint64_t meta_buff_offset;
	uint32_t meta_buff_fd;
	uint32_t meta_buff_size;
	uint32_t reason_failure;
	uint64_t qtime_first_pkt_processed;
	uint64_t qtime_last_pkt_processed;
};

/**
 * struct statistics_info - statistics information.
 * @avg_reoder_latency: average reordering latency.
 * @num_frame_to_sw: no. frames to sw-path.
 * @last_frame_to_deco: last frame to decoder.
 */
struct statistics_info {
	uint32_t avg_reoder_latency;
	uint32_t num_frame_to_sw;
	uint32_t last_frame_to_deco;
};
/* Generic netlink command ids exchanged between IPA C2 and the IPA driver.
 * NOTE(review): these values are userspace ABI once shipped — append only.
 */
enum {
	IPA_RTP_GENL_CMD_UNSPEC,
	IPA_RTP_GENL_CMD_STR,
	IPA_RTP_GENL_CMD_INT,
	IPA_RTP_GENL_CMD_TUPLE_INFO,
	IPA_RTP_GENL_CMD_ASSIGN_STREAM_ID,
	IPA_RTP_GENL_CMD_ADD_BITSTREAM_BUFF,
	IPA_RTP_GENL_CMD_SMMU_MAP_BUFF,
	IPA_RTP_GENL_CMD_SMMU_UNMAP_BUFF,
	IPA_RTP_GENL_CMD_REMOVE_STREAM_ID,
	IPA_RTP_GENL_CMD_MAX,
};

/* Generic netlink attribute ids; each payload attribute is validated against
 * ipa_rtp_genl_attr_policy. Keep in sync with the CMD enum above, but always
 * index info->attrs[] with ATTR values, never CMD values.
 */
enum {
	IPA_RTP_GENL_ATTR_UNSPEC,
	IPA_RTP_GENL_ATTR_STR,
	IPA_RTP_GENL_ATTR_INT,
	IPA_RTP_GENL_ATTR_TUPLE_INFO,
	IPA_RTP_GENL_ATTR_ASSIGN_STREAM_ID,
	IPA_RTP_GENL_ATTR_ADD_BITSTREAM_BUFF,
	IPA_RTP_GENL_ATTR_SMMU_MAP_BUFF,
	IPA_RTP_GENL_ATTR_SMMU_UNMAP_BUFF,
	IPA_RTP_GENL_ATTR_REMOVE_STREAM_ID,
	IPA_RTP_GENL_ATTR_MAX,
};
/* Function Prototypes */

/*
 * This handler will be invoked when IPA C2 sends the TUPLE
 * info cmd to the IPA Driver via the generic netlink interface.
 */
int ipa_rtp_tuple_info_req_hdlr(struct sk_buff *skb_2,
				struct genl_info *info);

/*
 * This function will be invoked when the IPA driver allocates a stream
 * id and sends it to IPA C2 via the generic netlink interface.
 */
int ipa_rtp_send_tuple_info_resp(struct genl_info *info,
				 struct assign_stream_id *sid);

/*
 * This handler will be invoked when IPA C2 sends the SMMU MAP
 * info cmd to the IPA Driver via the generic netlink interface.
 */
int ipa_rtp_smmu_map_buff_req_hdlr(struct sk_buff *skb_2,
				   struct genl_info *info);

/*
 * This handler will be invoked when IPA C2 sends the SMMU UNMAP
 * info cmd to the IPA Driver via the generic netlink interface.
 */
int ipa_rtp_smmu_unmap_buff_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info);

/*
 * This handler will be invoked when IPA C2 sends the BITSTREAM BUFF
 * info cmd to the IPA Driver via the generic netlink interface.
 */
int ipa_rtp_add_bitstream_buff_req_hdlr(struct sk_buff *skb_2,
					struct genl_info *info);

/*
 * This handler will be invoked when IPA C2 sends the REMOVE STREAM
 * info cmd to the IPA Driver via the generic netlink interface.
 */
int ipa_rtp_rmv_stream_id_req_hdlr(struct sk_buff *skb_2,
				   struct genl_info *info);

/*
 * Generic netlink family init/deinit, called from the IPA driver.
 * When IPA C2 userspace comes up it connects to this family via the
 * pre-defined name.
 */
int ipa_rtp_genl_init(void);
int ipa_rtp_genl_deinit(void);

#endif /*_IPA_RTP_GENL_H_*/