rmnet_core: Add IPA driver support for low-latency framework
Allows the use of the LL channel on IPA based targets. MHI-specific
functionality is split into the new rmnet_ll_mhi.c file, and IPA support is
placed in rmnet_ll_ipa.c. rmnet_ll.c acts as a generic interface to the core
rmnet module and calls into the active HW module to provide the low-latency
channel functionality.

Change-Id: Id3e77b8433134872eba09818fc662fc109687d80
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
Committed by: Subash Abhinov Kasiviswanathan
Parent: b8552944d5
Commit: aeba491583
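To make the structure of the change easier to follow, here is a minimal
userspace sketch of the dispatch pattern it introduces: the generic layer
(rmnet_ll.c) declares an extern ops table and calls through it, while exactly
one backend (rmnet_ll_mhi.c or rmnet_ll_ipa.c, selected in the Makefile)
defines that table. This is an illustration only; every demo_* name below is
made up and is not part of the driver.

/* demo_ops.c - illustrative sketch only, not driver code */
#include <stdio.h>

struct demo_client_ops {
    int (*tx)(const char *payload);   /* mandatory, like .tx */
    int (*init)(void);                /* called from the core init path */
    int (*exit)(void);                /* called from the core exit path */
};

/* "Generic core": knows only the ops table, not which backend provides it */
extern struct demo_client_ops demo_client;

static int demo_core_send(const char *payload)
{
    /* rmnet_ll_send_skb() has the same shape: take a lock, call .tx() */
    return demo_client.tx(payload);
}

/* One "backend" definition, analogous to the rmnet_ll_client instance that
 * rmnet_ll_ipa.c or rmnet_ll_mhi.c exports.
 */
static int demo_backend_tx(const char *payload)
{
    printf("backend tx: %s\n", payload);
    return 0;
}

static int demo_backend_init(void) { return 0; }
static int demo_backend_exit(void) { return 0; }

struct demo_client_ops demo_client = {
    .tx = demo_backend_tx,
    .init = demo_backend_init,
    .exit = demo_backend_exit,
};

int main(void)
{
    if (demo_client.init())
        return 1;
    demo_core_send("low-latency packet");
    return demo_client.exit();
}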
@@ -18,8 +18,10 @@ rmnet_core-y := \
 		rmnet_map_command.o \
 		rmnet_map_data.o \
 		rmnet_vnd.o
-rmnet_core-$(CONFIG_MHI_BUS) += \
-		rmnet_ll.o
+
+rmnet_core-y += \
+		rmnet_ll.o \
+		rmnet_ll_ipa.o
 
 #DFC sources
 rmnet_core-y += \
@@ -502,8 +502,11 @@ void rmnet_egress_handler(struct sk_buff *skb, bool low_latency)
     rmnet_vnd_tx_fixup(orig_dev, skb_len);
 
     if (low_latency) {
-        if (rmnet_ll_send_skb(skb))
-            goto drop;
+        if (rmnet_ll_send_skb(skb)) {
+            /* Drop but no need to free. Above API handles that */
+            this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+            return;
+        }
     } else {
         dev_queue_xmit(skb);
     }
core/rmnet_ll.c (243 changed lines)
@@ -12,69 +12,33 @@
  * RmNet Low Latency channel handlers
  */
 
-#include <linux/device.h>
 #include <linux/netdevice.h>
-#include <linux/of.h>
 #include <linux/skbuff.h>
-#include <linux/mhi.h>
-#include <linux/if_ether.h>
 #include <linux/mm.h>
 #include <linux/list.h>
-#include <linux/version.h>
 #include "rmnet_ll.h"
+#include "rmnet_ll_core.h"
 
-#define RMNET_LL_DEFAULT_MRU 0x8000
 #define RMNET_LL_MAX_RECYCLE_ITER 16
 
-struct rmnet_ll_buffer {
-    struct list_head list;
-    struct page *page;
-    bool temp_alloc;
-    bool submitted;
-};
-
-struct rmnet_ll_buffer_pool {
-    struct list_head buf_list;
-    /* Protect access to the recycle buffer pool */
-    spinlock_t pool_lock;
-    struct list_head *last;
-    u32 pool_size;
-};
-
-struct rmnet_ll_endpoint {
-    struct rmnet_ll_buffer_pool buf_pool;
-    struct mhi_device *mhi_dev;
-    struct net_device *mhi_netdev;
-    u32 dev_mru;
-    u32 page_order;
-    u32 buf_len;
-};
-
-static struct rmnet_ll_endpoint *rmnet_ll_ep;
 static struct rmnet_ll_stats rmnet_ll_stats;
-/* For TX synch with MHI via mhi_queue_transfer() */
+/* For TX sync with DMA operations */
 static DEFINE_SPINLOCK(rmnet_ll_tx_lock);
 
+/* Client operations for respective underlying HW */
+extern struct rmnet_ll_client_ops rmnet_ll_client;
+
 static void rmnet_ll_buffers_submit(struct rmnet_ll_endpoint *ll_ep,
                                     struct list_head *buf_list)
 {
     struct rmnet_ll_buffer *ll_buf;
-    int rc;
 
     list_for_each_entry(ll_buf, buf_list, list) {
         if (ll_buf->submitted)
             continue;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
-        rc = mhi_queue_transfer(ll_ep->mhi_dev, DMA_FROM_DEVICE,
-                                page_address(ll_buf->page),
-                                ll_ep->buf_len, MHI_EOT);
-#else
-        rc = mhi_queue_buf(ll_ep->mhi_dev, DMA_FROM_DEVICE,
-                           page_address(ll_buf->page),
-                           ll_ep->buf_len, MHI_EOT);
-#endif
-        if (rc) {
+        if (!rmnet_ll_client.buffer_queue ||
+            rmnet_ll_client.buffer_queue(ll_ep, ll_buf)) {
             rmnet_ll_stats.rx_queue_err++;
             /* Don't leak the page if we're not storing it */
             if (ll_buf->temp_alloc)
@@ -106,7 +70,7 @@ rmnet_ll_buffer_alloc(struct rmnet_ll_endpoint *ll_ep, gfp_t gfp)
     return ll_buf;
 }
 
-static int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
+int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
 {
     spin_lock_init(&ll_ep->buf_pool.pool_lock);
     INIT_LIST_HEAD(&ll_ep->buf_pool.buf_list);
@@ -115,7 +79,7 @@ static int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
     return 0;
 }
 
-static void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
+void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
 {
     struct rmnet_ll_buffer *ll_buf, *tmp;
     list_for_each_entry_safe(ll_buf, tmp, &ll_ep->buf_pool.buf_list, list) {
@@ -126,17 +90,16 @@ static void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
     ll_ep->buf_pool.last = NULL;
 }
 
-static void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep)
+void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep)
 {
     struct rmnet_ll_buffer *ll_buf, *tmp;
     LIST_HEAD(buf_list);
     int num_tre, count = 0, iter = 0;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
-    num_tre = mhi_get_no_free_descriptors(ll_ep->mhi_dev, DMA_FROM_DEVICE);
-#else
-    num_tre = mhi_get_free_desc_count(ll_ep->mhi_dev, DMA_FROM_DEVICE);
-#endif
+    if (!rmnet_ll_client.query_free_descriptors)
+        goto out;
+
+    num_tre = rmnet_ll_client.query_free_descriptors(ll_ep);
     if (!num_tre)
         goto out;
 
@@ -178,182 +141,12 @@ out:
     return;
 }
 
-static void rmnet_ll_rx(struct mhi_device *mhi_dev, struct mhi_result *res)
-{
-    struct rmnet_ll_endpoint *ll_ep = dev_get_drvdata(&mhi_dev->dev);
-    struct rmnet_ll_buffer *ll_buf;
-    struct sk_buff *skb;
-
-    /* Get the buffer struct back for our page information */
-    ll_buf = res->buf_addr + ll_ep->buf_len;
-    ll_buf->submitted = false;
-    if (res->transaction_status) {
-        rmnet_ll_stats.rx_status_err++;
-        goto err;
-    } else if (!res->bytes_xferd) {
-        rmnet_ll_stats.rx_null++;
-        goto err;
-    }
-
-    /* Store this away so we don't have to look it up every time */
-    if (!ll_ep->mhi_netdev) {
-        ll_ep->mhi_netdev = dev_get_by_name(&init_net, "rmnet_mhi0");
-        if (!ll_ep->mhi_netdev)
-            goto err;
-    }
-
-    skb = alloc_skb(0, GFP_ATOMIC);
-    if (!skb) {
-        rmnet_ll_stats.rx_oom++;
-        goto err;
-    }
-
-    /* Build the SKB and pass it off to the stack */
-    skb_add_rx_frag(skb, 0, ll_buf->page, 0, res->bytes_xferd,
-                    ll_ep->buf_len);
-    if (!ll_buf->temp_alloc)
-        get_page(ll_buf->page);
-
-    skb->dev = ll_ep->mhi_netdev;
-    skb->protocol = htons(ETH_P_MAP);
-    /* Mark this as arriving on the LL channel. Allows rmnet to skip
-     * module handling as needed.
-     */
-    skb->priority = 0xda1a;
-    rmnet_ll_stats.rx_pkts++;
-    netif_rx(skb);
-    rmnet_ll_buffers_recycle(ll_ep);
-    return;
-
-err:
-    /* Go, and never darken my towels again! */
-    if (ll_buf->temp_alloc)
-        put_page(ll_buf->page);
-}
-
-static void rmnet_ll_tx_complete(struct mhi_device *mhi_dev,
-                                 struct mhi_result *res)
-{
-    struct sk_buff *skb = res->buf_addr;
-
-    /* Check the result and free the SKB */
-    if (res->transaction_status)
-        rmnet_ll_stats.tx_complete_err++;
-    else
-        rmnet_ll_stats.tx_complete++;
-
-    dev_kfree_skb_any(skb);
-}
-
-static int rmnet_ll_probe(struct mhi_device *mhi_dev,
-                          const struct mhi_device_id *id)
-{
-    struct rmnet_ll_endpoint *ll_ep;
-    int rc;
-
-    /* Allocate space for our state from the managed pool tied to the life
-     * of the mhi device.
-     */
-    ll_ep = devm_kzalloc(&mhi_dev->dev, sizeof(*ll_ep), GFP_KERNEL);
-    if (!ll_ep)
-        return -ENOMEM;
-
-    /* Hold on to the mhi_dev so we can send data to it later */
-    ll_ep->mhi_dev = mhi_dev;
-
-    /* Grab the MRU of the device so we know the size of the pages we need
-     * to allocate for the pool.
-     */
-    rc = of_property_read_u32(mhi_dev->dev.of_node, "mhi,mru",
-                              &ll_ep->dev_mru);
-    if (rc || !ll_ep->dev_mru)
-        /* Use our default mru */
-        ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;
-
-    ll_ep->page_order = get_order(ll_ep->dev_mru);
-    /* We store some stuff at the end of the page, so don't let the HW
-     * use that part of it.
-     */
-    ll_ep->buf_len = ll_ep->dev_mru - sizeof(struct rmnet_ll_buffer);
-
-    /* Tell MHI to initialize the UL/DL channels for transfer */
-    rc = mhi_prepare_for_transfer(mhi_dev);
-    if (rc) {
-        pr_err("%s(): Failed to prepare device for transfer: 0x%x\n",
-               __func__, rc);
-        return rc;
-    }
-
-    rc = rmnet_ll_buffer_pool_alloc(ll_ep);
-    if (rc) {
-        pr_err("%s(): Failed to allocate buffer pool: %d\n", __func__,
-               rc);
-        mhi_unprepare_from_transfer(mhi_dev);
-        return rc;
-    }
-
-    rmnet_ll_buffers_recycle(ll_ep);
-
-    /* Not a fan of storing this pointer in two locations, but I've yet to
-     * come up with any other good way of accessing it on the TX path from
-     * rmnet otherwise, since we won't have any references to the mhi_dev.
-     */
-    dev_set_drvdata(&mhi_dev->dev, ll_ep);
-    rmnet_ll_ep = ll_ep;
-    return 0;
-}
-
-static void rmnet_ll_remove(struct mhi_device *mhi_dev)
-{
-    struct rmnet_ll_endpoint *ll_ep;
-
-    ll_ep = dev_get_drvdata(&mhi_dev->dev);
-    /* Remove our private data form the device. No need to free it though.
-     * It will be freed once the mhi_dev is released since it was alloced
-     * from a managed pool.
-     */
-    dev_set_drvdata(&mhi_dev->dev, NULL);
-    rmnet_ll_ep = NULL;
-    rmnet_ll_buffer_pool_free(ll_ep);
-}
-
-static const struct mhi_device_id rmnet_ll_channel_table[] = {
-    {
-        .chan = "RMNET_DATA_LL",
-    },
-    {},
-};
-
-static struct mhi_driver rmnet_ll_driver = {
-    .probe = rmnet_ll_probe,
-    .remove = rmnet_ll_remove,
-    .dl_xfer_cb = rmnet_ll_rx,
-    .ul_xfer_cb = rmnet_ll_tx_complete,
-    .id_table = rmnet_ll_channel_table,
-    .driver = {
-        .name = "rmnet_ll",
-        .owner = THIS_MODULE,
-    },
-};
-
 int rmnet_ll_send_skb(struct sk_buff *skb)
 {
-    struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ep;
-    int rc = -ENODEV;
+    int rc;
 
-    /* Lock to prevent multiple sends at the same time. mhi_queue_transfer()
-     * cannot be called in parallel for the same DMA direction.
-     */
     spin_lock_bh(&rmnet_ll_tx_lock);
-    if (ll_ep)
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
-        rc = mhi_queue_transfer(ll_ep->mhi_dev, DMA_TO_DEVICE, skb,
-                                skb->len, MHI_EOT);
-#else
-        rc = mhi_queue_skb(ll_ep->mhi_dev, DMA_TO_DEVICE, skb,
-                           skb->len, MHI_EOT);
-#endif
-
+    rc = rmnet_ll_client.tx(skb);
     spin_unlock_bh(&rmnet_ll_tx_lock);
     if (rc)
         rmnet_ll_stats.tx_queue_err++;
@@ -370,10 +163,10 @@ struct rmnet_ll_stats *rmnet_ll_get_stats(void)
 
 int rmnet_ll_init(void)
 {
-    return mhi_driver_register(&rmnet_ll_driver);
+    return rmnet_ll_client.init();
 }
 
 void rmnet_ll_exit(void)
 {
-    mhi_driver_unregister(&rmnet_ll_driver);
+    rmnet_ll_client.exit();
 }
core/rmnet_ll.h

@@ -31,40 +31,9 @@ struct rmnet_ll_stats {
     u64 rx_tmp_allocs;
 };
 
-#if IS_ENABLED(CONFIG_MHI_BUS)
-
 int rmnet_ll_send_skb(struct sk_buff *skb);
 struct rmnet_ll_stats *rmnet_ll_get_stats(void);
 int rmnet_ll_init(void);
 void rmnet_ll_exit(void);
 
-#else
-
-static struct rmnet_ll_stats rmnet_ll_dummy_stats;
-
-static inline int rmnet_ll_send_skb(struct sk_buff *skb)
-{
-    return -EINVAL;
-}
-
-static inline struct rmnet_ll_stats *rmnet_ll_get_stats(void)
-{
-    return &rmnet_ll_dummy_stats;
-}
-
-static inline int rmnet_ll_init(void)
-{
-    /* Allow configuration to continue. Nothing else will happen since all
-     * this does is register the driver with the mhi framework, and if the
-     * channel never comes up, we don't do anything.
-     */
-    return 0;
-}
-
-static inline void rmnet_ll_exit(void)
-{
-}
-
-#endif
-
 #endif
core/rmnet_ll_core.h (new file, 69 lines)

@@ -0,0 +1,69 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet Low Latency channel handlers
 */

#ifndef __RMNET_LL_CORE_H__
#define __RMNET_LL_CORE_H__

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/list.h>

#define RMNET_LL_DEFAULT_MRU 0x8000

struct rmnet_ll_buffer {
    struct list_head list;
    struct page *page;
    bool temp_alloc;
    bool submitted;
};

struct rmnet_ll_buffer_pool {
    struct list_head buf_list;
    /* Protect access to the recycle buffer pool */
    spinlock_t pool_lock;
    struct list_head *last;
    u32 pool_size;
};

struct rmnet_ll_endpoint {
    struct rmnet_ll_buffer_pool buf_pool;
    struct net_device *phys_dev;
    void *priv;
    u32 dev_mru;
    u32 page_order;
    u32 buf_len;
};

/* Core operations to hide differences between physical transports.
 *
 * buffer_queue: Queue an allocated buffer to the HW for RX. Optional.
 * query_free_descriptors: Return number of free RX descriptors. Optional.
 * tx: Send an SKB over the channel in the TX direction.
 * init: Initialization callback on module load
 * exit: Exit callback on module unload
 */
struct rmnet_ll_client_ops {
    int (*buffer_queue)(struct rmnet_ll_endpoint *ll_ep,
                        struct rmnet_ll_buffer *ll_buf);
    int (*query_free_descriptors)(struct rmnet_ll_endpoint *ll_ep);
    int (*tx)(struct sk_buff *skb);
    int (*init)(void);
    int (*exit)(void);
};

int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep);
void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep);
void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep);

#endif
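As a reading aid (not part of this commit), here is a hypothetical backend
showing the minimum a client must provide through this interface. It assumes
a made-up "demo" transport with no real HW behind it: .buffer_queue and
.query_free_descriptors may be left NULL because the core NULL-checks them in
rmnet_ll_buffers_submit() and rmnet_ll_buffers_recycle(), while .tx, .init,
and .exit are always called. Only one such backend can be linked in, since
rmnet_ll.c references the rmnet_ll_client symbol directly.

/* rmnet_ll_demo.c - hypothetical sketch, not driver code */
#include <linux/skbuff.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

static int rmnet_ll_demo_tx(struct sk_buff *skb)
{
    /* A real backend hands the skb to its HW queue here and, like
     * rmnet_ll_mhi_tx(), frees it itself if queueing fails; this stub
     * simply drops the packet and reports an error.
     */
    kfree_skb(skb);
    return -ENODEV;
}

static int rmnet_ll_demo_init(void)
{
    /* Register with the underlying transport here */
    return 0;
}

static int rmnet_ll_demo_exit(void)
{
    return 0;
}

struct rmnet_ll_client_ops rmnet_ll_client = {
    /* .buffer_queue and .query_free_descriptors intentionally omitted:
     * this hypothetical HW manages its own RX buffers, as the IPA client
     * does.
     */
    .tx = rmnet_ll_demo_tx,
    .init = rmnet_ll_demo_init,
    .exit = rmnet_ll_demo_exit,
};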
core/rmnet_ll_ipa.c (new file, 129 lines)

@@ -0,0 +1,129 @@
/* Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet IPA Low Latency channel handlers
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ipa.h>
#include <linux/if_ether.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;

static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
{
    struct rmnet_ll_endpoint *ll_ep = *((struct rmnet_ll_endpoint **)arg);
    struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
    struct sk_buff *skb, *tmp;

    skb = rx_data;
    /* Odds are IPA does this, but just to be safe */
    skb->dev = ll_ep->phys_dev;
    skb->protocol = htons(ETH_P_MAP);

    tmp = skb;
    while (tmp) {
        /* Mark the SKB as low latency */
        tmp->priority = 0xda1a;
        tmp = skb_shinfo(tmp)->frag_list;
    }

    stats->rx_pkts++;
    netif_rx(skb);
}

static void rmnet_ll_ipa_probe(void *arg)
{
    struct rmnet_ll_endpoint *ll_ep;

    ll_ep = kzalloc(sizeof(*ll_ep), GFP_KERNEL);
    if (!ll_ep) {
        pr_err("%s(): allocating LL CTX failed\n", __func__);
        return;
    }

    ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_ipa0");
    if (!ll_ep->phys_dev) {
        pr_err("%s(): Invalid physical device\n", __func__);
        kfree(ll_ep);
        return;
    }

    *((struct rmnet_ll_endpoint **)arg) = ll_ep;
}

static void rmnet_ll_ipa_remove(void *arg)
{
    struct rmnet_ll_endpoint **ll_ep = arg;

    dev_put((*ll_ep)->phys_dev);
    kfree(*ll_ep);
    *ll_ep = NULL;
}

static void rmnet_ll_ipa_ready(void * __unused)
{
    int rc;

    rc = ipa_register_rmnet_ll_cb(rmnet_ll_ipa_probe,
                                  (void *)&rmnet_ll_ipa_ep,
                                  rmnet_ll_ipa_remove,
                                  (void *)&rmnet_ll_ipa_ep,
                                  rmnet_ll_ipa_rx,
                                  (void *)&rmnet_ll_ipa_ep);
    if (rc)
        pr_err("%s(): Registering IPA LL callback failed with rc %d\n",
               __func__, rc);
}

static int rmnet_ll_ipa_tx(struct sk_buff *skb)
{
    if (!rmnet_ll_ipa_ep)
        return -ENODEV;

    /* IPA handles freeing the SKB on failure */
    return ipa_rmnet_ll_xmit(skb);
}

static int rmnet_ll_ipa_init(void)
{
    int rc;

    rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
    if (rc == -EEXIST) {
        /* IPA is already up. Call it ourselves, since they don't */
        rmnet_ll_ipa_ready(NULL);
        rc = 0;
    }

    return rc;
}

static int rmnet_ll_ipa_exit(void)
{
    if (rmnet_ll_ipa_ep) {
        ipa_unregister_rmnet_ll_cb();
        /* Teardown? */
        rmnet_ll_ipa_ep = NULL;
    }

    return 0;
}

/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
    .tx = rmnet_ll_ipa_tx,
    .init = rmnet_ll_ipa_init,
    .exit = rmnet_ll_ipa_exit,
};
core/rmnet_ll_mhi.c (new file, 238 lines)

@@ -0,0 +1,238 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet MHI Low Latency channel handlers
 */

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/mhi.h>
#include <linux/if_ether.h>
#include <linux/mm.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

static struct rmnet_ll_endpoint *rmnet_ll_mhi_ep;

static void rmnet_ll_mhi_rx(struct mhi_device *mhi_dev, struct mhi_result *res)
{
    struct rmnet_ll_endpoint *ll_ep = dev_get_drvdata(&mhi_dev->dev);
    struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
    struct rmnet_ll_buffer *ll_buf;
    struct sk_buff *skb;

    /* Get the buffer struct back for our page information */
    ll_buf = res->buf_addr + ll_ep->buf_len;
    ll_buf->submitted = false;
    if (res->transaction_status) {
        stats->rx_status_err++;
        goto err;
    } else if (!res->bytes_xferd) {
        stats->rx_null++;
        goto err;
    }

    /* Store this away so we don't have to look it up every time */
    if (!ll_ep->phys_dev) {
        ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_mhi0");
        if (!ll_ep->phys_dev)
            goto err;
    }

    skb = alloc_skb(0, GFP_ATOMIC);
    if (!skb) {
        stats->rx_oom++;
        goto err;
    }

    /* Build the SKB and pass it off to the stack */
    skb_add_rx_frag(skb, 0, ll_buf->page, 0, res->bytes_xferd,
                    ll_ep->buf_len);
    if (!ll_buf->temp_alloc)
        get_page(ll_buf->page);

    skb->dev = ll_ep->phys_dev;
    skb->protocol = htons(ETH_P_MAP);
    /* Mark this as arriving on the LL channel. Allows rmnet to skip
     * module handling as needed.
     */
    skb->priority = 0xda1a;
    stats->rx_pkts++;
    netif_rx(skb);
    rmnet_ll_buffers_recycle(ll_ep);
    return;

err:
    /* Go, and never darken my towels again! */
    if (ll_buf->temp_alloc)
        put_page(ll_buf->page);
}

static void rmnet_ll_mhi_tx_complete(struct mhi_device *mhi_dev,
                                     struct mhi_result *res)
{
    struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
    struct sk_buff *skb = res->buf_addr;

    /* Check the result and free the SKB */
    if (res->transaction_status)
        stats->tx_complete_err++;
    else
        stats->tx_complete++;

    dev_kfree_skb_any(skb);
}

static int rmnet_ll_mhi_probe(struct mhi_device *mhi_dev,
                              const struct mhi_device_id *id)
{
    struct rmnet_ll_endpoint *ll_ep;
    int rc;

    /* Allocate space for our state from the managed pool tied to the life
     * of the mhi device.
     */
    ll_ep = devm_kzalloc(&mhi_dev->dev, sizeof(*ll_ep), GFP_KERNEL);
    if (!ll_ep)
        return -ENOMEM;

    /* Hold on to the mhi_dev so we can send data to it later */
    ll_ep->priv = (void *)mhi_dev;

    /* Grab the MRU of the device so we know the size of the pages we need
     * to allocate for the pool.
     */
    rc = of_property_read_u32(mhi_dev->dev.of_node, "mhi,mru",
                              &ll_ep->dev_mru);
    if (rc || !ll_ep->dev_mru)
        /* Use our default mru */
        ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;

    ll_ep->page_order = get_order(ll_ep->dev_mru);
    /* We store some stuff at the end of the page, so don't let the HW
     * use that part of it.
     */
    ll_ep->buf_len = ll_ep->dev_mru - sizeof(struct rmnet_ll_buffer);

    /* Tell MHI to initialize the UL/DL channels for transfer */
    rc = mhi_prepare_for_transfer(mhi_dev);
    if (rc) {
        pr_err("%s(): Failed to prepare device for transfer: 0x%x\n",
               __func__, rc);
        return rc;
    }

    rc = rmnet_ll_buffer_pool_alloc(ll_ep);
    if (rc) {
        pr_err("%s(): Failed to allocate buffer pool: %d\n", __func__,
               rc);
        mhi_unprepare_from_transfer(mhi_dev);
        return rc;
    }

    rmnet_ll_buffers_recycle(ll_ep);

    /* Not a fan of storing this pointer in two locations, but I've yet to
     * come up with any other good way of accessing it on the TX path from
     * rmnet otherwise, since we won't have any references to the mhi_dev.
     */
    dev_set_drvdata(&mhi_dev->dev, ll_ep);
    rmnet_ll_mhi_ep = ll_ep;
    return 0;
}

static void rmnet_ll_mhi_remove(struct mhi_device *mhi_dev)
{
    struct rmnet_ll_endpoint *ll_ep;

    ll_ep = dev_get_drvdata(&mhi_dev->dev);
    /* Remove our private data form the device. No need to free it though.
     * It will be freed once the mhi_dev is released since it was alloced
     * from a managed pool.
     */
    dev_set_drvdata(&mhi_dev->dev, NULL);
    rmnet_ll_mhi_ep = NULL;
    rmnet_ll_buffer_pool_free(ll_ep);
}

static const struct mhi_device_id rmnet_ll_mhi_channel_table[] = {
    {
        .chan = "RMNET_DATA_LL",
    },
    {},
};

static struct mhi_driver rmnet_ll_driver = {
    .probe = rmnet_ll_mhi_probe,
    .remove = rmnet_ll_mhi_remove,
    .dl_xfer_cb = rmnet_ll_mhi_rx,
    .ul_xfer_cb = rmnet_ll_mhi_tx_complete,
    .id_table = rmnet_ll_mhi_channel_table,
    .driver = {
        .name = "rmnet_ll",
        .owner = THIS_MODULE,
    },
};

static int rmnet_ll_mhi_queue(struct rmnet_ll_endpoint *ll_ep,
                              struct rmnet_ll_buffer *ll_buf)
{
    struct mhi_device *mhi_dev = ll_ep->priv;

    return mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE,
                         page_address(ll_buf->page),
                         ll_ep->buf_len, MHI_EOT);
}

static int rmnet_ll_mhi_query_free_descriptors(struct rmnet_ll_endpoint *ll_ep)
{
    struct mhi_device *mhi_dev = ll_ep->priv;

    return mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
}

static int rmnet_ll_mhi_tx(struct sk_buff *skb)
{
    struct mhi_device *mhi_dev;
    int rc;

    if (!rmnet_ll_mhi_ep)
        return -ENODEV;

    mhi_dev = rmnet_ll_mhi_ep->priv;
    rc = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
    if (rc)
        kfree_skb(skb);

    return rc;
}

static int rmnet_ll_mhi_init(void)
{
    return mhi_driver_register(&rmnet_ll_driver);
}

static int rmnet_ll_mhi_exit(void)
{
    mhi_driver_unregister(&rmnet_ll_driver);
    return 0;
}

/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
    .buffer_queue = rmnet_ll_mhi_queue,
    .query_free_descriptors = rmnet_ll_mhi_query_free_descriptors,
    .tx = rmnet_ll_mhi_tx,
    .init = rmnet_ll_mhi_init,
    .exit = rmnet_ll_mhi_exit,
};