Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The UDP offload conflict is dealt with by simply taking what is in net-next
where we have removed all of the UFO handling code entirely.

The TCP conflict was a case of local variables in a function being removed
from both net and net-next.

In netvsc we had an assignment right next to where a missing set of u64 stats
sync object inits were added.

Signed-off-by: David S. Miller <davem@davemloft.net>
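As background for the netvsc note above, "u64 stats sync object inits" refers to the kernel's u64_stats_sync helper: per-ring 64-bit counters carry a struct u64_stats_sync that must be initialized with u64_stats_init() before writers call u64_stats_update_begin()/u64_stats_update_end(). Several hunks below add exactly that kind of one-line init. The sketch that follows is illustrative only; the struct and function names are invented for illustration and are not the netvsc code.

	#include <linux/u64_stats_sync.h>

	/* Illustrative only: a per-ring stats block protected by u64_stats_sync. */
	struct ring_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	static void ring_stats_setup(struct ring_stats *rs)
	{
		rs->packets = 0;
		rs->bytes = 0;
		u64_stats_init(&rs->syncp);	/* the kind of "missing init" this merge adds */
	}

	static void ring_stats_count(struct ring_stats *rs, unsigned int len)
	{
		u64_stats_update_begin(&rs->syncp);
		rs->packets++;
		rs->bytes += len;
		u64_stats_update_end(&rs->syncp);
	}

On 64-bit builds u64_stats_init() is effectively a no-op; on 32-bit it initializes the seqcount that lets readers fetch a consistent packets/bytes pair via u64_stats_fetch_begin()/u64_stats_fetch_retry().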
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 
 	xgene_enet_gpiod_get(pdata);
 
-	if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
-		pdata->clk = devm_clk_get(&pdev->dev, NULL);
-		if (IS_ERR(pdata->clk)) {
+	pdata->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pdata->clk)) {
+		if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
 			/* Abort if the clock is defined but couldn't be
 			 * retrived. Always abort if the clock is missing on
 			 * DT system as the driver can't cope with this case.
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
 
 	spin_lock_init(&bp->lock);
+	u64_stats_init(&bp->hw_stats.syncp);
 
 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 static void send_request_unmap(struct ibmvnic_adapter *, u8);
 static void send_login(struct ibmvnic_adapter *adapter);
 static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crqs(struct ibmvnic_adapter *);
 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
@@ -676,6 +677,7 @@ static int ibmvnic_login(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	unsigned long timeout = msecs_to_jiffies(30000);
 	struct device *dev = &adapter->vdev->dev;
+	int rc;
 
 	do {
 		if (adapter->renegotiate) {
@@ -689,6 +691,18 @@ static int ibmvnic_login(struct net_device *netdev)
 				dev_err(dev, "Capabilities query timeout\n");
 				return -1;
 			}
+			rc = init_sub_crqs(adapter);
+			if (rc) {
+				dev_err(dev,
+					"Initialization of SCRQ's failed\n");
+				return -1;
+			}
+			rc = init_sub_crq_irqs(adapter);
+			if (rc) {
+				dev_err(dev,
+					"Initialization of SCRQ's irqs failed\n");
+				return -1;
+			}
 		}
 
 		reinit_completion(&adapter->init_done);
@@ -3106,7 +3120,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 			 *req_value,
 			 (long int)be64_to_cpu(crq->request_capability_rsp.
 					       number), name);
-		release_sub_crqs(adapter);
 		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
 		ibmvnic_send_req_caps(adapter, 1);
 		return;
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!tx_ring->tx_bi)
 		goto err;
 
+	u64_stats_init(&tx_ring->syncp);
+
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
 	/* add u32 for head writeback, align after this takes care of
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
+	u64_stats_init(&tx_ring->syncp);
+
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
+	u64_stats_init(&rx_ring->syncp);
+
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
 			    struct ethtool_wolinfo *wol)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	struct mlx4_caps *caps = &priv->mdev->dev->caps;
 	int err = 0;
 	u64 config = 0;
 	u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
 	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
 		MLX4_DEV_CAP_FLAG_WOL_PORT2;
 
-	if (!(priv->mdev->dev->caps.flags & mask)) {
+	if (!(caps->flags & mask)) {
 		wol->supported = 0;
 		wol->wolopts = 0;
 		return;
 	}
 
+	if (caps->wol_port[priv->port])
+		wol->supported = WAKE_MAGIC;
+	else
+		wol->supported = 0;
+
 	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
 	if (err) {
 		en_err(priv, "Failed to get WoL information\n");
 		return;
 	}
 
-	if (config & MLX4_EN_WOL_MAGIC)
-		wol->supported = WAKE_MAGIC;
-	else
-		wol->supported = 0;
-
-	if (config & MLX4_EN_WOL_ENABLED)
+	if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
 		wol->wolopts = WAKE_MAGIC;
 	else
 		wol->wolopts = 0;
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
  * header, the HW adds it. To address that, we are subtracting the pseudo
  * header checksum from the checksum value provided by the HW.
  */
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
-				struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+			       struct iphdr *iph)
 {
 	__u16 length_for_csum = 0;
 	__wsum csum_pseudo_header = 0;
+	__u8 ipproto = iph->protocol;
 
+	if (unlikely(ipproto == IPPROTO_SCTP))
+		return -1;
+
 	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
 	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-						length_for_csum, iph->protocol, 0);
+						length_for_csum, ipproto, 0);
 	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 			       struct ipv6hdr *ipv6h)
 {
+	__u8 nexthdr = ipv6h->nexthdr;
 	__wsum csum_pseudo_hdr = 0;
 
-	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
-		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
+	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+		     nexthdr == IPPROTO_HOPOPTS ||
+		     nexthdr == IPPROTO_SCTP))
 		return -1;
-	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
 
 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
 	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
-	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+	csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+				   (__force __wsum)htons(nexthdr));
 
 	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
 	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 	}
 
 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+		return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
-	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
-		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
-			return -1;
+	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
 	return 0;
 }
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[32] = "Loopback source checks support",
 		[33] = "RoCEv2 support",
 		[34] = "DMFS Sniffer support (UC & MC)",
-		[35] = "QinQ VST mode support",
-		[36] = "sl to vl mapping table change event support"
+		[35] = "Diag counters per port",
+		[36] = "QinQ VST mode support",
+		[37] = "sl to vl mapping table change event support",
 	};
 	int i;
 
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
+#define QUERY_DEV_CAP_WOL_OFFSET 0x43
 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
 	dev_cap->flags = flags | (u64)ext_flags << 32;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+	dev_cap->wol_port[1] = !!(field & 0x20);
+	dev_cap->wol_port[2] = !!(field & 0x40);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 	dev_cap->reserved_uars = field >> 4;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
 	u32 dmfs_high_rate_qpn_range;
 	struct mlx4_rate_limit_caps rl_caps;
 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+	bool wol_port[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_func_cap {
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+	dev->caps.wol_port[1] = dev_cap->wol_port[1];
+	dev->caps.wol_port[2] = dev_cap->wol_port[2];
 
 	/* Save uar page shift */
 	if (!mlx4_is_slave(dev)) {
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
 						orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
 						   MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
 						orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	if (!bridge_port->bridge_device->multicast_enabled)
 		return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 		return 0;
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	bridge_device = bridge_port->bridge_device;
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
 							       bridge_device,
 							       mdb->vid);
-	if (WARN_ON(!mlxsw_sp_port_vlan))
-		return -EINVAL;
+	if (!mlxsw_sp_port_vlan)
+		return 0;
 
 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	int err = 0;
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	bridge_device = bridge_port->bridge_device;
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
 							       bridge_device,
 							       mdb->vid);
-	if (WARN_ON(!mlxsw_sp_port_vlan))
-		return -EINVAL;
+	if (!mlxsw_sp_port_vlan)
+		return 0;
 
 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 }
 
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_mid *mid, *tmp;
+
+	list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+		list_del(&mid->list);
+		clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+		kfree(mid);
+	}
+}
+
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	mlxsw_sp_fdb_fini(mlxsw_sp);
-	WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+	mlxsw_sp_mids_fini(mlxsw_sp);
 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
 	kfree(mlxsw_sp->bridge);
 }
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
 	tx_ring->idx = idx;
 	tx_ring->r_vec = r_vec;
 	tx_ring->is_xdp = is_xdp;
+	u64_stats_init(&tx_ring->r_vec->tx_sync);
 
 	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
 	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 
 	rx_ring->idx = idx;
 	rx_ring->r_vec = r_vec;
+	u64_stats_init(&rx_ring->r_vec->rx_sync);
 
 	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
 	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
 	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
 	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
-	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
 		goto err;
 
 	return 0;
@@ -31,9 +31,18 @@
 
 #include "cpts.h"
 
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+	unsigned long tmo;
+};
+
 #define cpts_read32(c, r) readl_relaxed(&c->reg->r)
 #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
 
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+		      u16 ts_seqid, u8 ts_msgtype);
+
 static int event_expired(struct cpts_event *event)
 {
 	return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
 	return removed ? 0 : -1;
 }
 
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+	struct sk_buff *skb, *tmp;
+	u16 seqid;
+	u8 mtype;
+	bool found = false;
+
+	mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+	seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+	/* no need to grab txq.lock as access is always done under cpts->lock */
+	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+		struct skb_shared_hwtstamps ssh;
+		unsigned int class = ptp_classify_raw(skb);
+		struct cpts_skb_cb_data *skb_cb =
+			(struct cpts_skb_cb_data *)skb->cb;
+
+		if (cpts_match(skb, class, seqid, mtype)) {
+			u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+			memset(&ssh, 0, sizeof(ssh));
+			ssh.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(skb, &ssh);
+			found = true;
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+			dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+				mtype, seqid);
+		} else if (time_after(jiffies, skb_cb->tmo)) {
+			/* timeout any expired skbs over 1s */
+			dev_dbg(cpts->dev,
+				"expiring tx timestamp mtype %u seqid %04x\n",
+				mtype, seqid);
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+		}
+	}
+
+	return found;
+}
+
 /*
  * Returns zero if matching event type was found.
  */
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
 		event->low = lo;
 		type = event_type(event);
 		switch (type) {
+		case CPTS_EV_TX:
+			if (cpts_match_tx_ts(cpts, event)) {
+				/* if the new event matches an existing skb,
+				 * then don't queue it
+				 */
+				break;
+			}
 		case CPTS_EV_PUSH:
 		case CPTS_EV_RX:
-		case CPTS_EV_TX:
 			list_del_init(&event->list);
 			list_add_tail(&event->list, &cpts->events);
 			break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
 	return -EOPNOTSUPP;
 }
 
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+	unsigned long delay = cpts->ov_check_period;
+	struct timespec64 ts;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+	if (!skb_queue_empty(&cpts->txq))
+		delay = CPTS_SKB_TX_WORK_TIMEOUT;
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+	return (long)delay;
+}
+
 static struct ptp_clock_info cpts_info = {
 	.owner = THIS_MODULE,
 	.name = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
 	.gettime64 = cpts_ptp_gettime,
 	.settime64 = cpts_ptp_settime,
 	.enable = cpts_ptp_enable,
+	.do_aux_work = cpts_overflow_check,
 };
 
-static void cpts_overflow_check(struct work_struct *work)
-{
-	struct timespec64 ts;
-	struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
-	cpts_ptp_gettime(&cpts->info, &ts);
-	pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
-	schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
 		      u16 ts_seqid, u8 ts_msgtype)
 {
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
 		return 0;
 
 	spin_lock_irqsave(&cpts->lock, flags);
-	cpts_fifo_read(cpts, CPTS_EV_PUSH);
+	cpts_fifo_read(cpts, -1);
 	list_for_each_safe(this, next, &cpts->events) {
 		event = list_entry(this, struct cpts_event, list);
 		if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
 			break;
 		}
 	}
+
+	if (ev_type == CPTS_EV_TX && !ns) {
+		struct cpts_skb_cb_data *skb_cb =
+			(struct cpts_skb_cb_data *)skb->cb;
+		/* Not found, add frame to queue for processing later.
+		 * The periodic FIFO check will handle this.
+		 */
+		skb_get(skb);
+		/* get the timestamp for timeouts */
+		skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+		__skb_queue_tail(&cpts->txq, skb);
+		ptp_schedule_worker(cpts->clock, 0);
+	}
 	spin_unlock_irqrestore(&cpts->lock, flags);
 
 	return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
 {
 	int err, i;
 
+	skb_queue_head_init(&cpts->txq);
 	INIT_LIST_HEAD(&cpts->events);
 	INIT_LIST_HEAD(&cpts->pool);
 	for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
 	}
 	cpts->phc_index = ptp_clock_index(cpts->clock);
 
-	schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
 	return 0;
 
 err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
 	if (WARN_ON(!cpts->clock))
 		return;
 
-	cancel_delayed_work_sync(&cpts->overflow_work);
-
 	ptp_clock_unregister(cpts->clock);
 	cpts->clock = NULL;
 
 	cpts_write32(cpts, 0, int_enable);
 	cpts_write32(cpts, 0, control);
 
+	/* Drop all packet */
+	skb_queue_purge(&cpts->txq);
+
 	clk_disable(cpts->refclk);
 }
 EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
 	cpts->dev = dev;
 	cpts->reg = (struct cpsw_cpts __iomem *)regs;
 	spin_lock_init(&cpts->lock);
-	INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
 
 	ret = cpts_of_parse(cpts, node);
 	if (ret)
@@ -119,13 +119,13 @@ struct cpts {
 	u32 cc_mult; /* for the nominal frequency */
 	struct cyclecounter cc;
 	struct timecounter tc;
-	struct delayed_work overflow_work;
 	int phc_index;
 	struct clk *refclk;
 	struct list_head events;
 	struct list_head pool;
 	struct cpts_event pool_data[CPTS_MAX_EVENTS];
 	unsigned long ov_check_period;
+	struct sk_buff_head txq;
 };
 
 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);