Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Both merge conflicts were minor overlapping changes.

Resolution work done by Stephen Rothwell was used
as a reference.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller
2016-08-18 01:17:32 -04:00
commit 60747ef4d1
1746 changed files with 55912 additions and 76980 deletions

drivers/net/hyperv/hyperv_net.h

@@ -645,12 +645,6 @@ struct netvsc_reconfig {
 	u32 event;
 };
 
-struct garp_wrk {
-	struct work_struct dwrk;
-	struct net_device *netdev;
-	struct netvsc_device *netvsc_dev;
-};
-
 /* The context of the netvsc device */
 struct net_device_context {
 	/* point back to our device context */
@@ -668,7 +662,6 @@ struct net_device_context {
 	struct work_struct work;
 	u32 msg_enable; /* debug level */
 
-	struct garp_wrk gwrk;
 
 	struct netvsc_stats __percpu *tx_stats;
 	struct netvsc_stats __percpu *rx_stats;
@@ -679,6 +672,15 @@ struct net_device_context {
 
 	/* the device is going away */
 	bool start_remove;
+
+	/* State to manage the associated VF interface. */
+	struct net_device *vf_netdev;
+	bool vf_inject;
+	atomic_t vf_use_cnt;
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 vf_alloc;
+	/* Serial number of the VF to team with */
+	u32 vf_serial;
 };
 
 /* Per netvsc device */
@@ -734,15 +736,7 @@ struct netvsc_device {
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	/* 1: allocated, serial number is valid. 0: not allocated */
-	u32 vf_alloc;
-	/* Serial number of the VF to team with */
-	u32 vf_serial;
 	atomic_t open_cnt;
-	/* State to manage the associated VF interface. */
-	bool vf_inject;
-	struct net_device *vf_netdev;
-	atomic_t vf_use_cnt;
 };
 
 static inline struct netvsc_device *

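Note on the hunks above: every VF-related field moves out of struct netvsc_device and into struct net_device_context. Read from the diff (the patch description itself is not reproduced on this page), the point appears to be lifetime: a netvsc_device can be torn down and re-created while the net_device and its private context persist, so VF bookkeeping kept in the context survives re-creation. A condensed, compilable user-space sketch of the resulting split, with kernel types approximated and unrelated fields elided:

/* Sketch only: user-space stand-ins for the kernel's u32 and atomic_t. */
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;
typedef struct { int counter; } atomic_t;	/* stand-in for kernel atomic_t */

struct net_device;	/* opaque here */

struct netvsc_device {			/* may be torn down and re-created */
	atomic_t open_cnt;
	/* VF fields no longer live here */
};

struct net_device_context {		/* persists with the net_device */
	struct netvsc_device *nvdev;
	/* VF state, moved here so it survives netvsc_device re-creation: */
	struct net_device *vf_netdev;
	bool vf_inject;
	atomic_t vf_use_cnt;
	u32 vf_alloc;	/* 1: allocated, serial number is valid */
	u32 vf_serial;	/* serial number of the VF to team with */
};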
drivers/net/hyperv/netvsc.c

@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
 	atomic_set(&net_device->open_cnt, 0);
-	atomic_set(&net_device->vf_use_cnt, 0);
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-	net_device->vf_netdev = NULL;
-	net_device->vf_inject = false;
-
 	return net_device;
 }
 
@@ -1110,16 +1106,16 @@ static void netvsc_send_table(struct hv_device *hdev,
 		nvscdev->send_table[i] = tab[i];
 }
 
-static void netvsc_send_vf(struct netvsc_device *nvdev,
+static void netvsc_send_vf(struct net_device_context *net_device_ctx,
 			   struct nvsp_message *nvmsg)
 {
-	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
-	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
 }
 
 static inline void netvsc_receive_inband(struct hv_device *hdev,
-					 struct netvsc_device *nvdev,
-					 struct nvsp_message *nvmsg)
+					 struct net_device_context *net_device_ctx,
+					 struct nvsp_message *nvmsg)
 {
 	switch (nvmsg->hdr.msg_type) {
 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1127,7 +1123,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
 		break;
 
 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
-		netvsc_send_vf(nvdev, nvmsg);
+		netvsc_send_vf(net_device_ctx, nvmsg);
 		break;
 	}
 }
@@ -1140,6 +1136,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 				   struct vmpacket_descriptor *desc)
 {
 	struct nvsp_message *nvmsg;
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
 	nvmsg = (struct nvsp_message *)((unsigned long)
 		desc + (desc->offset8 << 3));
@@ -1154,7 +1151,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 		break;
 
 	case VM_PKT_DATA_INBAND:
-		netvsc_receive_inband(device, net_device, nvmsg);
+		netvsc_receive_inband(device, net_device_ctx, nvmsg);
 		break;
 
 	default:
drivers/net/hyperv/netvsc_drv.c

@@ -671,20 +671,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 	struct sk_buff *skb;
 	struct sk_buff *vf_skb;
 	struct netvsc_stats *rx_stats;
-	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
 	u32 bytes_recvd = packet->total_data_buflen;
 	int ret = 0;
 
 	if (!net || net->reg_state != NETREG_REGISTERED)
 		return NVSP_STAT_FAIL;
 
-	if (READ_ONCE(netvsc_dev->vf_inject)) {
-		atomic_inc(&netvsc_dev->vf_use_cnt);
-		if (!READ_ONCE(netvsc_dev->vf_inject)) {
+	if (READ_ONCE(net_device_ctx->vf_inject)) {
+		atomic_inc(&net_device_ctx->vf_use_cnt);
+		if (!READ_ONCE(net_device_ctx->vf_inject)) {
 			/*
 			 * We raced; just move on.
 			 */
-			atomic_dec(&netvsc_dev->vf_use_cnt);
+			atomic_dec(&net_device_ctx->vf_use_cnt);
 			goto vf_injection_done;
 		}
 
@@ -696,17 +695,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 		 * the host). Deliver these via the VF interface
 		 * in the guest.
 		 */
-		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
-					       csum_info, *data, vlan_tci);
+		vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
+					       packet, csum_info, *data,
+					       vlan_tci);
 		if (vf_skb != NULL) {
-			++netvsc_dev->vf_netdev->stats.rx_packets;
-			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+			++net_device_ctx->vf_netdev->stats.rx_packets;
+			net_device_ctx->vf_netdev->stats.rx_bytes +=
+				bytes_recvd;
 			netif_receive_skb(vf_skb);
 		} else {
 			++net->stats.rx_dropped;
 			ret = NVSP_STAT_FAIL;
 		}
-		atomic_dec(&netvsc_dev->vf_use_cnt);
+		atomic_dec(&net_device_ctx->vf_use_cnt);
 		return ret;
 	}
 
@@ -1163,17 +1164,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
 	free_netdev(netdev);
 }
 
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
-	struct garp_wrk *gwrk;
-
-	gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
-	netdev_notify_peers(gwrk->netdev);
-
-	atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
-}
-
 static struct net_device *get_netvsc_net_device(char *mac)
 {
 	struct net_device *dev, *found = NULL;
@@ -1216,7 +1206,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
-	if (netvsc_dev == NULL)
+	if (!netvsc_dev || net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
 
@@ -1224,10 +1214,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 	/*
 	 * Take a reference on the module.
 	 */
 	try_module_get(THIS_MODULE);
-	netvsc_dev->vf_netdev = vf_netdev;
+	net_device_ctx->vf_netdev = vf_netdev;
 	return NOTIFY_OK;
 }
 
+static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
+{
+	net_device_ctx->vf_inject = true;
+}
+
+static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
+{
+	net_device_ctx->vf_inject = false;
+
+	/* Wait for currently active users to drain out. */
+	while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
+		udelay(50);
+}
+
 static int netvsc_vf_up(struct net_device *vf_netdev)
 {
@@ -1246,11 +1249,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
 
-	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
-	netvsc_dev->vf_inject = true;
+	netvsc_inject_enable(net_device_ctx);
 
 	/*
 	 * Open the device before switching data path.
@@ -1265,15 +1268,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 	netif_carrier_off(ndev);
 
-	/*
-	 * Now notify peers. We are scheduling work to
-	 * notify peers; take a reference to prevent
-	 * the VF interface from vanishing.
-	 */
-	atomic_inc(&netvsc_dev->vf_use_cnt);
-	net_device_ctx->gwrk.netdev = vf_netdev;
-	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-	schedule_work(&net_device_ctx->gwrk.dwrk);
-
+	/* Now notify peers through VF device. */
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
+
 	return NOTIFY_OK;
 }
 
@@ -1296,29 +1292,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
 
-	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
-	netvsc_dev->vf_inject = false;
-	/*
-	 * Wait for currently active users to
-	 * drain out.
-	 */
-	while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
-		udelay(50);
-
+	netvsc_inject_disable(net_device_ctx);
 	netvsc_switch_datapath(ndev, false);
 	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
 	rndis_filter_close(netvsc_dev);
 	netif_carrier_on(ndev);
-	/*
-	 * Notify peers.
-	 */
-	atomic_inc(&netvsc_dev->vf_use_cnt);
-	net_device_ctx->gwrk.netdev = ndev;
-	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-	schedule_work(&net_device_ctx->gwrk.dwrk);
+
+	/* Now notify peers through netvsc device. */
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
 
 	return NOTIFY_OK;
 }
@@ -1340,11 +1325,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
-	if (netvsc_dev == NULL)
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
-
-	netvsc_dev->vf_netdev = NULL;
+	netvsc_inject_disable(net_device_ctx);
+	net_device_ctx->vf_netdev = NULL;
 	module_put(THIS_MODULE);
 	return NOTIFY_OK;
 }
@@ -1392,11 +1377,14 @@ static int netvsc_probe(struct hv_device *dev,
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
-	INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
 
 	spin_lock_init(&net_device_ctx->lock);
 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
 
+	atomic_set(&net_device_ctx->vf_use_cnt, 0);
+	net_device_ctx->vf_netdev = NULL;
+	net_device_ctx->vf_inject = false;
+
 	net->netdev_ops = &device_ops;
 
 	net->hw_features = NETVSC_HW_FEATURES;
 
@@ -1507,8 +1495,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
 {
 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
-	/* Avoid Vlan, Bonding dev with same MAC registering as VF */
-	if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
+	/* Avoid Vlan dev with same MAC registering as VF */
+	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
 		return NOTIFY_DONE;
 
+	/* Avoid Bonding master dev with same MAC registering as VF */
+	if (event_dev->priv_flags & IFF_BONDING &&
+	    event_dev->flags & IFF_MASTER)
+		return NOTIFY_DONE;
+
 	switch (event) {
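
The netvsc_inject_enable()/netvsc_inject_disable() helpers added above, together with the increment-then-recheck sequence in netvsc_recv_callback(), form a small flag-plus-refcount protocol: receivers only touch the VF after raising vf_use_cnt and re-checking vf_inject, while the disable path clears the flag first and then spins until vf_use_cnt drains to zero. A minimal user-space sketch of the same pattern, with C11 atomics standing in for the kernel's READ_ONCE()/atomic_t and nanosleep() for udelay(); the names here are illustrative, not taken from the driver:

/* Flag-plus-refcount drain pattern, user-space approximation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_bool vf_inject;	/* zero-initialized: false */
static atomic_int vf_use_cnt;

/* Receiver side: mirrors the checks in netvsc_recv_callback(). */
static bool try_deliver_via_vf(void)
{
	if (!atomic_load(&vf_inject))
		return false;
	atomic_fetch_add(&vf_use_cnt, 1);
	if (!atomic_load(&vf_inject)) {
		/* Raced with disable; back out and use the netvsc path. */
		atomic_fetch_sub(&vf_use_cnt, 1);
		return false;
	}
	/* ... hand the frame to the VF here ... */
	atomic_fetch_sub(&vf_use_cnt, 1);
	return true;
}

/* Writer side: mirrors netvsc_inject_disable(). */
static void inject_disable(void)
{
	atomic_store(&vf_inject, false);
	/* Wait for in-flight users to drain out (the kernel busy-waits
	 * with udelay(50); we sleep instead). */
	while (atomic_load(&vf_use_cnt) != 0) {
		struct timespec ts = { 0, 50 * 1000 };	/* ~50us */
		nanosleep(&ts, NULL);
	}
}

int main(void)
{
	atomic_store(&vf_inject, true);
	printf("delivered via VF: %d\n", try_deliver_via_vf());	/* 1 */
	inject_disable();
	printf("delivered via VF: %d\n", try_deliver_via_vf());	/* 0 */
	return 0;
}

The re-check after the increment is what closes the race with a concurrent disable: a receiver that saw the flag set just before it was cleared either backs out (decrementing the count) or finishes its delivery while the disabler is still spinning on the count.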