net: openvswitch: reorder masks array based on usage

This patch reorders the masks array every 4 seconds based on the
masks' usage count. This greatly reduces the number of masks hit
per packet and hence improves overall performance, especially in
the OVS/OVN case for OpenShift.
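
For illustration only: the datapath.c hunks below merely wire up the
periodic worker; the sorting itself is implemented in flow_table.c,
which is among the four changed files but not shown in this excerpt.
The core idea is to sort the mask array by a per-mask hit counter so
that the most frequently matching masks are probed first. A minimal
userspace-style sketch, with hypothetical names (struct sw_mask,
hit_count, rebalance_masks):

/* Hypothetical, simplified sketch of usage-based mask reordering.
 * The kernel implementation additionally has to snapshot per-CPU
 * counters and publish the result in an RCU-safe way.
 */
#include <stdlib.h>

struct sw_mask {
        unsigned long long hit_count;   /* packets matched via this mask */
        /* ... actual mask fields elided ... */
};

static int cmp_by_usage(const void *a, const void *b)
{
        const struct sw_mask *ma = *(const struct sw_mask *const *)a;
        const struct sw_mask *mb = *(const struct sw_mask *const *)b;

        /* Sort descending: the most used masks move to the front, so
         * a lookup walking the array in order matches after fewer
         * attempts.
         */
        if (mb->hit_count > ma->hit_count)
                return 1;
        if (mb->hit_count < ma->hit_count)
                return -1;
        return 0;
}

/* Run periodically; in this patch, every 4 seconds. */
static void rebalance_masks(struct sw_mask **masks, size_t count)
{
        qsort(masks, count, sizeof(*masks), cmp_by_usage);
}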

Here are some results from the OVS/OVN OpenShift test, which uses
8 pods, each pod having 512 uperf connections, and each connection
sending a 64-byte request and receiving a 1024-byte response (TCP).
All uperf clients are on one worker node, while all uperf servers
are on the other worker node.

Kernel without this patch     :  7.71 Gbps
Kernel with this patch applied: 14.52 Gbps

We also ran some tests to verify that the rebalance activity does
not lower the flow insertion rate; it does not.

Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Tested-by: Andrew Theurer <atheurer@redhat.com>
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eelco Chaudron, 2020-07-15 14:09:28 +02:00 (committed by David S. Miller)
commit eac87c413b, parent b18432c5a4
4 changed files with 207 additions and 7 deletions

@@ -130,6 +130,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                   const struct dp_upcall_info *,
                                   uint32_t cutlen);
 
+static void ovs_dp_masks_rebalance(struct work_struct *work);
+
 /* Must be called with rcu_read_lock or ovs_mutex. */
 const char *ovs_dp_name(const struct datapath *dp)
 {
@@ -1653,6 +1655,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                 goto err_destroy_reply;
 
         ovs_dp_set_net(dp, sock_net(skb->sk));
+        INIT_DELAYED_WORK(&dp->masks_rebalance, ovs_dp_masks_rebalance);
 
         /* Allocate table. */
         err = ovs_flow_tbl_init(&dp->table);
@@ -1712,6 +1715,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
 
+        schedule_delayed_work(&dp->masks_rebalance,
+                              msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
+
         ovs_unlock();
 
         ovs_notify(&dp_datapath_genl_family, reply, info);
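
DP_MASKS_REBALANCE_INTERVAL is defined in net/openvswitch/datapath.h,
one of the four changed files not shown in this excerpt. Given the
4-second cadence described in the commit message and the
msecs_to_jiffies() conversion at the call site, it should be the
interval in milliseconds, presumably along the lines of:

/* Presumed definition in datapath.h, matching the 4-second
 * rebalance cadence described above.
 */
#define DP_MASKS_REBALANCE_INTERVAL     4000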
@@ -1756,6 +1762,9 @@ static void __dp_destroy(struct datapath *dp)
 
         /* RCU destroy the flow table */
         call_rcu(&dp->rcu, destroy_dp_rcu);
+
+        /* Cancel remaining work. */
+        cancel_delayed_work_sync(&dp->masks_rebalance);
 }
 
 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
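
The three hunks above follow the standard kernel delayed-work
lifecycle: INIT_DELAYED_WORK() binds the handler, schedule_delayed_work()
arms the deferred run, and cancel_delayed_work_sync() at destroy time
waits for any in-flight run to finish before the datapath can be freed.
A minimal self-contained sketch of the same pattern (a toy module, not
OVS code; all names are illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *work)
{
        pr_info("periodic work ran\n");
        /* Re-arm from within the handler, as ovs_dp_masks_rebalance()
         * does in the next hunk.
         */
        schedule_delayed_work(&demo_work, msecs_to_jiffies(4000));
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&demo_work, demo_work_fn);    /* bind handler */
        schedule_delayed_work(&demo_work, msecs_to_jiffies(4000));
        return 0;
}

static void __exit demo_exit(void)
{
        /* Stops a pending run and waits for a running one; safe even
         * though the handler re-queues itself.
         */
        cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");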
@@ -2338,6 +2347,19 @@ out:
         return skb->len;
 }
 
+static void ovs_dp_masks_rebalance(struct work_struct *work)
+{
+        struct datapath *dp = container_of(work, struct datapath,
+                                           masks_rebalance.work);
+
+        ovs_lock();
+        ovs_flow_masks_rebalance(&dp->table);
+        ovs_unlock();
+
+        schedule_delayed_work(&dp->masks_rebalance,
+                              msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
+}
+
 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
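
Two details of the worker are worth noting. First, it re-arms itself
only after ovs_flow_masks_rebalance() returns, so runs never overlap
and the 4-second interval is measured between completions rather than
starts. Second, it runs under ovs_lock(), which serializes it against
other datapath writers, while the packet path reads the mask array
under RCU; the reordering in flow_table.c (not shown here) therefore
has to publish a sorted copy rather than sort in place. A hedged
sketch of that publish step, with hypothetical names
(publish_sorted_masks, new_ma):

/* Hypothetical sketch of an RCU-safe mask-array swap; the real code
 * lives in flow_table.c and may differ. Assumes struct mask_array
 * embeds a struct rcu_head named 'rcu'.
 */
static void publish_sorted_masks(struct flow_table *tbl,
                                 struct mask_array *new_ma)
{
        struct mask_array *old_ma;

        /* Writer side: caller holds ovs_lock(). */
        old_ma = ovsl_dereference(tbl->mask_array);

        /* Concurrent RCU readers see either the old or the new array. */
        rcu_assign_pointer(tbl->mask_array, new_ma);

        /* Free the old copy once all current RCU readers are done. */
        kfree_rcu(old_ma, rcu);
}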