hv_netvsc: Add validation for untrusted Hyper-V values
For additional robustness in the face of Hyper-V errors or malicious behavior, validate all values that originate from packets that Hyper-V has sent to the guest in the host-to-guest ring buffer. Ensure that invalid values cannot cause indexing off the end of an array, or subvert an existing validation via integer overflow. Ensure that outgoing packets do not have any leftover guest memory that has not been zeroed out.

Signed-off-by: Andres Beltran <lkmlabelt@gmail.com>
Co-developed-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
Signed-off-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: netdev@vger.kernel.org
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:

committed by
David S. Miller

parent
fd944dc243
commit
4414418595
@@ -388,6 +388,15 @@ static int netvsc_init_buf(struct hv_device *device,
|
||||
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
|
||||
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
|
||||
|
||||
/* Ensure buffer will not overflow */
|
||||
if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
|
||||
(u64)net_device->recv_section_cnt > (u64)buf_size) {
|
||||
netdev_err(ndev, "invalid recv_section_size %u\n",
|
||||
net_device->recv_section_size);
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Setup receive completion ring.
|
||||
* Add 1 to the recv_section_cnt because at least one entry in a
|
||||
* ring buffer has to be empty.
|
||||
@@ -460,6 +469,12 @@ static int netvsc_init_buf(struct hv_device *device,
|
||||
/* Parse the response */
|
||||
net_device->send_section_size = init_packet->msg.
|
||||
v1_msg.send_send_buf_complete.section_size;
|
||||
if (net_device->send_section_size < NETVSC_MTU_MIN) {
|
||||
netdev_err(ndev, "invalid send_section_size %u\n",
|
||||
net_device->send_section_size);
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Section count is simply the size divided by the section size. */
|
||||
net_device->send_section_cnt = buf_size / net_device->send_section_size;
|
||||
@@ -731,12 +746,49 @@ static void netvsc_send_completion(struct net_device *ndev,
|
||||
int budget)
|
||||
{
|
||||
const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
|
||||
u32 msglen = hv_pkt_datalen(desc);
|
||||
|
||||
/* Ensure packet is big enough to read header fields */
|
||||
if (msglen < sizeof(struct nvsp_message_header)) {
|
||||
netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (nvsp_packet->hdr.msg_type) {
|
||||
case NVSP_MSG_TYPE_INIT_COMPLETE:
|
||||
if (msglen < sizeof(struct nvsp_message_header) +
|
||||
sizeof(struct nvsp_message_init_complete)) {
|
||||
netdev_err(ndev, "nvsp_msg length too small: %u\n",
|
||||
msglen);
|
||||
return;
|
||||
}
|
||||
fallthrough;
|
||||
|
||||
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
|
||||
if (msglen < sizeof(struct nvsp_message_header) +
|
||||
sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
|
||||
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
|
||||
msglen);
|
||||
return;
|
||||
}
|
||||
fallthrough;
|
||||
|
||||
case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
|
||||
if (msglen < sizeof(struct nvsp_message_header) +
|
||||
sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
|
||||
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
|
||||
msglen);
|
||||
return;
|
||||
}
|
||||
fallthrough;
|
||||
|
||||
case NVSP_MSG5_TYPE_SUBCHANNEL:
|
||||
if (msglen < sizeof(struct nvsp_message_header) +
|
||||
sizeof(struct nvsp_5_subchannel_complete)) {
|
||||
netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
|
||||
msglen);
|
||||
return;
|
||||
}
|
||||
/* Copy the response back */
|
||||
memcpy(&net_device->channel_init_pkt, nvsp_packet,
|
||||
sizeof(struct nvsp_message));
|
||||
@@ -1117,19 +1169,28 @@ static void enq_receive_complete(struct net_device *ndev,
|
||||
static int netvsc_receive(struct net_device *ndev,
|
||||
struct netvsc_device *net_device,
|
||||
struct netvsc_channel *nvchan,
|
||||
const struct vmpacket_descriptor *desc,
|
||||
const struct nvsp_message *nvsp)
|
||||
const struct vmpacket_descriptor *desc)
|
||||
{
|
||||
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
||||
struct vmbus_channel *channel = nvchan->channel;
|
||||
const struct vmtransfer_page_packet_header *vmxferpage_packet
|
||||
= container_of(desc, const struct vmtransfer_page_packet_header, d);
|
||||
const struct nvsp_message *nvsp = hv_pkt_data(desc);
|
||||
u32 msglen = hv_pkt_datalen(desc);
|
||||
u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
||||
char *recv_buf = net_device->recv_buf;
|
||||
u32 status = NVSP_STAT_SUCCESS;
|
||||
int i;
|
||||
int count = 0;
|
||||
|
||||
/* Ensure packet is big enough to read header fields */
|
||||
if (msglen < sizeof(struct nvsp_message_header)) {
|
||||
netif_err(net_device_ctx, rx_err, ndev,
|
||||
"invalid nvsp header, length too small: %u\n",
|
||||
msglen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Make sure this is a valid nvsp packet */
|
||||
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
|
||||
netif_err(net_device_ctx, rx_err, ndev,
|
||||
@@ -1138,6 +1199,14 @@ static int netvsc_receive(struct net_device *ndev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Validate xfer page pkt header */
|
||||
if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
|
||||
netif_err(net_device_ctx, rx_err, ndev,
|
||||
"Invalid xfer page pkt, offset too small: %u\n",
|
||||
desc->offset8 << 3);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
|
||||
netif_err(net_device_ctx, rx_err, ndev,
|
||||
"Invalid xfer page set id - expecting %x got %x\n",
|
||||
@@ -1148,6 +1217,14 @@ static int netvsc_receive(struct net_device *ndev,
|
||||
|
||||
count = vmxferpage_packet->range_cnt;
|
||||
|
||||
/* Check count for a valid value */
|
||||
if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
|
||||
netif_err(net_device_ctx, rx_err, ndev,
|
||||
"Range count is not valid: %d\n",
|
||||
count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
|
||||
for (i = 0; i < count; i++) {
|
||||
u32 offset = vmxferpage_packet->ranges[i].byte_offset;
|
||||
@@ -1155,7 +1232,8 @@ static int netvsc_receive(struct net_device *ndev,
|
||||
void *data;
|
||||
int ret;
|
||||
|
||||
if (unlikely(offset + buflen > net_device->recv_buf_size)) {
|
||||
if (unlikely(offset > net_device->recv_buf_size ||
|
||||
buflen > net_device->recv_buf_size - offset)) {
|
||||
nvchan->rsc.cnt = 0;
|
||||
status = NVSP_STAT_FAIL;
|
||||
netif_err(net_device_ctx, rx_err, ndev,
|
||||
@@ -1194,6 +1272,13 @@ static void netvsc_send_table(struct net_device *ndev,
|
||||
u32 count, offset, *tab;
|
||||
int i;
|
||||
|
||||
/* Ensure packet is big enough to read send_table fields */
|
||||
if (msglen < sizeof(struct nvsp_message_header) +
|
||||
sizeof(struct nvsp_5_send_indirect_table)) {
|
||||
netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
|
||||
return;
|
||||
}
|
||||
|
||||
count = nvmsg->msg.v5_msg.send_table.count;
|
||||
offset = nvmsg->msg.v5_msg.send_table.offset;
|
||||
|
||||
@@ -1225,10 +1310,18 @@ static void netvsc_send_table(struct net_device *ndev,
|
||||
}
|
||||
|
||||
static void netvsc_send_vf(struct net_device *ndev,
|
||||
const struct nvsp_message *nvmsg)
|
||||
const struct nvsp_message *nvmsg,
|
||||
u32 msglen)
|
||||
{
|
||||
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
||||
|
||||
/* Ensure packet is big enough to read its fields */
|
||||
if (msglen < sizeof(struct nvsp_message_header) +
|
||||
sizeof(struct nvsp_4_send_vf_association)) {
|
||||
netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
|
||||
return;
|
||||
}
|
||||
|
||||
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
|
||||
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
|
||||
netdev_info(ndev, "VF slot %u %s\n",
|
||||
@@ -1238,16 +1331,24 @@ static void netvsc_send_vf(struct net_device *ndev,
|
||||
|
||||
static void netvsc_receive_inband(struct net_device *ndev,
|
||||
struct netvsc_device *nvscdev,
|
||||
const struct nvsp_message *nvmsg,
|
||||
u32 msglen)
|
||||
const struct vmpacket_descriptor *desc)
|
||||
{
|
||||
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
|
||||
u32 msglen = hv_pkt_datalen(desc);
|
||||
|
||||
/* Ensure packet is big enough to read header fields */
|
||||
if (msglen < sizeof(struct nvsp_message_header)) {
|
||||
netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (nvmsg->hdr.msg_type) {
|
||||
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
|
||||
netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
|
||||
break;
|
||||
|
||||
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
|
||||
netvsc_send_vf(ndev, nvmsg);
|
||||
netvsc_send_vf(ndev, nvmsg, msglen);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -1261,23 +1362,20 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
|
||||
{
|
||||
struct vmbus_channel *channel = nvchan->channel;
|
||||
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
|
||||
u32 msglen = hv_pkt_datalen(desc);
|
||||
|
||||
trace_nvsp_recv(ndev, channel, nvmsg);
|
||||
|
||||
switch (desc->type) {
|
||||
case VM_PKT_COMP:
|
||||
netvsc_send_completion(ndev, net_device, channel,
|
||||
desc, budget);
|
||||
netvsc_send_completion(ndev, net_device, channel, desc, budget);
|
||||
break;
|
||||
|
||||
case VM_PKT_DATA_USING_XFER_PAGES:
|
||||
return netvsc_receive(ndev, net_device, nvchan,
|
||||
desc, nvmsg);
|
||||
return netvsc_receive(ndev, net_device, nvchan, desc);
|
||||
break;
|
||||
|
||||
case VM_PKT_DATA_INBAND:
|
||||
netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
|
||||
netvsc_receive_inband(ndev, net_device, desc);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
Reference in New Issue
Block a user