Fix CVE-2019-3900 (rhbz 1698757 1702940)

Justin M. Forbes 2019-04-25 07:23:14 -05:00
parent eb5b64f354
commit 2f9efa0cf1
2 changed files with 206 additions and 0 deletions


@@ -587,6 +587,9 @@ Patch507: 0001-Drop-that-for-now.patch
# Submitted upstream at https://lkml.org/lkml/2019/4/23/89
Patch508: KEYS-Make-use-of-platform-keyring-for-module-signature.patch
# CVE-2019-3900 rhbz 1698757 1702940
Patch524: net-vhost_net-fix-possible-infinite-loop.patch
# END OF PATCH DEFINITIONS
%endif
@@ -1860,6 +1863,9 @@ fi
#
#
%changelog
* Thu Apr 25 2019 Justin M. Forbes <jforbes@fedoraproject.org>
- Fix CVE-2019-3900 (rhbz 1698757 1702940)
* Wed Apr 24 2019 Jeremy Cline <jcline@redhat.com> - 5.1.0-0.rc6.git2.1
- Linux v5.1-rc6-15-gba25b50d582f


@@ -0,0 +1,200 @@
From patchwork Thu Apr 25 07:33:19 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Jason Wang <jasowang@redhat.com>
X-Patchwork-Id: 10916185
Return-Path: <kvm-owner@kernel.org>
Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org
[172.30.200.125])
by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id E4F501575
for <patchwork-kvm@patchwork.kernel.org>;
Thu, 25 Apr 2019 07:33:33 +0000 (UTC)
Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1])
by mail.wl.linuxfoundation.org (Postfix) with ESMTP id D276828BD7
for <patchwork-kvm@patchwork.kernel.org>;
Thu, 25 Apr 2019 07:33:33 +0000 (UTC)
Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486)
id C64AC28BE1; Thu, 25 Apr 2019 07:33:33 +0000 (UTC)
X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on
pdx-wl-mail.web.codeaurora.org
X-Spam-Level:
X-Spam-Status: No, score=-7.9 required=2.0 tests=BAYES_00,MAILING_LIST_MULTI,
RCVD_IN_DNSWL_HI autolearn=ham version=3.3.1
Received: from vger.kernel.org (vger.kernel.org [209.132.180.67])
by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 590B228BD7
for <patchwork-kvm@patchwork.kernel.org>;
Thu, 25 Apr 2019 07:33:33 +0000 (UTC)
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
id S1726957AbfDYHd1 (ORCPT
<rfc822;patchwork-kvm@patchwork.kernel.org>);
Thu, 25 Apr 2019 03:33:27 -0400
Received: from mx1.redhat.com ([209.132.183.28]:60130 "EHLO mx1.redhat.com"
rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
id S1726317AbfDYHd1 (ORCPT <rfc822;kvm@vger.kernel.org>);
Thu, 25 Apr 2019 03:33:27 -0400
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id C2BCE3002619;
Thu, 25 Apr 2019 07:33:26 +0000 (UTC)
Received: from hp-dl380pg8-02.lab.eng.pek2.redhat.com
(hp-dl380pg8-02.lab.eng.pek2.redhat.com [10.73.8.12])
by smtp.corp.redhat.com (Postfix) with ESMTP id 5DA021001DDB;
Thu, 25 Apr 2019 07:33:21 +0000 (UTC)
From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, jasowang@redhat.com, kvm@vger.kernel.org,
virtualization@lists.linux-foundation.org, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org
Cc: ppandit@redhat.com
Subject: [PATCH net] vhost_net: fix possible infinite loop
Date: Thu, 25 Apr 2019 03:33:19 -0400
Message-Id: <1556177599-56248-1-git-send-email-jasowang@redhat.com>
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.40]);
Thu, 25 Apr 2019 07:33:26 +0000 (UTC)
Sender: kvm-owner@vger.kernel.org
Precedence: bulk
List-ID: <kvm.vger.kernel.org>
X-Mailing-List: kvm@vger.kernel.org
X-Virus-Scanned: ClamAV using ClamSMTP
When the rx buffer is too small for a packet, we will discard the vq
descriptor and retry it for the next packet:

    while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
                                                  &busyloop_intr))) {
        ...
        /* On overrun, truncate and discard */
        if (unlikely(headcount > UIO_MAXIOV)) {
            iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
            err = sock->ops->recvmsg(sock, &msg,
                                     1, MSG_DONTWAIT | MSG_TRUNC);
            pr_debug("Discarded rx packet: len %zd\n", sock_len);
            continue;
        }
        ...
    }

This makes it possible to trigger an infinite while..continue loop
through the cooperation of two VMs, e.g.:

1) Malicious VM1 allocates a 1 byte rx buffer and tries to slow down
   the vhost process as much as possible, e.g. by using indirect
   descriptors or similar.
2) Malicious VM2 generates packets to VM1 as fast as possible.

Fix this by checking against the weight at the end of the RX and TX
loops. This also eliminates other similar cases where:

- userspace is consuming the packets in the meanwhile
- a theoretical TOCTOU attack if the guest moves the avail index back
  and forth to hit the continue after vhost finds the guest just added
  new buffers

This addresses CVE-2019-3900.
Fixes: d8316f3991d20 ("vhost: fix total length when packets are too short")
Fixes: 3a4d5c94e9593 ("vhost_net: a kernel-level virtio server")
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vhost/net.c | 41 +++++++++++++++++++++--------------------
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35..fb46e6b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -778,8 +778,9 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int err;
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
+ bool next_round = false;
- for (;;) {
+ do {
bool busyloop_intr = false;
if (nvq->done_idx == VHOST_NET_BATCH)
@@ -845,11 +846,10 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->done_idx].len = 0;
++nvq->done_idx;
- if (vhost_exceeds_weight(++sent_pkts, total_len)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+ if (next_round)
+ vhost_poll_queue(&vq->poll);
vhost_tx_batch(net, nvq, sock, &msg);
}
@@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy_used;
int sent_pkts = 0;
+ bool next_round = false;
- for (;;) {
+ do {
bool busyloop_intr;
/* Release DMAs done buffers first */
@@ -951,11 +952,10 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+ if (next_round)
+ vhost_poll_queue(&vq->poll);
}
/* Expects to be always run from workqueue - which acts as
@@ -1134,6 +1134,7 @@ static void handle_rx(struct vhost_net *net)
struct iov_iter fixup;
__virtio16 num_buffers;
int recv_pkts = 0;
+ bool next_round = false;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
sock = vq->private_data;
@@ -1153,8 +1154,11 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr))) {
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr);
+ if (!sock_len)
+ break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1239,12 +1243,9 @@ static void handle_rx(struct vhost_net *net)
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
total_len += vhost_len;
- if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- goto out;
- }
- }
- if (unlikely(busyloop_intr))
+ } while (!(next_round = vhost_exceeds_weight(++recv_pkts, total_len)));
+
+ if (unlikely(busyloop_intr || next_round))
vhost_poll_queue(&vq->poll);
else
vhost_net_enable_vq(net, vq);
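
The patch above bounds each handler invocation by moving the weight check
into the condition of a do/while loop: every iteration, including the
overrun/discard path that previously hit continue before reaching the
check, now counts against the budget, and the handler requeues itself via
vhost_poll_queue() once the budget is spent. Below is a minimal standalone
sketch of that pattern in plain C, not the actual kernel code;
exceeds_weight(), fetch_packet(), process_packet(), requeue_work() and the
WEIGHT_* limits are illustrative stand-ins for vhost_exceeds_weight(), the
real rx path and vhost_poll_queue().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define WEIGHT_MAX_PKTS  256
#define WEIGHT_MAX_BYTES (1UL << 19)

/* Stand-in for vhost_exceeds_weight(): bound the work done per invocation. */
static bool exceeds_weight(int pkts, size_t bytes)
{
    return pkts >= WEIGHT_MAX_PKTS || bytes >= WEIGHT_MAX_BYTES;
}

/* Trivial stubs so the sketch runs; a real handler would talk to the vq/socket. */
static size_t fetch_packet(void)
{
    static int queued = 1000;          /* pretend 1000 packets are pending */
    return queued-- > 0 ? 1500 : 0;    /* each 1500 bytes long */
}

static void process_packet(size_t len) { (void)len; /* may truncate/discard */ }
static void requeue_work(void)         { puts("requeue: finish in a later pass"); }

static void handle_rx_sketch(void)
{
    size_t total_len = 0, len;
    int pkts = 0;
    bool next_round = false;

    do {
        len = fetch_packet();
        if (!len)
            break;
        /* Even a discarded packet falls through to the weight check in the
         * loop condition, so no path can spin forever. */
        process_packet(len);
        total_len += len;
    } while (!(next_round = exceeds_weight(++pkts, total_len)));

    if (next_round)
        requeue_work();    /* yield and come back instead of looping on */
}

int main(void)
{
    handle_rx_sketch();
    return 0;
}

Run on its own, the sketch processes at most WEIGHT_MAX_PKTS packets (or
WEIGHT_MAX_BYTES bytes) per call and defers the rest. That is the point of
the fix: the old loop's weight check sat at the bottom of the body, so the
continue on the discard path skipped it and a peer flooding 1 byte buffers
could keep the handler spinning, whereas the do/while condition is
evaluated on every pass.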