[kernel/f17] CVE-2013-0216/0217 xen: netback DoS via malicious guest ring (rhbz 910886)

Josh Boyer jwboyer at fedoraproject.org
Thu Feb 14 14:12:52 UTC 2013


commit d4c9a720d9cc3c9ba7ba3ee33f89c66f8c89ea5e
Author: Josh Boyer <jwboyer at redhat.com>
Date:   Thu Feb 14 09:12:20 2013 -0500

    CVE-2013-0216/0217 xen: netback DoS via malicious guest ring (rhbz 910886)

 kernel.spec                                        |   17 ++-
 ...orrect-netbk_tx_err-to-handle-wrap-around.patch |   29 +++
 ...ages-on-failure-in-xen_netbk_tx_check_gop.patch |  134 +++++++++++
 ...mory-on-failure-in-xen_netbk_get_requests.patch |   46 ++++
 ...-shutdown-the-ring-if-it-contains-garbage.patch |  250 ++++++++++++++++++++
 5 files changed, 475 insertions(+), 1 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 02deeeb..3b27c27 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -54,7 +54,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be appended after the rcX and
 # gitX tags, so a 3 here would become part of release "0.rcX.gitX.3"
 #
-%global baserelease 101
+%global baserelease 102
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -760,6 +760,12 @@ Patch21246: rtlwifi-Fix-scheduling-while-atomic-bug.patch
 #rhbz 892811
 Patch21247: ath9k_rx_dma_stop_check.patch
 
+#rhbz 910886 CVE-2013-0216/CVE-2013-0217
+Patch21250: xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
+Patch21251: xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
+Patch21252: xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
+Patch21253: netback-correct-netbk_tx_err-to-handle-wrap-around.patch
+
 # END OF PATCH DEFINITIONS
 
 %endif
@@ -1469,6 +1475,12 @@ ApplyPatch rtlwifi-Fix-scheduling-while-atomic-bug.patch
 #rhbz 892811
 ApplyPatch ath9k_rx_dma_stop_check.patch
 
+#rhbz 910886 CVE-2013-0216/CVE-2013-0217
+ApplyPatch xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
+ApplyPatch xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
+ApplyPatch xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
+ApplyPatch netback-correct-netbk_tx_err-to-handle-wrap-around.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -2324,6 +2336,9 @@ fi
 #    '-'      |  |
 #              '-'
 %changelog
+* Thu Feb 14 2013 Josh Boyer <jwboyer at redhat.com>
+- CVE-2013-0216/0217 xen: netback DoS via malicious guest ring (rhbz 910886)
+
 * Mon Feb 11 2013 Justin M. Forbes <jforbes at redhat.com> - 3.7.7-101
 - Linux v3.7.7
 
diff --git a/netback-correct-netbk_tx_err-to-handle-wrap-around.patch b/netback-correct-netbk_tx_err-to-handle-wrap-around.patch
new file mode 100644
index 0000000..36a679a
--- /dev/null
+++ b/netback-correct-netbk_tx_err-to-handle-wrap-around.patch
@@ -0,0 +1,29 @@
+From d5b2f3542a1f9c7b5092816b87db08e9f08f1551 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell at citrix.com>
+Date: Wed, 6 Feb 2013 23:41:38 +0000
+Subject: netback: correct netbk_tx_err to handle wrap around.
+
+
+From: Ian Campbell <Ian.Campbell at citrix.com>
+
+[ Upstream commit b9149729ebdcfce63f853aa54a404c6a8f6ebbf3 ]
+
+Signed-off-by: Ian Campbell <ian.campbell at citrix.com>
+Acked-by: Jan Beulich <JBeulich at suse.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -880,7 +880,7 @@ static void netbk_tx_err(struct xenvif *
+ 
+ 	do {
+ 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+-		if (cons >= end)
++		if (cons == end)
+ 			break;
+ 		txp = RING_GET_REQUEST(&vif->tx, cons++);
+ 	} while (1);
diff --git a/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch b/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
new file mode 100644
index 0000000..a3403c8
--- /dev/null
+++ b/xen-netback-don-t-leak-pages-on-failure-in-xen_netbk_tx_check_gop.patch
@@ -0,0 +1,134 @@
+From 10948f5aa9992de84e022e218b494586fe92d547 Mon Sep 17 00:00:00 2001
+From: Matthew Daley <mattjd at gmail.com>
+Date: Wed, 6 Feb 2013 23:41:36 +0000
+Subject: xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
+
+
+From: Matthew Daley <mattjd at gmail.com>
+
+[ Upstream commit 7d5145d8eb2b9791533ffe4dc003b129b9696c48 ]
+
+Signed-off-by: Matthew Daley <mattjd at gmail.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
+Acked-by: Ian Campbell <ian.campbell at citrix.com>
+Acked-by: Jan Beulich <JBeulich at suse.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c |   38 +++++++++++++-------------------------
+ 1 file changed, 13 insertions(+), 25 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenv
+ 	atomic_dec(&netbk->netfront_count);
+ }
+ 
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++				  u8 status);
+ static void make_tx_response(struct xenvif *vif,
+ 			     struct xen_netif_tx_request *txp,
+ 			     s8       st);
+@@ -1007,30 +1008,20 @@ static int xen_netbk_tx_check_gop(struct
+ {
+ 	struct gnttab_copy *gop = *gopp;
+ 	u16 pending_idx = *((u16 *)skb->data);
+-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
+-	struct xen_netif_tx_request *txp;
+ 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 	int nr_frags = shinfo->nr_frags;
+ 	int i, err, start;
+ 
+ 	/* Check status of header. */
+ 	err = gop->status;
+-	if (unlikely(err)) {
+-		pending_ring_idx_t index;
+-		index = pending_index(netbk->pending_prod++);
+-		txp = &pending_tx_info[pending_idx].req;
+-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+-		netbk->pending_ring[index] = pending_idx;
+-		xenvif_put(vif);
+-	}
++	if (unlikely(err))
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ 
+ 	/* Skip first skb fragment if it is on same page as header fragment. */
+ 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+ 
+ 	for (i = start; i < nr_frags; i++) {
+ 		int j, newerr;
+-		pending_ring_idx_t index;
+ 
+ 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ 
+@@ -1039,16 +1030,12 @@ static int xen_netbk_tx_check_gop(struct
+ 		if (likely(!newerr)) {
+ 			/* Had a previous error? Invalidate this fragment. */
+ 			if (unlikely(err))
+-				xen_netbk_idx_release(netbk, pending_idx);
++				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 			continue;
+ 		}
+ 
+ 		/* Error on this fragment: respond to client with an error. */
+-		txp = &netbk->pending_tx_info[pending_idx].req;
+-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+-		index = pending_index(netbk->pending_prod++);
+-		netbk->pending_ring[index] = pending_idx;
+-		xenvif_put(vif);
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ 
+ 		/* Not the first error? Preceding frags already invalidated. */
+ 		if (err)
+@@ -1056,10 +1043,10 @@ static int xen_netbk_tx_check_gop(struct
+ 
+ 		/* First error: invalidate header and preceding fragments. */
+ 		pending_idx = *((u16 *)skb->data);
+-		xen_netbk_idx_release(netbk, pending_idx);
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 		for (j = start; j < i; j++) {
+ 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+-			xen_netbk_idx_release(netbk, pending_idx);
++			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 		}
+ 
+ 		/* Remember the error: invalidate all subsequent fragments. */
+@@ -1093,7 +1080,7 @@ static void xen_netbk_fill_frags(struct
+ 
+ 		/* Take an extra reference to offset xen_netbk_idx_release */
+ 		get_page(netbk->mmap_pages[pending_idx]);
+-		xen_netbk_idx_release(netbk, pending_idx);
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 	}
+ }
+ 
+@@ -1476,7 +1463,7 @@ static void xen_netbk_tx_submit(struct x
+ 			txp->size -= data_len;
+ 		} else {
+ 			/* Schedule a response immediately. */
+-			xen_netbk_idx_release(netbk, pending_idx);
++			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 		}
+ 
+ 		if (txp->flags & XEN_NETTXF_csum_blank)
+@@ -1528,7 +1515,8 @@ static void xen_netbk_tx_action(struct x
+ 	xen_netbk_tx_submit(netbk);
+ }
+ 
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++				  u8 status)
+ {
+ 	struct xenvif *vif;
+ 	struct pending_tx_info *pending_tx_info;
+@@ -1542,7 +1530,7 @@ static void xen_netbk_idx_release(struct
+ 
+ 	vif = pending_tx_info->vif;
+ 
+-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
++	make_tx_response(vif, &pending_tx_info->req, status);
+ 
+ 	index = pending_index(netbk->pending_prod++);
+ 	netbk->pending_ring[index] = pending_idx;
diff --git a/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch b/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
new file mode 100644
index 0000000..0021615
--- /dev/null
+++ b/xen-netback-free-already-allocated-memory-on-failure-in-xen_netbk_get_requests.patch
@@ -0,0 +1,46 @@
+From 2e0b7c1781a94640566dccf9d7441d500fa69e40 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell at citrix.com>
+Date: Wed, 6 Feb 2013 23:41:37 +0000
+Subject: xen/netback: free already allocated memory on failure in xen_netbk_get_requests
+
+
+From: Ian Campbell <Ian.Campbell at citrix.com>
+
+[ Upstream commit 4cc7c1cb7b11b6f3515bd9075527576a1eecc4aa ]
+
+Signed-off-by: Ian Campbell <ian.campbell at citrix.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/net/xen-netback/netback.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -978,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get
+ 		pending_idx = netbk->pending_ring[index];
+ 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ 		if (!page)
+-			return NULL;
++			goto err;
+ 
+ 		gop->source.u.ref = txp->gref;
+ 		gop->source.domid = vif->domid;
+@@ -1000,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get
+ 	}
+ 
+ 	return gop;
++err:
++	/* Unwind, freeing all pages and sending error responses. */
++	while (i-- > start) {
++		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
++				      XEN_NETIF_RSP_ERROR);
++	}
++	/* The head too, if necessary. */
++	if (start)
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
++
++	return NULL;
+ }
+ 
+ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
diff --git a/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch b/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
new file mode 100644
index 0000000..add71e5
--- /dev/null
+++ b/xen-netback-shutdown-the-ring-if-it-contains-garbage.patch
@@ -0,0 +1,250 @@
+From 5ef9a447aa4e6a1da43a04b98befa5643c806c1b Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell at citrix.com>
+Date: Wed, 6 Feb 2013 23:41:35 +0000
+Subject: xen/netback: shutdown the ring if it contains garbage.
+
+
+From: Ian Campbell <Ian.Campbell at citrix.com>
+
+[ Upstream commit 48856286b64e4b66ec62b94e504d0b29c1ade664 ]
+
+A buggy or malicious frontend should not be able to confuse netback.
+If we spot anything which is not as it should be then shutdown the
+device and don't try to continue with the ring in a potentially
+hostile state. Well behaved and non-hostile frontends will not be
+penalised.
+
+As well as making the existing checks for such errors fatal also add a
+new check that ensures that there isn't an insane number of requests
+on the ring (i.e. more than would fit in the ring). If the ring
+contains garbage then previously it was possible to loop over this
+insane number, getting an error each time and therefore not generating
+any more pending requests and therefore not exiting the loop in
+xen_netbk_tx_build_gops for an extended period.
+
+Also turn various netdev_dbg calls which now precipitate a fatal error
+into netdev_err; they are rate limited because the device is shut down
+afterwards.
+
+This fixes at least one known DoS/softlockup of the backend domain.
+
+Signed-off-by: Ian Campbell <ian.campbell at citrix.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
+Acked-by: Jan Beulich <JBeulich at suse.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/net/xen-netback/common.h    |    3 +
+ drivers/net/xen-netback/interface.c |   23 ++++++++-----
+ drivers/net/xen-netback/netback.c   |   62 ++++++++++++++++++++++++++----------
+ 3 files changed, 62 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvi
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+ 
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+ 
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -343,17 +343,22 @@ err:
+ 	return err;
+ }
+ 
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ 	struct net_device *dev = vif->dev;
+-	if (netif_carrier_ok(dev)) {
+-		rtnl_lock();
+-		netif_carrier_off(dev); /* discard queued packets */
+-		if (netif_running(dev))
+-			xenvif_down(vif);
+-		rtnl_unlock();
+-		xenvif_put(vif);
+-	}
++
++	rtnl_lock();
++	netif_carrier_off(dev); /* discard queued packets */
++	if (netif_running(dev))
++		xenvif_down(vif);
++	rtnl_unlock();
++	xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++	if (netif_carrier_ok(vif->dev))
++		xenvif_carrier_off(vif);
+ 
+ 	atomic_dec(&vif->refcnt);
+ 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -888,6 +888,13 @@ static void netbk_tx_err(struct xenvif *
+ 	xenvif_put(vif);
+ }
+ 
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++	netdev_err(vif->dev, "fatal error; disabling device\n");
++	xenvif_carrier_off(vif);
++	xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ 				struct xen_netif_tx_request *first,
+ 				struct xen_netif_tx_request *txp,
+@@ -901,19 +908,22 @@ static int netbk_count_requests(struct x
+ 
+ 	do {
+ 		if (frags >= work_to_do) {
+-			netdev_dbg(vif->dev, "Need more frags\n");
++			netdev_err(vif->dev, "Need more frags\n");
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 
+ 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
+-			netdev_dbg(vif->dev, "Too many frags\n");
++			netdev_err(vif->dev, "Too many frags\n");
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 
+ 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ 		       sizeof(*txp));
+ 		if (txp->size > first->size) {
+-			netdev_dbg(vif->dev, "Frags galore\n");
++			netdev_err(vif->dev, "Frag is bigger than frame.\n");
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 
+@@ -921,8 +931,9 @@ static int netbk_count_requests(struct x
+ 		frags++;
+ 
+ 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
++			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ 				 txp->offset, txp->size);
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 	} while ((txp++)->flags & XEN_NETTXF_more_data);
+@@ -1095,7 +1106,8 @@ static int xen_netbk_get_extras(struct x
+ 
+ 	do {
+ 		if (unlikely(work_to_do-- <= 0)) {
+-			netdev_dbg(vif->dev, "Missing extra info\n");
++			netdev_err(vif->dev, "Missing extra info\n");
++			netbk_fatal_tx_err(vif);
+ 			return -EBADR;
+ 		}
+ 
+@@ -1104,8 +1116,9 @@ static int xen_netbk_get_extras(struct x
+ 		if (unlikely(!extra.type ||
+ 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ 			vif->tx.req_cons = ++cons;
+-			netdev_dbg(vif->dev,
++			netdev_err(vif->dev,
+ 				   "Invalid extra type: %d\n", extra.type);
++			netbk_fatal_tx_err(vif);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1121,13 +1134,15 @@ static int netbk_set_skb_gso(struct xenv
+ 			     struct xen_netif_extra_info *gso)
+ {
+ 	if (!gso->u.gso.size) {
+-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
++		netdev_err(vif->dev, "GSO size must not be zero.\n");
++		netbk_fatal_tx_err(vif);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Currently only TCPv4 S.O. is supported. */
+ 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++		netbk_fatal_tx_err(vif);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1264,9 +1279,25 @@ static unsigned xen_netbk_tx_build_gops(
+ 
+ 		/* Get a netif from the list with work to do. */
+ 		vif = poll_net_schedule_list(netbk);
++		/* This can sometimes happen because the test of
++		 * list_empty(net_schedule_list) at the top of the
++		 * loop is unlocked.  Just go back and have another
++		 * look.
++		 */
+ 		if (!vif)
+ 			continue;
+ 
++		if (vif->tx.sring->req_prod - vif->tx.req_cons >
++		    XEN_NETIF_TX_RING_SIZE) {
++			netdev_err(vif->dev,
++				   "Impossible number of requests. "
++				   "req_prod %d, req_cons %d, size %ld\n",
++				   vif->tx.sring->req_prod, vif->tx.req_cons,
++				   XEN_NETIF_TX_RING_SIZE);
++			netbk_fatal_tx_err(vif);
++			continue;
++		}
++
+ 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ 		if (!work_to_do) {
+ 			xenvif_put(vif);
+@@ -1294,17 +1325,14 @@ static unsigned xen_netbk_tx_build_gops(
+ 			work_to_do = xen_netbk_get_extras(vif, extras,
+ 							  work_to_do);
+ 			idx = vif->tx.req_cons;
+-			if (unlikely(work_to_do < 0)) {
+-				netbk_tx_err(vif, &txreq, idx);
++			if (unlikely(work_to_do < 0))
+ 				continue;
+-			}
+ 		}
+ 
+ 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+-		if (unlikely(ret < 0)) {
+-			netbk_tx_err(vif, &txreq, idx - ret);
++		if (unlikely(ret < 0))
+ 			continue;
+-		}
++
+ 		idx += ret;
+ 
+ 		if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1316,11 +1344,11 @@ static unsigned xen_netbk_tx_build_gops(
+ 
+ 		/* No crossing a page as the payload mustn't fragment. */
+ 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+-			netdev_dbg(vif->dev,
++			netdev_err(vif->dev,
+ 				   "txreq.offset: %x, size: %u, end: %lu\n",
+ 				   txreq.offset, txreq.size,
+ 				   (txreq.offset&~PAGE_MASK) + txreq.size);
+-			netbk_tx_err(vif, &txreq, idx);
++			netbk_fatal_tx_err(vif);
+ 			continue;
+ 		}
+ 
+@@ -1348,8 +1376,8 @@ static unsigned xen_netbk_tx_build_gops(
+ 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+ 
+ 			if (netbk_set_skb_gso(vif, skb, gso)) {
++				/* Failure in netbk_set_skb_gso is fatal. */
+ 				kfree_skb(skb);
+-				netbk_tx_err(vif, &txreq, idx);
+ 				continue;
+ 			}
+ 		}


More information about the scm-commits mailing list