Update to 2.6.32.41 Mainline
/drivers/net/virtio_net.c
blob:bf6d850a658656479134aa3cf4abd402d40df4e1 -> blob:7e3788d543109d11f0bf711ffff203498655aebf
--- drivers/net/virtio_net.c
+++ drivers/net/virtio_net.c
@@ -391,20 +391,6 @@ static void skb_recv_done(struct virtque
}
}

-static void virtnet_napi_enable(struct virtnet_info *vi)
-{
- napi_enable(&vi->napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
-}
-
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
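The helper removed above closed a startup race: if the host consumed every receive buffer before napi_enable() ran, no further interrupt would arrive, so the driver had to schedule NAPI by hand. The arbiter is the NAPI_STATE_SCHED bit, which napi_schedule_prep() atomically test-and-sets. A minimal sketch of that claim step, assuming the 2.6.32-era napi_struct layout (napi_claim_sched is a hypothetical name; the real napi_schedule_prep() in include/linux/netdevice.h also refuses while a napi_disable() is pending):

    #include <linux/netdevice.h>

    /* Sketch only: whoever sets NAPI_STATE_SCHED first, the receive
     * interrupt or the open/refill path, owns the poll; the loser
     * backs off, so virtnet_poll() is never scheduled twice. */
    static inline int napi_claim_sched(struct napi_struct *n)
    {
            return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
    }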
@@ -413,7 +399,7 @@ static void refill_work(struct work_stru
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
- virtnet_napi_enable(vi);
+ napi_enable(&vi->napi);

/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
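The situation this comment warns about is handled by the unchanged tail of refill_work(), which the diff therefore does not show. Assuming the surrounding code matches mainline 2.6.32, the function ends by rescheduling itself rather than giving up:

    /* Context only, not part of this hunk: if try_fill_recv() produced
     * no buffers at all, retry in half a second instead of never. */
    if (still_empty)
            schedule_delayed_work(&vi->refill, HZ/2);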
@@ -525,6 +511,7 @@ static netdev_tx_t start_xmit(struct sk_
struct virtnet_info *vi = netdev_priv(dev);
int capacity;

+again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);

@@ -533,20 +520,14 @@ static netdev_tx_t start_xmit(struct sk_

/* This can happen with OOM and indirect buffers. */
if (unlikely(capacity < 0)) {
- if (net_ratelimit()) {
- if (likely(capacity == -ENOMEM)) {
- dev_warn(&dev->dev,
- "TX queue failure: out of memory\n");
- } else {
- dev->stats.tx_fifo_errors++;
- dev_warn(&dev->dev,
- "Unexpected TX queue failure: %d\n",
- capacity);
- }
+ netif_stop_queue(dev);
+ dev_warn(&dev->dev, "Unexpected full queue\n");
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ netif_start_queue(dev);
+ goto again;
}
- dev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_BUSY;
}
vi->svq->vq_ops->kick(vi->svq);
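The restored error path is the usual stop, recheck, retry idiom for a full TX ring, paired with the again: label added two hunks above. enable_cb() re-arms the send-completion callback and returns false if completions are already pending, meaning the ring drained while the queue was being stopped. An annotated copy of the flow against the 2.6.32-era vq_ops interface (the comments are editorial, not from the source):

    if (unlikely(capacity < 0)) {
            /* Ring full: stop the stack from handing us more packets. */
            netif_stop_queue(dev);
            dev_warn(&dev->dev, "Unexpected full queue\n");
            /* Re-arm the TX-done callback; false means completions
             * raced in, so there is room to reclaim right now. */
            if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                    vi->svq->vq_ops->disable_cb(vi->svq);
                    netif_start_queue(dev);
                    goto again;     /* reclaim via free_old_xmit_skbs() */
            }
            /* Genuinely full: the stack keeps the skb and retries later. */
            return NETDEV_TX_BUSY;
    }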
@@ -610,7 +591,16 @@ static int virtnet_open(struct net_devic
{
struct virtnet_info *vi = netdev_priv(dev);

- virtnet_napi_enable(vi);
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by other side before we napi_enabled, we
+ * won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED */
+ if (napi_schedule_prep(&vi->napi)) {
+ vi->rvq->vq_ops->disable_cb(vi->rvq);
+ __napi_schedule(&vi->napi);
+ }
return 0;
}
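For comparison, the interrupt-side path that this inline check mirrors is skb_recv_done(), named in the first hunk header above. Assuming it matches mainline 2.6.32, it runs the same prep, disable_cb, schedule sequence, which is what makes NAPI_STATE_SCHED a safe arbiter between the interrupt handler and the open path:

    static void skb_recv_done(struct virtqueue *rvq)
    {
            struct virtnet_info *vi = rvq->vdev->priv;
            /* Schedule NAPI, suppress further interrupts if successful. */
            if (napi_schedule_prep(&vi->napi)) {
                    rvq->vq_ops->disable_cb(rvq);
                    __napi_schedule(&vi->napi);
            }
    }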