From 7ce7679d6bbd1715799a9cf17b9b558bc2d962b7 Mon Sep 17 00:00:00 2001
From: Beniamino Galvani
Date: Wed, 10 Sep 2014 22:50:02 +0200
Subject: net: arc_emac: enable tx interrupts

In the current implementation the cleaning of tx ring is done by the
NAPI poll handler, which is scheduled after rx interrupts. Thus, in
absence of received packets the reclaim of used tx buffers is never
executed, blocking further transmission.

This can be easily reproduced starting the transmission of a UDP flow
with iperf, which blocks almost immediately because skbs are not
returned to the stack and the socket send buffer becomes full.

The patch enables tx interrupts so that the tx reclaim is scheduled
after completed transmissions.

Signed-off-by: Beniamino Galvani
Reviewed-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/arc/emac_main.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..f7ab90d9cd7e 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -298,7 +298,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
         work_done = arc_emac_rx(ndev, budget);
         if (work_done < budget) {
                 napi_complete(napi);
-                arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
         }
 
         return work_done;
@@ -327,9 +327,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
         /* Reset all flags except "MDIO complete" */
         arc_reg_set(priv, R_STATUS, status);
 
-        if (status & RXINT_MASK) {
+        if (status & (RXINT_MASK | TXINT_MASK)) {
                 if (likely(napi_schedule_prep(&priv->napi))) {
-                        arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+                        arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
                         __napi_schedule(&priv->napi);
                 }
         }
@@ -440,7 +440,7 @@ static int arc_emac_open(struct net_device *ndev)
         arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
         /* Enable interrupts */
-        arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+        arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
         /* Set CONTROL */
         arc_reg_set(priv, R_CTRL,
@@ -511,7 +511,7 @@ static int arc_emac_stop(struct net_device *ndev)
         netif_stop_queue(ndev);
 
         /* Disable interrupts */
-        arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+        arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
         /* Disable EMAC */
         arc_reg_clr(priv, R_CTRL, EN_MASK);
--
cgit v1.2.3

From 74dd40bca95f9968e368751ea81f3b98471109ce Mon Sep 17 00:00:00 2001
From: Beniamino Galvani
Date: Wed, 10 Sep 2014 22:50:03 +0200
Subject: net: arc_emac: prevent reuse of unreclaimed tx descriptors

This patch changes the logic in tx path to ensure that tx descriptors
are reused for transmission only after they have been reclaimed by
arc_emac_tx_clean().

Signed-off-by: Beniamino Galvani
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/arc/emac_main.c | 43 ++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f7ab90d9cd7e..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -29,6 +29,17 @@
 #define DRV_NAME "arc_emac"
 #define DRV_VERSION "1.0"
 
+/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+        return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
 /**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev: Pointer to the net_device structure.
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
                 txbd->info = 0;
 
                 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-                if (netif_queue_stopped(ndev))
-                        netif_wake_queue(ndev);
         }
+
+        /* Ensure that txbd_dirty is visible to tx() before checking
+         * for queue stopped.
+         */
+        smp_mb();
+
+        if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+                netif_wake_queue(ndev);
 }
 
 /**
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
         len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-        /* EMAC still holds this buffer in its possession.
-         * CPU must not modify this buffer descriptor
-         */
-        if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+        if (unlikely(!arc_emac_tx_avail(priv))) {
                 netif_stop_queue(ndev);
+                netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
                 return NETDEV_TX_BUSY;
         }
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
         /* Increment index to point to the next BD */
         *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-        /* Get "info" of the next BD */
-        info = &priv->txbd[*txbd_curr].info;
+        /* Ensure that tx_clean() sees the new txbd_curr before
+         * checking the queue status. This prevents an unneeded wake
+         * of the queue in tx_clean().
+         */
+        smp_mb();
 
-        /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-        if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+        if (!arc_emac_tx_avail(priv)) {
                 netif_stop_queue(ndev);
+                /* Refresh tx_dirty */
+                smp_mb();
+                if (arc_emac_tx_avail(priv))
+                        netif_start_queue(ndev);
+        }
 
         arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
--
cgit v1.2.3
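
A note on the ring-occupancy arithmetic introduced by arc_emac_tx_avail(): the expression (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM deliberately keeps one descriptor unused, so that curr == dirty always means "empty" and never "full". The standalone sketch below is plain userspace C, not driver code; the ring size of 128 and the local curr/dirty/in_flight counters are assumptions standing in for priv->txbd_curr and priv->txbd_dirty. It walks a producer and a consumer around the ring and checks that the formula tracks the number of free descriptors.

/* Standalone check of the tx-ring availability formula used above.
 * Illustrative userspace C only: TX_BD_NUM and the counters below are
 * local stand-ins for the driver's fields, not the real driver state.
 */
#include <assert.h>
#include <stdio.h>

#define TX_BD_NUM 128

static unsigned int curr;      /* next descriptor the CPU will fill (producer) */
static unsigned int dirty;     /* next descriptor to be reclaimed (consumer)   */
static unsigned int in_flight; /* reference count of submitted, unreclaimed BDs */

static unsigned int tx_avail(void)
{
        return (dirty + TX_BD_NUM - curr - 1) % TX_BD_NUM;
}

static void submit(void)   /* models the xmit path consuming one descriptor */
{
        assert(tx_avail() > 0);
        curr = (curr + 1) % TX_BD_NUM;
        in_flight++;
}

static void reclaim(void)  /* models tx_clean() freeing one descriptor */
{
        assert(in_flight > 0);
        dirty = (dirty + 1) % TX_BD_NUM;
        in_flight--;
}

int main(void)
{
        unsigned int i;

        /* Fill the ring completely: one slot stays unused so that
         * curr == dirty still unambiguously means "empty".
         */
        for (i = 0; i < TX_BD_NUM - 1; i++)
                submit();
        assert(tx_avail() == 0);

        /* Reclaim a few and check that availability tracks in_flight. */
        for (i = 0; i < 10; i++)
                reclaim();
        assert(tx_avail() == TX_BD_NUM - 1 - in_flight);

        printf("avail=%u in_flight=%u\n", tx_avail(), in_flight);
        return 0;
}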