author    Dongdong Deng <dongdong.deng@windriver.com>    2009-08-13 23:12:31 +0400
committer David S. Miller <davem@davemloft.net>          2009-08-15 03:41:16 +0400
commit    22580f894ac190c46beebb5c3172e450a2318f79 (patch)
tree      01f88df9a5bc27170ef7ca944ba40af578476d72 /drivers/net/ucc_geth.c
parent    0527a1a8440a20b3d0fd1d0c9e75a6f38a9d5315 (diff)
download  linux-22580f894ac190c46beebb5c3172e450a2318f79.tar.xz
drivers/net: fixed drivers that support netpoll use ndo_start_xmit()
The NETPOLL API requires that interrupts remain disabled in netpoll_send_skb(). The use of spin_lock_irq() and spin_unlock_irq() in the NETPOLL API callbacks causes interrupts to be re-enabled, which can lead to kernel instability. The solution is to use spin_lock_irqsave() and spin_unlock_irqrestore() so that the IRQs are not re-enabled while inside netpoll_send_skb().

Call trace:
netpoll_send_skb()
{
  -> local_irq_save(flags)
     ---> dev->ndo_start_xmit(skb, dev)
          ---> spin_lock_irq()
          ---> spin_unlock_irq()   <--- here the interrupts would be re-enabled
          ...
  -> local_irq_restore(flags)
}

Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Acked-by: Bruce Ashfield <bruce.ashfield@windriver.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
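The same locking rule applies to any ndo_start_xmit() handler that netpoll may invoke. The fragment below is only a minimal sketch of that pattern, not code taken from this driver; the struct my_priv and my_start_xmit names are made up for illustration.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical private data; "my_priv" / "my_start_xmit" are illustrative
 * names, not part of the ucc_geth driver. */
struct my_priv {
	spinlock_t lock;	/* protects the TX ring */
};

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/*
	 * netpoll_send_skb() calls this hook with interrupts already
	 * disabled.  spin_unlock_irq() would unconditionally re-enable
	 * them here; spin_lock_irqsave()/spin_unlock_irqrestore() keep
	 * the caller's interrupt state intact instead.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	dev->stats.tx_bytes += skb->len;
	/* ... hand the skb to the hardware queue here ... */

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;	/* NETDEV_TX_OK */
}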
Diffstat (limited to 'drivers/net/ucc_geth.c')
-rw-r--r--  drivers/net/ucc_geth.c | 5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 3b957e6412ee..8a7b8c7bd781 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u8 __iomem *bd;	/* BD pointer */
 	u32 bd_status;
 	u8 txQ = 0;
+	unsigned long flags;
 
 	ugeth_vdbg("%s: IN", __func__);
 
-	spin_lock_irq(&ugeth->lock);
+	spin_lock_irqsave(&ugeth->lock, flags);
 
 	dev->stats.tx_bytes += skb->len;
@@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	uccf = ugeth->uccf;
 	out_be16(uccf->p_utodr, UCC_FAST_TOD);
 #endif
-	spin_unlock_irq(&ugeth->lock);
+	spin_unlock_irqrestore(&ugeth->lock, flags);
 
 	return 0;
 }