author     Linus Torvalds <torvalds@linux-foundation.org>    2020-01-26 01:19:32 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>    2020-01-26 01:19:32 +0300
commit     84809aaf78b5b4c2e6478dc6121a1c8fb439a024 (patch)
tree       77e0b74f66a80bb7fe0b3c8427e056fd7e226ddb /drivers/net/ethernet/natsemi/sonic.c
parent     f041eadad7504b1364274494548b9716b2ed59ac (diff)
parent     fa865ba183d61c1ec8cbcab8573159c3b72b89a4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Off by one in mt76 airtime calculation, from Dan Carpenter.
 2) Fix TLV fragment allocation loop condition in iwlwifi, from Luca Coelho.
 3) Don't confirm neigh entries when doing ipsec pmtu updates, from Xu Wang.
 4) More checks to make sure we only send TSO packets to lan78xx chips that they can actually handle. From James Hughes.
 5) Fix ip_tunnel namespace move, from William Dauchy.
 6) Fix unintended packet reordering due to cooperation between listification done by GRO and non-GRO paths. From Maxim Mikityanskiy.
 7) Add Jakub Kicinski formally as networking co-maintainer.
 8) Info leak in airo ioctls, from Michael Ellerman.
 9) IFLA_MTU attribute needs validation during rtnl_create_link(), from Eric Dumazet.
10) Use after free during reload in mlxsw, from Ido Schimmel.
11) Dangling pointers are possible in tp->highest_sack, fix from Eric Dumazet.
12) Missing *pos++ in various networking seq_next handlers, from Vasily Averin.
13) CHELSIO_GET_MEM operation needs CAP_NET_ADMIN check, from Michael Ellerman.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (109 commits)
  firestream: fix memory leaks
  net: cxgb3_main: Add CAP_NET_ADMIN check to CHELSIO_GET_MEM
  net: bcmgenet: Use netif_tx_napi_add() for TX NAPI
  tipc: change maintainer email address
  net: stmmac: platform: fix probe for ACPI devices
  net/mlx5e: kTLS, Do not send decrypted-marked SKBs via non-accel path
  net/mlx5e: kTLS, Remove redundant posts in TX resync flow
  net/mlx5e: kTLS, Fix corner-case checks in TX resync flow
  net/mlx5e: Clear VF config when switching modes
  net/mlx5: DR, use non preemptible call to get the current cpu number
  net/mlx5: E-Switch, Prevent ingress rate configuration of uplink rep
  net/mlx5: DR, Enable counter on non-fwd-dest objects
  net/mlx5: Update the list of the PCI supported devices
  net/mlx5: Fix lowest FDB pool size
  net: Fix skb->csum update in inet_proto_csum_replace16().
  netfilter: nf_tables: autoload modules from the abort path
  netfilter: nf_tables: add __nft_chain_type_get()
  netfilter: nf_tables_offload: fix check the chain offload flag
  netfilter: conntrack: sctp: use distinct states for new SCTP connections
  ipv6_route_seq_next should increase position index
  ...
Diffstat (limited to 'drivers/net/ethernet/natsemi/sonic.c')
-rw-r--r--   drivers/net/ethernet/natsemi/sonic.c   380
1 file changed, 230 insertions(+), 150 deletions(-)
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index b339125b2f09..05e760444a92 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
+ spin_lock_init(&lp->lock);
+
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
return 0;
}
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+ int i;
+ u16 bits;
+
+ for (i = 0; i < 1000; ++i) {
+ bits = SONIC_READ(SONIC_CMD) & mask;
+ if (!bits)
+ return;
+ if (irqs_disabled() || in_interrupt())
+ udelay(20);
+ else
+ usleep_range(100, 200);
+ }
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
/*
* Close the SONIC device
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
- int entry = lp->next_tx;
+ int entry;
+ unsigned long flags;
netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->next_tx;
+
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- /*
- * Must set tx_skb[entry] only after clearing status, and
- * before clearing EOL and before stopping queue
- */
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
}
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
+ unsigned long flags;
+
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+ * with sonic_send_packet() so that the two functions can share state.
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+ * by macsonic which must use two IRQs with different priority levels.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ if (!status) {
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
return IRQ_NONE;
+ }
do {
+ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
if (status & SONIC_INT_PKTRX) {
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}
if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
int td_status;
int freed_some = 0;
- /* At this point, cur_tx is the index of a TD that is one of:
- * unallocated/freed (status set & tx_skb[entry] clear)
- * allocated and sent (status set & tx_skb[entry] set )
- * allocated and not yet sent (status clear & tx_skb[entry] set )
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ /* The state of a Transmit Descriptor may be inferred
+ * from { tx_skb[entry], td_status } as follows.
+ * { clear, clear } => the TD has never been used
+ * { set, clear } => the TD was handed to SONIC
+ * { set, set } => the TD was handed back
+ * { clear, set } => the TD is available for re-use
*/
netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
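
The four-way encoding in the new comment reads most easily as a truth table. Below is a minimal stand-alone C sketch of that table; the enum and helper are purely illustrative and do not exist in the driver.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: the descriptor states named in the comment above. */
enum td_state {
	TD_NEVER_USED,      /* tx_skb clear, status clear */
	TD_OWNED_BY_SONIC,  /* tx_skb set,   status clear */
	TD_HANDED_BACK,     /* tx_skb set,   status set   */
	TD_REUSABLE,        /* tx_skb clear, status set   */
};

static enum td_state td_classify(bool skb_set, bool status_set)
{
	if (skb_set)
		return status_set ? TD_HANDED_BACK : TD_OWNED_BY_SONIC;
	return status_set ? TD_REUSABLE : TD_NEVER_USED;
}

int main(void)
{
	/* Enumerate the four combinations from the comment. */
	for (int skb = 0; skb <= 1; skb++)
		for (int st = 0; st <= 1; st++)
			printf("tx_skb=%d td_status=%d -> state %d\n",
			       skb, st, td_classify(skb, st));
	return 0;
}

As the loop below shows, sonic_interrupt() only reclaims a descriptor once its status word becomes non-zero, i.e. once the chip has handed the TD back.
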
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
break;
- if (td_status & 0x0001) {
+ if (td_status & SONIC_TCR_PTX) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
} else {
- lp->stats.tx_errors++;
- if (td_status & 0x0642)
+ if (td_status & (SONIC_TCR_EXD |
+ SONIC_TCR_EXC | SONIC_TCR_BCM))
lp->stats.tx_aborted_errors++;
- if (td_status & 0x0180)
+ if (td_status &
+ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
lp->stats.tx_carrier_errors++;
- if (td_status & 0x0020)
+ if (td_status & SONIC_TCR_OWC)
lp->stats.tx_window_errors++;
- if (td_status & 0x0004)
+ if (td_status & SONIC_TCR_FU)
lp->stats.tx_fifo_errors++;
}
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (freed_some || lp->tx_skb[entry] == NULL)
netif_wake_queue(dev); /* The ring is no longer full */
lp->cur_tx = entry;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}
/*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (status & SONIC_INT_RFO) {
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
__func__);
- lp->stats.rx_fifo_errors++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}
/* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE) {
+ if (status & SONIC_INT_FAE)
lp->stats.rx_frame_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
- }
- if (status & SONIC_INT_CRC) {
+ if (status & SONIC_INT_CRC)
lp->stats.rx_crc_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
- }
- if (status & SONIC_INT_MP) {
+ if (status & SONIC_INT_MP)
lp->stats.rx_missed_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
- }
/* transmit error */
if (status & SONIC_INT_TXER) {
- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ u16 tcr = SONIC_READ(SONIC_TCR);
+
+ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+ __func__, tcr);
+
+ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+ SONIC_TCR_FU | SONIC_TCR_BCM)) {
+ /* Aborted transmission. Try again. */
+ netif_stop_queue(dev);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
}
/* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* ... to help debug DMA problems causing endless interrupts. */
/* Bounce the eth interface to turn on the interrupt again. */
SONIC_WRITE(SONIC_IMR, 0);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
}
- /* load CAM done */
- if (status & SONIC_INT_LCD)
- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return IRQ_HANDLED;
}
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+ unsigned int last)
+{
+ unsigned int i = last;
+
+ do {
+ i = (i + 1) & SONIC_RRS_MASK;
+ if (addr == lp->rx_laddr[i])
+ return i;
+ } while (i != last);
+
+ return -ENOENT;
+}
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (!*new_skb)
+ return false;
+
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(*new_skb, 2);
+
+ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!*new_addr) {
+ dev_kfree_skb(*new_skb);
+ *new_skb = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+ dma_addr_t old_addr, dma_addr_t new_addr)
+{
+ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+ u32 buf;
+
+ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+ * scans the other resources in the RRA, those in the range [RWP, RRP).
+ */
+ do {
+ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+ if (buf == old_addr)
+ break;
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+ } while (entry != end);
+
+ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
+
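
Both index_from_addr() and the scan in sonic_update_rra() walk the receive resource ring with a power-of-two mask so the index wraps cleanly. Here is a stand-alone sketch of that wrap-around scan, assuming a 16-entry ring; the names and size are illustrative, not taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

#define NUM_RR   16             /* assumed ring size */
#define RR_MASK  (NUM_RR - 1)   /* valid because NUM_RR is a power of two */

/* Scan every entry after 'last', wrapping around, and return the match
 * or -1. This mirrors the loop shape of index_from_addr() above.
 */
static int find_entry(const uint32_t *ring, uint32_t addr, unsigned int last)
{
	unsigned int i = last;

	do {
		i = (i + 1) & RR_MASK;
		if (ring[i] == addr)
			return i;
	} while (i != last);

	return -1;
}

int main(void)
{
	uint32_t ring[NUM_RR];

	for (unsigned int i = 0; i < NUM_RR; i++)
		ring[i] = 0x1000 + i * 0x800;

	/* Starting at entry 14, the scan wraps past the end of the ring. */
	printf("found entry %d\n", find_entry(ring, 0x1000, 14));
	return 0;
}

The same masking idiom pairs with the new sonic_rr_addr()/sonic_rr_entry() helpers that replace the old rra_end/cur_rwp arithmetic in sonic_init() further down.
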
/*
* We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
- int status;
int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
+ bool rbe = false;
while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
- struct sk_buff *used_skb;
- struct sk_buff *new_skb;
- dma_addr_t new_laddr;
- u16 bufadr_l;
- u16 bufadr_h;
- int pkt_len;
-
- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
- if (status & SONIC_RCR_PRX) {
- /* Malloc up new buffer. */
- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
- if (new_skb == NULL) {
- lp->stats.rx_dropped++;
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+ /* If the RD has LPKT set, the chip has finished with the RB */
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+ int i = index_from_addr(lp, addr, entry);
+
+ if (i < 0) {
+ WARN_ONCE(1, "failed to find buffer!\n");
break;
}
- /* provide 16 byte IP header alignment unless DMA requires otherwise */
- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
- skb_reserve(new_skb, 2);
-
- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
- SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!new_laddr) {
- dev_kfree_skb(new_skb);
- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+ struct sk_buff *used_skb = lp->rx_skb[i];
+ int pkt_len;
+
+ /* Pass the used buffer up the stack */
+ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+ DMA_FROM_DEVICE);
+
+ pkt_len = sonic_rda_get(dev, entry,
+ SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb,
+ dev);
+ netif_rx(used_skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ lp->rx_skb[i] = new_skb;
+ lp->rx_laddr[i] = new_laddr;
+ } else {
+ /* Failed to obtain a new buffer so re-use it */
+ new_laddr = addr;
lp->stats.rx_dropped++;
- break;
}
-
- /* now we have a new skb to replace it, pass the used one up the stack */
- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
- used_skb = lp->rx_skb[entry];
- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
- skb_trim(used_skb, pkt_len);
- used_skb->protocol = eth_type_trans(used_skb, dev);
- netif_rx(used_skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
-
- /* and insert the new skb */
- lp->rx_laddr[entry] = new_laddr;
- lp->rx_skb[entry] = new_skb;
-
- bufadr_l = (unsigned long)new_laddr & 0xffff;
- bufadr_h = (unsigned long)new_laddr >> 16;
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
- } else {
- /* This should only happen, if we enable accepting broken packets. */
- lp->stats.rx_errors++;
- if (status & SONIC_RCR_FAER)
- lp->stats.rx_frame_errors++;
- if (status & SONIC_RCR_CRCR)
- lp->stats.rx_crc_errors++;
- }
- if (status & SONIC_RCR_LPKT) {
- /*
- * this was the last packet out of the current receive buffer
- * give the buffer back to the SONIC
+ /* If RBE is already asserted when RWP advances then
+ * it's safe to clear RBE after processing this packet.
*/
- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
- }
- } else
- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
- dev->name);
+ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+ sonic_update_rra(dev, lp, addr, new_laddr);
+ }
/*
* give back the descriptor
*/
- sonic_rda_put(dev, entry, SONIC_RD_LINK,
- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
- lp->eol_rx = entry;
- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+ prev_entry = entry;
+ entry = (entry + 1) & SONIC_RDS_MASK;
+ }
+
+ lp->cur_rx = entry;
+
+ if (prev_entry != lp->eol_rx) {
+ /* Advance the EOL flag to put descriptors back into service */
+ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+ lp->eol_rx = prev_entry;
}
+
+ if (rbe)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
/*
* If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
+ unsigned long flags;
+
netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
i++;
}
SONIC_WRITE(SONIC_CDC, 16);
- /* issue Load CAM command */
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+ /* LCAM and TXP commands can't be used simultaneously */
+ spin_lock_irqsave(&lp->lock, flags);
+ sonic_quiesce(dev, SONIC_CR_TXP);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
}
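
The load-CAM sequence above is poll, issue, poll: wait for any in-flight TXP command, write LCAM, then wait for LCAM to clear, all under the new spinlock. The following stand-alone simulation models only that handshake against a fake command register; the bit values and helpers are made up and are not the real SONIC registers.

#include <stdint.h>
#include <stdio.h>

#define CR_TXP   0x0001   /* illustrative bit values only */
#define CR_LCAM  0x0200

static uint16_t fake_cmd;       /* stands in for the SONIC command register */
static unsigned int fake_busy;  /* polls remaining before the chip "finishes" */

static uint16_t read_cmd(void)
{
	if (fake_busy && --fake_busy == 0)
		fake_cmd = 0;   /* chip clears the command bit when done */
	return fake_cmd;
}

/* Poll until the given command bits clear, like sonic_quiesce() above. */
static int quiesce(uint16_t mask)
{
	for (int i = 0; i < 1000; i++)
		if (!(read_cmd() & mask))
			return 0;
	return -1;
}

int main(void)
{
	fake_cmd = CR_TXP;      /* pretend a transmit command is in flight */
	fake_busy = 3;

	quiesce(CR_TXP);        /* TXP and LCAM must not run at the same time */

	fake_cmd = CR_LCAM;     /* issue load-CAM */
	fake_busy = 5;
	quiesce(CR_LCAM);       /* wait for it to complete */

	printf("CAM loaded, CMD=0x%04x\n", fake_cmd);
	return 0;
}

In the driver itself the wait is likewise bounded: sonic_quiesce() gives up after 1000 polls and emits a WARN_ONCE if the command never completes.
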
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ /* While in reset mode, clear CAM Enable register */
+ SONIC_WRITE(SONIC_CE, 0);
+
/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+ sonic_quiesce(dev, SONIC_CR_ALL);
/*
* initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
}
/* initialize all RRA registers */
- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_REA, lp->rra_end);
- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
- break;
- }
-
- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), i);
+ sonic_quiesce(dev, SONIC_CR_RRRA);
/*
* Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
- break;
- }
- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
/*
* enable receiver, disable loopback
* and enable all interrupts
*/
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
- cmd = SONIC_READ(SONIC_CMD);
- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
SONIC_READ(SONIC_CMD));