#define ETH_SWITCH_HEADER_LEN 2
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
+
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
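With the standard kernel constants (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4) this works out to mtu + 24 bytes; for example:

/* ag71xx_max_frame_len(1500)
 *   = 2 (switch header) + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (FCS)
 *   = 1524
 */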
static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
int err;
- int i;
ring->desc_size = sizeof(struct ag71xx_desc);
if (ring->desc_size % cache_line_size()) {
err = -EINVAL; /* desc_size must be a multiple of the cache line size */
goto err;
}
- for (i = 0; i < ring->size; i++) {
- int idx = i * ring->desc_size;
- ring->buf[i].desc = (struct ag71xx_desc *)&ring->descs_cpu[idx];
- DBG("ag71xx: ring %p, desc %d at %p\n",
- ring, i, ring->buf[i].desc);
- }
-
return 0;
err:
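The loop removed above cached each descriptor address as descs_cpu + i * desc_size; the ag71xx_ring_desc() helper used throughout the rest of this patch presumably computes the same address on demand instead of storing it per buffer. A minimal sketch, assuming only the fields visible here:

static inline struct ag71xx_desc *
ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	/* same address the removed loop stored in buf[idx].desc */
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * ring->desc_size];
}

This drops one pointer per ag71xx_buf and keeps descriptor addressing in a single place.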
u32 bytes_compl = 0, pkts_compl = 0;
while (ring->curr != ring->dirty) {
+ struct ag71xx_desc *desc;
u32 i = ring->dirty % ring->size;
- if (!ag71xx_desc_empty(ring->buf[i].desc)) {
- ring->buf[i].desc->ctrl = 0;
+ desc = ag71xx_ring_desc(ring, i);
+ if (!ag71xx_desc_empty(desc)) {
+ desc->ctrl = 0;
dev->stats.tx_errors++;
}
int i;
for (i = 0; i < ring->size; i++) {
- ring->buf[i].desc->next = (u32) (ring->descs_dma +
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32) (ring->descs_dma +
ring->desc_size * ((i + 1) % ring->size));
- ring->buf[i].desc->ctrl = DESC_EMPTY;
+ desc->ctrl = DESC_EMPTY;
ring->buf[i].skb = NULL;
}
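The next fields written here chain the descriptors into a circular list the DMA engine can follow; the modulo wraps the last entry back to the first. For a ring of size 4:

/* desc[0] -> desc[1] -> desc[2] -> desc[3] -> desc[0],
 * where desc[i] lives at descs_dma + i * ring->desc_size
 */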
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
int offset)
{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
void *data;
data = kmalloc(ag->rx_buf_size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), GFP_ATOMIC);
if (!data)
return false;
buf->rx_buf = data;
buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
- buf->desc->data = (u32) buf->dma_addr + offset;
+ desc->data = (u32) buf->dma_addr + offset;
return true;
}
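Because buf points into the ring->buf[] array, buf - &ring->buf[0] is ordinary pointer arithmetic yielding the slot index, so the helper can locate the matching descriptor without changing ag71xx_fill_rx_buf()'s signature:

/* e.g. for the third slot: buf == &ring->buf[2], so
 * buf - &ring->buf[0] == 2, and ag71xx_ring_desc(ring, 2)
 * resolves to &ring->descs_cpu[2 * ring->desc_size]
 */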
ret = 0;
for (i = 0; i < ring->size; i++) {
- ring->buf[i].desc->next = (u32) (ring->descs_dma +
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32) (ring->descs_dma +
ring->desc_size * ((i + 1) % ring->size));
DBG("ag71xx: RX desc at %p, next is %08x\n",
- ring->buf[i].desc,
- ring->buf[i].desc->next);
+ desc, desc->next);
}
for (i = 0; i < ring->size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset)) {
ret = -ENOMEM;
break;
}
- ring->buf[i].desc->ctrl = DESC_EMPTY;
+ desc->ctrl = DESC_EMPTY;
}
/* flush descriptors */
count = 0;
for (; ring->curr - ring->dirty > 0; ring->dirty++) {
+ struct ag71xx_desc *desc;
unsigned int i;
i = ring->dirty % ring->size;
+ desc = ag71xx_ring_desc(ring, i);
if (!ring->buf[i].rx_buf &&
!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset))
break;
- ring->buf[i].desc->ctrl = DESC_EMPTY;
+ desc->ctrl = DESC_EMPTY;
count++;
}
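curr and dirty are free-running counters: the unsigned difference ring->curr - ring->dirty counts outstanding slots even across integer wraparound, and the actual ring slot is recovered with a modulo. A standalone illustration with hypothetical values (refill() stands in for the fill call above):

unsigned int size = 64, dirty = 66, curr = 70;

for (; curr - dirty > 0; dirty++)
	refill(dirty % size);	/* touches slots 2, 3, 4, 5 */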
reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);
ath79_device_reset_set(reset_phy);
- mdelay(50);
+ msleep(50);
ath79_device_reset_clear(reset_phy);
- mdelay(200);
+ msleep(200);
}
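Swapping mdelay() for msleep() here and in the reset sequence below turns a busy-wait into a real sleep. These reset paths run in process context, and the kernel's timer guidelines favor msleep() for delays of this length:

mdelay(200);	/* spins the CPU for the full 200 ms */
msleep(200);	/* yields the CPU; may sleep slightly longer than asked */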
ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
udelay(20);
ath79_device_reset_set(reset_mask);
- mdelay(100);
+ msleep(100);
ath79_device_reset_clear(reset_mask);
- mdelay(200);
+ msleep(200);
ag71xx_hw_setup(ag);
ag71xx_dma_reset(ag);
ag71xx_hw_setup(ag);
+ ag71xx_tx_packets(ag, true);
/* setup max frame length */
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
/* enable interrupts */
ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
+
+ netif_wake_queue(ag->dev);
}
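Calling ag71xx_tx_packets(ag, true) here reclaims every skb still queued in the tx ring: after ag71xx_dma_reset() the hardware has presumably abandoned its ring state, so waiting for descriptors to be marked empty would leave those skbs stranded. The flush flag (see the ag71xx_tx_packets() hunk below) bypasses that check, and only once the ring is drained is the stopped queue woken again:

ag71xx_dma_reset(ag);		/* hardware forgets the rings */
ag71xx_hw_setup(ag);
ag71xx_tx_packets(ag, true);	/* flush: complete/free all queued skbs */
...
netif_wake_queue(ag->dev);	/* ring is clean, accept new traffic */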
void ag71xx_link_adjust(struct ag71xx *ag)
unsigned int cur_len = len;
i = (ring->curr + ndesc) % ring->size;
- desc = ring->buf[i].desc;
+ desc = ag71xx_ring_desc(ring, i);
if (!ag71xx_desc_empty(desc))
return -1;
DMA_TO_DEVICE);
i = ring->curr % ring->size;
- desc = ring->buf[i].desc;
+ desc = ag71xx_ring_desc(ring, i);
/* setup descriptor fields */
n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
struct ag71xx_ring *ring = &ag->tx_ring;
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
while (ring->dirty + n != ring->curr) {
unsigned int i = (ring->dirty + n) % ring->size;
- struct ag71xx_desc *desc = ring->buf[i].desc;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
struct sk_buff *skb = ring->buf[i].skb;
- if (!ag71xx_desc_empty(desc)) {
+ if (!flush && !ag71xx_desc_empty(desc)) {
if (pdata->is_ar7240 &&
ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
schedule_work(&ag->restart_work);
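The flush parameter exists for the restart path above: a non-empty descriptor normally means the hardware has not finished with it, and on AR7240 a descriptor stuck too long (per ag71xx_check_dma_stuck()) schedules restart_work. Since that restart presumably ends up calling ag71xx_tx_packets(ag, true), flush must bypass the empty check, or the restart would stall on the very descriptors it is trying to reclaim.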
while (done < limit) {
unsigned int i = ring->curr % ring->size;
- struct ag71xx_desc *desc = ring->buf[i].desc;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
struct sk_buff *skb;
int pktlen;
int err = 0;
int rx_done;
pdata->ddr_flush();
- tx_done = ag71xx_tx_packets(ag);
+ tx_done = ag71xx_tx_packets(ag, false);
DBG("%s: processing RX ring\n", dev->name);
rx_done = ag71xx_rx_packets(ag, limit);
more:
DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
dev->name, rx_done, tx_done, limit);
- return rx_done;
+ return limit;
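Returning limit rather than rx_done matters for the NAPI contract: a poll handler that wants to stay scheduled (the more: path here) must report its full budget as consumed, while returning less than the budget signals completion. The canonical shape, as a sketch with a hypothetical process_rx() helper:

static int poll(struct napi_struct *napi, int budget)
{
	int done = process_rx(budget);	/* hypothetical rx work */

	if (done < budget) {
		napi_complete(napi);
		/* re-enable the device's interrupts here */
	}
	return done;	/* returning == budget keeps polling */
}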
oom:
if (netif_msg_rx_err(ag))
dev->irq = platform_get_irq(pdev, 0);
err = request_irq(dev->irq, ag71xx_interrupt,
- IRQF_DISABLED,
+ 0x0,
dev->name, dev);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
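IRQF_DISABLED had been a no-op since 2.6.35 and was later removed from the kernel entirely, so passing a zero flags argument (spelled 0x0 here; plain 0 is the more conventional form) is the direct replacement: handlers run with the IRQ line disabled regardless.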