#include <asm/unaligned.h>
#include <asm/sizes.h>
-#include <mach/hardware.h>
/* Hardware registers */
#define MAC_BASE_ADDR ((priv->mac_base))
#define CTRL_REG (MAC_BASE_ADDR)
-#define MII_BUSY 0x00000001
-#define MII_WRITE 0x00000002
+#define MII_BUSY (1 << 0)
+#define MII_WRITE (1 << 1)
+#define RX_ENABLE (1 << 2)
+#define TX_ENABLE (1 << 3)
+#define DEFER_CHECK (1 << 5)
+#define STRIP_PAD (1 << 8)
+#define DRTRY_DISABLE (1 << 10)
+#define FULL_DUPLEX (1 << 20)
+#define HBD_DISABLE (1 << 28)
#define MAC_ADDR_HIGH_REG (MAC_BASE_ADDR + 0x04)
#define MAC_ADDR_LOW_REG (MAC_BASE_ADDR + 0x08)
#define MII_ADDR_REG (MAC_BASE_ADDR + 0x14)
+#define MII_ADDR_SHIFT (11)
+#define MII_REG_SHIFT (6)
#define MII_DATA_REG (MAC_BASE_ADDR + 0x18)
/* Link interrupt registers */
#define LINK_INT_CSR (MAC_BASE_ADDR + 0xD0)
+#define LINK_INT_EN (1 << 0)
+#define LINK_PHY_ADDR_SHIFT (1)
+#define LINK_PHY_REG_SHIFT (6)
+#define LINK_BIT_UP_SHIFT (11)
+#define LINK_UP (1 << 16)
#define LINK_INT_POLL_TIME (MAC_BASE_ADDR + 0xD4)
+#define LINK_POLL_MASK ((1 << 20) - 1)
#define DMA_CHAN_WIDTH 32
#define DMA_RX_CHAN 0
#define RX_MAX_BYTES (RX_DMA_BASE + 0x04)
#define RX_ACT_BYTES (RX_DMA_BASE + 0x08)
#define RX_START_DMA (RX_DMA_BASE + 0x0C)
+#define RX_DMA_ENABLE (1 << 0)
+#define RX_DMA_RESET (1 << 1)
+#define RX_DMA_STATUS_FIFO (1 << 12)
#define RX_DMA_ENH (RX_DMA_BASE + 0x14)
+#define RX_DMA_INT_ENABLE (1 << 1)
/* Transmit DMA registers */
#define TX_DMA_BASE ((priv->dma_base) + \
#define TX_PKT_BYTES (TX_DMA_BASE + 0x04)
#define TX_BYTES_SENT (TX_DMA_BASE + 0x08)
#define TX_START_DMA (TX_DMA_BASE + 0x0C)
+#define TX_DMA_ENABLE (1 << 0)
+#define TX_DMA_START_FRAME (1 << 2)
+#define TX_DMA_END_FRAME (1 << 3)
+#define TX_DMA_PAD_DISABLE (1 << 8)
+#define TX_DMA_CRC_DISABLE (1 << 9)
+#define TX_DMA_FIFO_FULL (1 << 16)
+#define TX_DMA_FIFO_EMPTY (1 << 17)
+#define TX_DMA_STATUS_AVAIL (1 << 18)
+#define TX_DMA_RESET (1 << 24)
#define TX_DMA_STATUS (TX_DMA_BASE + 0x10)
#define TX_DMA_ENH (TX_DMA_BASE + 0x14)
+#define TX_DMA_ENH_ENABLE (1 << 0)
+#define TX_DMA_INT_FIFO (1 << 1)
#define RX_ALLOC_SIZE SZ_2K
#define MAX_ETH_FRAME_SIZE 1536
/* Transmit buffers */
struct sk_buff *tx_skb[TX_RING_SIZE];
+ dma_addr_t tx_addr;
unsigned int valid_txskb[TX_RING_SIZE];
unsigned int cur_tx;
unsigned int dma_tx;
/* Receive buffers */
struct sk_buff *rx_skb[RX_RING_SIZE];
+ dma_addr_t rx_addr;
unsigned int irq_rxskb[RX_RING_SIZE];
int pkt_len[RX_RING_SIZE];
unsigned int cur_rx;
struct phy_device *phydev;
int old_link;
int old_duplex;
+ u32 msg_level;
+ unsigned int buffer_shifting_len;
};
-void dcache_invalidate_only(unsigned long start, unsigned long end)
-{
- asm("\n"
- " bic r0, r0, #31\n"
- "1: mcr p15, 0, r0, c7, c6, 1\n"
- " add r0, r0, #32\n"
- " cmp r0, r1\n" " blo 1b\n");
-}
-
-void dcache_clean_range(unsigned long start, unsigned long end)
-{
- asm("\n"
- " bic r0, r0, #31\n"
- "1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry\n"
- " add r0, r0, #32\n"
- " cmp r0, r1\n"
- " blo 1b\n" \
- " mcr p15, 0, r0, c7, c10, 4 @ drain WB\n");
-}
-
static inline int nuport_mac_mii_busy_wait(struct nuport_mac_priv *priv)
{
unsigned long curr;
if (ret)
return ret;
- val |= (mii_id << 11) | (regnum << 6) | MII_BUSY;
+ val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT) | MII_BUSY;
nuport_mac_writel(val, MII_ADDR_REG);
ret = nuport_mac_mii_busy_wait(priv);
if (ret)
if (ret)
return ret;
- val |= (mii_id << 11) | (regnum << 6) | MII_BUSY | MII_WRITE;
+ val |= (mii_id << MII_ADDR_SHIFT) | (regnum << MII_REG_SHIFT);
+ val |= MII_BUSY | MII_WRITE;
nuport_mac_writel(value, MII_DATA_REG);
nuport_mac_writel(val, MII_ADDR_REG);
static int nuport_mac_start_tx_dma(struct nuport_mac_priv *priv,
struct sk_buff *skb)
{
- dma_addr_t p;
u32 reg;
unsigned int timeout = 2048;
while (timeout--) {
reg = nuport_mac_readl(TX_START_DMA);
- if (!(reg & 0x01))
+ if (!(reg & TX_DMA_ENABLE)) {
+ netdev_dbg(priv->dev, "dma ready\n");
break;
+ }
cpu_relax();
}
if (!timeout)
return -EBUSY;
- p = dma_map_single(&priv->pdev->dev, skb->data,
+ priv->tx_addr = dma_map_single(&priv->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_addr))
+ return -ENOMEM;
/* enable enhanced mode */
- nuport_mac_writel(0x03, TX_DMA_ENH);
- nuport_mac_writel(p, TX_BUFFER_ADDR);
+ /* preserve the original 0x03 value: enhanced mode + FIFO interrupt */
+ nuport_mac_writel(TX_DMA_ENH_ENABLE | TX_DMA_INT_FIFO, TX_DMA_ENH);
+ nuport_mac_writel(priv->tx_addr, TX_BUFFER_ADDR);
nuport_mac_writel((skb->len) - 1, TX_PKT_BYTES);
wmb();
- nuport_mac_writel(0x0D, TX_START_DMA);
+ reg = TX_DMA_ENABLE | TX_DMA_START_FRAME | TX_DMA_END_FRAME;
+ nuport_mac_writel(reg, TX_START_DMA);
return 0;
}
u32 reg;
reg = nuport_mac_readl(TX_START_DMA);
- reg |= (1 << 24);
+ reg |= TX_DMA_RESET;
nuport_mac_writel(reg, TX_START_DMA);
}
-static void nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
+static int nuport_mac_start_rx_dma(struct nuport_mac_priv *priv,
struct sk_buff *skb)
{
- dma_addr_t p;
+ u32 reg;
+ unsigned int timeout = 2048;
- p = dma_map_single(&priv->pdev->dev, skb->data,
+ /* wait for any in-flight RX DMA to release the channel first */
+ while (timeout--) {
+ reg = nuport_mac_readl(RX_START_DMA);
+ if (!(reg & RX_DMA_ENABLE)) {
+ netdev_dbg(priv->dev, "dma ready\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ if (!timeout)
+ return -EBUSY;
+
+ /* map the full RX_ALLOC_SIZE buffer and hand it to the RX engine */
+ priv->rx_addr = dma_map_single(&priv->pdev->dev, skb->data,
RX_ALLOC_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_addr))
+ return -ENOMEM;
- nuport_mac_writel(p, RX_BUFFER_ADDR);
+ nuport_mac_writel(priv->rx_addr, RX_BUFFER_ADDR);
wmb();
- nuport_mac_writel(0x01, RX_START_DMA);
+ nuport_mac_writel(RX_DMA_ENABLE, RX_START_DMA);
+
+ return 0;
}
static void nuport_mac_reset_rx_dma(struct nuport_mac_priv *priv)
u32 reg;
reg = nuport_mac_readl(RX_START_DMA);
- reg |= (1 << 1);
+ reg |= RX_DMA_RESET;
nuport_mac_writel(reg, RX_START_DMA);
}
u32 reg;
reg = nuport_mac_readl(RX_DMA_ENH);
- reg &= ~(1 << 1);
+ reg &= ~RX_DMA_INT_ENABLE;
nuport_mac_writel(reg, RX_DMA_ENH);
}
u32 reg;
reg = nuport_mac_readl(RX_DMA_ENH);
- reg |= (1 << 1);
+ reg |= RX_DMA_INT_ENABLE;
nuport_mac_writel(reg, RX_DMA_ENH);
}
struct nuport_mac_priv *priv = netdev_priv(dev);
int ret;
- dcache_clean_range((u32) skb->data, (u32)(skb->data + skb->len));
+ if (netif_queue_stopped(dev)) {
+ netdev_warn(dev, "netif queue was stopped, restarting\n");
+ netif_start_queue(dev);
+ }
+
spin_lock_irqsave(&priv->lock, flags);
if (priv->first_pkt) {
ret = nuport_mac_start_tx_dma(priv, skb);
if (ret) {
+ netif_stop_queue(dev);
spin_unlock_irqrestore(&priv->lock, flags);
netdev_err(dev, "transmit path busy\n");
return NETDEV_TX_BUSY;
if (priv->valid_txskb[priv->cur_tx]) {
priv->tx_full = 1;
+ netdev_err(dev, "stopping queue\n");
netif_stop_queue(dev);
}
- if (phydev->link & (priv->old_duplex != phydev->duplex)) {
+ /* logical test, not bitwise: link may be a raw non-0/1 value */
+ if (phydev->link && (priv->old_duplex != phydev->duplex)) {
reg = nuport_mac_readl(CTRL_REG);
if (phydev->duplex == DUPLEX_FULL)
- reg |= (1 << 20);
+ /* FULL_DUPLEX is the CTRL_REG bit (1 << 20); DUPLEX_FULL is
+ * the phylib constant (== 1) and would set the wrong bit.
+ */
+ reg |= FULL_DUPLEX;
else
- reg &= ~(1 << 20);
+ reg &= ~FULL_DUPLEX;
nuport_mac_writel(reg, CTRL_REG);
status_changed = 1;
struct nuport_mac_priv *priv = netdev_priv(dev);
u32 reg;
u8 phy_addr;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_HANDLED;
+ spin_lock_irqsave(&priv->lock, flags);
reg = nuport_mac_readl(LINK_INT_CSR);
- phy_addr = (reg >> 1) & 0x0f;
+ phy_addr = (reg >> LINK_PHY_ADDR_SHIFT) & (PHY_MAX_ADDR - 1);
if (phy_addr != priv->phydev->addr) {
netdev_err(dev, "spurious PHY irq (phy: %d)\n", phy_addr);
- return IRQ_NONE;
+ ret = IRQ_NONE;
+ goto out;
}
- priv->phydev->link = (reg & (1 << 16));
+ /* normalize to 0/1: phylib expects a boolean link state, and
+ * storing the raw LINK_UP bit (0x10000) breaks "link & cond" tests
+ */
+ priv->phydev->link = !!(reg & LINK_UP);
nuport_mac_adjust_link(dev);
- return IRQ_HANDLED;
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
}
static irqreturn_t nuport_mac_tx_interrupt(int irq, void *dev_id)
struct sk_buff *skb;
unsigned long flags;
int ret;
+ u32 reg;
spin_lock_irqsave(&priv->lock, flags);
+ /* clear status word available if ready */
+ reg = nuport_mac_readl(TX_START_DMA);
+ if (reg & TX_DMA_STATUS_AVAIL) {
+ nuport_mac_writel(reg, TX_START_DMA);
+ reg = nuport_mac_readl(TX_DMA_STATUS);
+
+ /* NOTE(review): bit 0 assumed to be the TX error flag — confirm
+ * against the datasheet and give it a named define.
+ */
+ if (reg & 1)
+ dev->stats.tx_errors++;
+ } else
+ netdev_dbg(dev, "no status word: %08x\n", reg);
+
skb = priv->tx_skb[priv->dma_tx];
priv->tx_skb[priv->dma_tx] = NULL;
priv->valid_txskb[priv->dma_tx] = 0;
+ /* unmap the TX mapping (tx_addr), not the RX one */
+ dma_unmap_single(&priv->pdev->dev, priv->tx_addr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
priv->dma_tx++;
}
if (priv->tx_full) {
- netdev_err(dev, "restarting transmit queue\n");
+ netdev_dbg(dev, "restarting transmit queue\n");
netif_wake_queue(dev);
priv->tx_full = 0;
}
unsigned int i;
for (i = 0; i < RX_RING_SIZE; i++)
- if (priv->irq_rxskb[i])
+ if (priv->rx_skb[i])
return 1;
return 0;
struct net_device *dev = (struct net_device *)dev_id;
struct nuport_mac_priv *priv = netdev_priv(dev);
unsigned long flags;
+ int ret;
spin_lock_irqsave(&priv->lock, flags);
- priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
- priv->irq_rxskb[priv->dma_rx] = 0;
- priv->dma_rx++;
-
- if (priv->dma_rx >= RX_RING_SIZE)
- priv->dma_rx = 0;
-
- if (priv->irq_rxskb[priv->dma_rx] == 1)
- nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
+ if (!priv->rx_full) {
+ priv->pkt_len[priv->dma_rx] = nuport_mac_readl(RX_ACT_BYTES) - 4;
+ priv->irq_rxskb[priv->dma_rx] = 0;
+ priv->dma_rx++;
+
+ if (priv->dma_rx >= RX_RING_SIZE)
+ priv->dma_rx = 0;
+ } else
+ priv->rx_full = 0;
+
+ if (priv->irq_rxskb[priv->dma_rx] == 1) {
+ ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[priv->dma_rx]);
+ if (ret)
+ netdev_err(dev, "failed to start rx dma\n");
+ } else {
+ priv->rx_full = 1;
+ netdev_dbg(dev, "RX ring full\n");
+ }
if (likely(nuport_mac_has_work(priv))) {
/* find a way to disable DMA rx irq */
nuport_mac_disable_rx_dma(priv);
napi_schedule(&priv->napi);
}
-
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
while (count < limit && !priv->irq_rxskb[priv->cur_rx]) {
skb = priv->rx_skb[priv->cur_rx];
len = priv->pkt_len[priv->cur_rx];
- dcache_invalidate_only(((u32) skb->data),
- ((u32) (skb->data + len + 4)));
/* Remove 2 bytes added by RX buffer shifting */
- len = len - 2;
- skb->data = skb->data + 2;
+ len = len - priv->buffer_shifting_len;
+ skb->data = skb->data + priv->buffer_shifting_len;
+ /* Unmap before the CPU reads the DMA'd payload, and unmap with the
+ * length the buffer was mapped with (RX_ALLOC_SIZE) — skb->len is
+ * not the mapped length for an RX skb at this point.
+ */
+ dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+
/* Get packet status */
status = get_unaligned((u32 *) (skb->data + len));
skb->dev = dev;
/* packet filter failed */
if (!(status & (1 << 30))) {
dev_kfree_skb_irq(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Pass the received packet to network layer */
- netif_receive_skb(skb);
-
+ status = netif_receive_skb(skb);
if (status != NET_RX_DROP)
dev->stats.rx_bytes += len - 4; /* Without CRC */
else
dev_kfree_skb(priv->rx_skb[i]);
priv->rx_skb[i] = NULL;
}
+
+ /* the RX buffer was mapped DMA_FROM_DEVICE; unmap must match */
+ if (priv->rx_addr)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_addr, RX_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
}
static void nuport_mac_read_mac_address(struct net_device *dev)
int ret;
struct nuport_mac_priv *priv = netdev_priv(dev);
unsigned long flags;
- u32 reg;
- u8 tmp;
-
- /* Enable hardware filters */
- reg = nuport_mac_readl((void __iomem *)_CONFADDR_DBGLED);
- reg |= 0x80;
- nuport_mac_writel(reg, (void __iomem *)_CONFADDR_DBGLED);
-
- /* Set LEDs to Link act and RX/TX mode */
- reg = nuport_mac_readl((void __iomem *)(_CONFADDR_SYSDBG + 0x04));
- reg |= 0x01;
- nuport_mac_writel(reg, (void __iomem *)(_CONFADDR_SYSDBG + 0x04));
+ u32 reg = 0;
ret = clk_enable(priv->emac_clk);
if (ret) {
}
/* Set MAC into full duplex mode by default */
- nuport_mac_writel(0x1010052C, CTRL_REG);
+ reg |= RX_ENABLE | TX_ENABLE;
+ reg |= DEFER_CHECK | STRIP_PAD | DRTRY_DISABLE;
+ reg |= FULL_DUPLEX | HBD_DISABLE;
+ nuport_mac_writel(reg, CTRL_REG);
/* set mac address in hardware in case it was not already */
nuport_mac_change_mac_address(dev, dev->dev_addr);
goto out_emac_clk;
}
- phy_start(priv->phydev);
-
- /* Enable link interrupt monitoring */
- spin_lock_irqsave(&priv->lock, flags);
- nuport_mac_writel(0x1041 | (priv->phydev->addr << 1), LINK_INT_CSR);
- nuport_mac_writel(0xFFFFF, LINK_INT_POLL_TIME);
- spin_unlock_irqrestore(&priv->lock, flags);
-
ret = request_irq(priv->tx_irq, &nuport_mac_tx_interrupt,
0, dev->name, dev);
if (ret) {
goto out_link_irq;
}
- napi_enable(&priv->napi);
+ /* Enable link interrupt monitoring for our PHY address */
+ reg = LINK_INT_EN | (priv->phydev->addr << LINK_PHY_ADDR_SHIFT);
+ /* MII_BMSR register to be watched */
+ reg |= (1 << LINK_PHY_REG_SHIFT);
+ /* BMSR_STATUS to be watched in particular */
+ reg |= (2 << LINK_BIT_UP_SHIFT);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ nuport_mac_writel(reg, LINK_INT_CSR);
+ nuport_mac_writel(LINK_POLL_MASK, LINK_INT_POLL_TIME);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ phy_start(priv->phydev);
ret = request_irq(priv->rx_irq, &nuport_mac_rx_interrupt,
0, dev->name, dev);
goto out_tx_irq;
}
- /* Enable buffer shifting in RX */
- tmp = nuport_mac_readb((void __iomem *)(_CONFADDR_SYSDBG + 0x1D));
- tmp |= 0x01;
- nuport_mac_writeb(tmp, (void __iomem *)(_CONFADDR_SYSDBG + 0x1D));
-
netif_start_queue(dev);
nuport_mac_init_tx_ring(priv);
nuport_mac_reset_rx_dma(priv);
/* Start RX DMA */
- nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
+ spin_lock_irqsave(&priv->lock, flags);
+ ret = nuport_mac_start_rx_dma(priv, priv->rx_skb[0]);
+ spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ napi_enable(&priv->napi);
+
+ return ret;
out_rx_skb:
nuport_mac_free_rx_ring(priv);
static int nuport_mac_close(struct net_device *dev)
{
+ u32 reg;
struct nuport_mac_priv *priv = netdev_priv(dev);
spin_lock_irq(&priv->lock);
+ reg = nuport_mac_readl(CTRL_REG);
+ reg &= ~(RX_ENABLE | TX_ENABLE);
+ nuport_mac_writel(reg, CTRL_REG);
+
napi_disable(&priv->napi);
netif_stop_queue(dev);
free_irq(priv->link_irq, dev);
- nuport_mac_writel(0x00, LINK_INT_CSR);
- nuport_mac_writel(0x00, LINK_INT_POLL_TIME);
+ /* disable PHY polling */
+ nuport_mac_writel(0, LINK_INT_CSR);
+ nuport_mac_writel(0, LINK_INT_POLL_TIME);
phy_stop(priv->phydev);
free_irq(priv->tx_irq, dev);
return 0;
}
+/* ndo_tx_timeout handler: dump both DMA register banks for diagnosis,
+ * then reinitialize the TX ring and reset the TX DMA engine before
+ * waking the queue.
+ */
+static void nuport_mac_tx_timeout(struct net_device *dev)
+{
+ struct nuport_mac_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ netdev_warn(dev, "transmit timeout, attempting recovery\n");
+
+ netdev_info(dev, "TX DMA regs\n");
+ for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
+ netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(TX_DMA_BASE + i));
+ netdev_info(dev, "RX DMA regs\n");
+ for (i = 0; i < DMA_CHAN_WIDTH; i += 4)
+ netdev_info(dev, "[%02x]: 0x%08x\n", i, nuport_mac_readl(RX_DMA_BASE + i));
+
+ /* NOTE(review): skbs still mapped in the old ring are not unmapped
+ * here — confirm nuport_mac_init_tx_ring() releases them, otherwise
+ * this leaks DMA mappings on every timeout.
+ */
+ nuport_mac_init_tx_ring(priv);
+ nuport_mac_reset_tx_dma(priv);
+
+ netif_wake_queue(dev);
+}
+
static int nuport_mac_mii_probe(struct net_device *dev)
{
struct nuport_mac_priv *priv = netdev_priv(dev);
phydev->supported &= PHY_BASIC_FEATURES;
phydev->advertising = phydev->supported;
priv->phydev = phydev;
- priv->old_link = 0;
- priv->old_duplex = -1;
+ priv->old_link = 1;
+ priv->old_duplex = DUPLEX_FULL;
dev_info(&priv->pdev->dev, "attached PHY driver [%s] "
"(mii_bus:phy_addr=%d)\n",
return -EINVAL;
}
+/* ethtool set_msglevel: store the requested NETIF_MSG_* bitmask */
+static void nuport_mac_set_msglevel(struct net_device *dev, u32 msg_level)
+{
+ struct nuport_mac_priv *priv = netdev_priv(dev);
+
+ priv->msg_level = msg_level;
+}
+
+/* ethtool get_msglevel: return the current NETIF_MSG_* bitmask */
+static u32 nuport_mac_get_msglevel(struct net_device *dev)
+{
+ struct nuport_mac_priv *priv = netdev_priv(dev);
+
+ return priv->msg_level;
+}
+
+
static const struct ethtool_ops nuport_mac_ethtool_ops = {
.get_drvinfo = nuport_mac_ethtool_drvinfo,
.get_link = ethtool_op_get_link,
.get_settings = nuport_mac_ethtool_get_settings,
.set_settings = nuport_mac_ethtool_set_settings,
+ .set_msglevel = nuport_mac_set_msglevel,
+ .get_msglevel = nuport_mac_get_msglevel,
};
static const struct net_device_ops nuport_mac_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nuport_mac_change_mac_address,
+ .ndo_tx_timeout = nuport_mac_tx_timeout,
};
static int __init nuport_mac_probe(struct platform_device *pdev)
int ret = 0;
int rx_irq, tx_irq, link_irq;
int i;
+ const unsigned int *intspec;
dev = alloc_etherdev(sizeof(struct nuport_mac_priv));
if (!dev) {
priv->dev = dev;
spin_lock_init(&priv->lock);
+ intspec = of_get_property(pdev->dev.of_node,
+ "nuport-mac,buffer-shifting", NULL);
+ if (!intspec)
+ priv->buffer_shifting_len = 0;
+ else
+ priv->buffer_shifting_len = 2;
+
priv->mac_base = devm_ioremap(&pdev->dev,
regs->start, resource_size(regs));
if (!priv->mac_base) {
priv->link_irq = link_irq;
priv->rx_irq = rx_irq;
priv->tx_irq = tx_irq;
+ priv->msg_level = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
dev->netdev_ops = &nuport_mac_ops;
dev->ethtool_ops = &nuport_mac_ethtool_ops;
dev->watchdog_timeo = HZ;
dev->flags = IFF_BROADCAST; /* Supports Broadcast */
+ dev->tx_queue_len = TX_RING_SIZE / 2;
netif_napi_add(dev, &priv->napi, nuport_mac_poll, 64);