RAMIPS_GDMA1_MAC_ADRL);
}
+static struct sk_buff *
+ramips_alloc_skb(struct raeth_priv *re)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(re->netdev, MAX_RX_LENGTH + NET_IP_ALIGN);
+ if (!skb)
+ return NULL;
+
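+ /* make room so the IP header ends up 4-byte aligned */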
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ return skb;
+}
+
+static void
+ramips_ring_setup(struct raeth_priv *re)
+{
+ int len;
+ int i;
+
+ len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
+ memset(re->tx, 0, len);
+
+ re->skb_free_idx = 0;
+
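+ /* mark every TX descriptor as done so the ring starts out empty */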
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ struct ramips_tx_dma *txd;
+
+ txd = &re->tx[i];
+ txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
+ txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;
+
+ if (re->tx_skb[i] != NULL) {
+ netdev_warn(re->netdev,
+ "dirty skb for TX desc %d\n", i);
+ re->tx_skb[i] = NULL;
+ }
+ }
+
+ len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
+ memset(re->rx, 0, len);
+
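+ /* hand the preallocated RX buffers over to the DMA engine */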
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ dma_addr_t dma_addr;
+
+ BUG_ON(re->rx_skb[i] == NULL);
+ dma_addr = dma_map_single(&re->netdev->dev, re->rx_skb[i]->data,
+ MAX_RX_LENGTH, DMA_FROM_DEVICE);
+ re->rx_dma[i] = dma_addr;
+ re->rx[i].rxd1 = (unsigned int) dma_addr;
+ re->rx[i].rxd2 = RX_DMA_LSO;
+ }
+
+ /* flush descriptors */
+ wmb();
+}
+
+static void
+ramips_ring_cleanup(struct raeth_priv *re)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++)
+ if (re->rx_skb[i])
+ dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
+ MAX_RX_LENGTH, DMA_FROM_DEVICE);
+
+ for (i = 0; i < NUM_TX_DESC; i++)
+ if (re->tx_skb[i]) {
+ dev_kfree_skb_any(re->tx_skb[i]);
+ re->tx_skb[i] = NULL;
+ }
+}
+
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)
#define RAMIPS_MDIO_RETRY 1000
-static void
-ramips_setup_mdio_cfg(struct raeth_priv *re)
+static const char *ramips_speed_str(struct raeth_priv *re)
{
- unsigned int mdio_cfg;
+ switch (re->speed) {
+ case SPEED_1000:
+ return "1000";
+ case SPEED_100:
+ return "100";
+ case SPEED_10:
+ return "10";
+ }
+
+ return "?";
+}
+
+static void ramips_link_adjust(struct raeth_priv *re)
+{
+ struct ramips_eth_platform_data *pdata;
+ u32 mdio_cfg;
+
+ pdata = re->parent->platform_data;
+ if (!re->link) {
+ netif_carrier_off(re->netdev);
+ netdev_info(re->netdev, "link down\n");
+ return;
+ }
mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
RAMIPS_MDIO_CFG_RX_CLK_SKEW_200;
ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);
+
+ netif_carrier_on(re->netdev);
+ netdev_info(re->netdev, "link up (%sMbps/%s duplex)\n",
+ ramips_speed_str(re),
+ (DUPLEX_FULL == re->duplex) ? "Full" : "Half");
}
+
static int
ramips_mdio_wait_ready(struct raeth_priv *re)
{
kfree(re->mii_bus);
}
-#else
-static inline void
-ramips_setup_mdio_cfg(struct raeth_priv *re)
+static void
+ramips_phy_link_adjust(struct net_device *dev)
+{
+ struct raeth_priv *re = netdev_priv(dev);
+ struct phy_device *phydev = re->phy_dev;
+ unsigned long flags;
+ int status_change = 0;
+
+ spin_lock_irqsave(&re->phy_lock, flags);
+
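+ /* reprogram the MAC only when the link state, speed or duplex changed */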
+ if (phydev->link)
+ if (re->duplex != phydev->duplex ||
+ re->speed != phydev->speed)
+ status_change = 1;
+
+ if (phydev->link != re->link)
+ status_change = 1;
+
+ re->link = phydev->link;
+ re->duplex = phydev->duplex;
+ re->speed = phydev->speed;
+
+ if (status_change)
+ ramips_link_adjust(re);
+
+ spin_unlock_irqrestore(&re->phy_lock, flags);
+}
+
+static int
+ramips_phy_connect_multi(struct raeth_priv *re)
{
+ struct net_device *netdev = re->netdev;
+ struct ramips_eth_platform_data *pdata;
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+ int ret = 0;
+
+ pdata = re->parent->platform_data;
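+ /* probe every address in phy_mask and attach to the first PHY found */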
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (!(pdata->phy_mask & (1 << phy_addr)))
+ continue;
+
+ if (re->mii_bus->phy_map[phy_addr] == NULL)
+ continue;
+
+ RADEBUG("%s: PHY found at %s, uid=%08x\n",
+ netdev->name,
+ dev_name(&re->mii_bus->phy_map[phy_addr]->dev),
+ re->mii_bus->phy_map[phy_addr]->phy_id);
+
+ if (phydev == NULL)
+ phydev = re->mii_bus->phy_map[phy_addr];
+ }
+
+ if (!phydev) {
+ netdev_err(netdev, "no PHY found with phy_mask=%08x\n",
+ pdata->phy_mask);
+ return -ENODEV;
+ }
+
+ re->phy_dev = phy_connect(netdev, dev_name(&phydev->dev),
+ ramips_phy_link_adjust, 0,
+ pdata->phy_if_mode);
+
+ if (IS_ERR(re->phy_dev)) {
+ netdev_err(netdev, "could not connect to PHY at %s\n",
+ dev_name(&phydev->dev));
+ return PTR_ERR(re->phy_dev);
+ }
+
+ phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->advertising = phydev->supported;
+
+ RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
+ netdev->name, dev_name(&phydev->dev),
+ phydev->phy_id, phydev->drv->name);
+
+ re->link = 0;
+ re->speed = 0;
+ re->duplex = -1;
+ re->rx_fc = 0;
+ re->tx_fc = 0;
+
+ return ret;
}
+
+static int
+ramips_phy_connect_fixed(struct raeth_priv *re)
+{
+ struct ramips_eth_platform_data *pdata;
+
+ pdata = re->parent->platform_data;
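+ /* no PHY attached: validate the fixed link parameters from the platform data */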
+ switch (pdata->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ break;
+ default:
+ netdev_err(re->netdev, "invalid speed specified\n");
+ return -EINVAL;
+ }
+
+ RADEBUG("%s: using fixed link parameters\n", re->netdev->name);
+
+ re->speed = pdata->speed;
+ re->duplex = pdata->duplex;
+ re->tx_fc = pdata->tx_fc;
+ re->rx_fc = pdata->rx_fc;
+
+ return 0;
+}
+
+static int
+ramips_phy_connect(struct raeth_priv *re)
+{
+ struct ramips_eth_platform_data *pdata;
+
+ pdata = re->parent->platform_data;
+ if (pdata->phy_mask)
+ return ramips_phy_connect_multi(re);
+
+ return ramips_phy_connect_fixed(re);
+}
+
+static void
+ramips_phy_disconnect(struct raeth_priv *re)
+{
+ if (re->phy_dev)
+ phy_disconnect(re->phy_dev);
+}
+
+static void
+ramips_phy_start(struct raeth_priv *re)
+{
+ unsigned long flags;
+
+ if (re->phy_dev) {
+ phy_start(re->phy_dev);
+ } else {
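+ /* fixed link: there is no PHY to poll, force the link up by hand */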
+ spin_lock_irqsave(&re->phy_lock, flags);
+ re->link = 1;
+ ramips_link_adjust(re);
+ spin_unlock_irqrestore(&re->phy_lock, flags);
+ }
+}
+
+static void
+ramips_phy_stop(struct raeth_priv *re)
+{
+ unsigned long flags;
+
+ if (re->phy_dev)
+ phy_stop(re->phy_dev);
+
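+ /* force the link down in software even when no PHY is attached */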
+ spin_lock_irqsave(&re->phy_lock, flags);
+ re->link = 0;
+ ramips_link_adjust(re);
+ spin_unlock_irqrestore(&re->phy_lock, flags);
+}
+#else
static inline int
ramips_mdio_init(struct raeth_priv *re)
{
return 0;
}
static inline void
ramips_mdio_cleanup(struct raeth_priv *re)
{
}
+
+static inline int
+ramips_phy_connect(struct raeth_priv *re)
+{
+ return 0;
+}
+
+static inline void
+ramips_phy_disconnect(struct raeth_priv *re)
+{
+}
+
+static inline void
+ramips_phy_start(struct raeth_priv *re)
+{
+}
+
+static inline void
+ramips_phy_stop(struct raeth_priv *re)
+{
+}
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */
static void
-ramips_cleanup_dma(struct raeth_priv *re)
+ramips_ring_free(struct raeth_priv *re)
{
+ int len;
int i;
for (i = 0; i < NUM_RX_DESC; i++)
- if (re->rx_skb[i]) {
- dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
- MAX_RX_LENGTH, DMA_FROM_DEVICE);
+ if (re->rx_skb[i])
dev_kfree_skb_any(re->rx_skb[i]);
- }
- if (re->rx)
- dma_free_coherent(&re->netdev->dev,
- NUM_RX_DESC * sizeof(struct ramips_rx_dma),
- re->rx, re->rx_desc_dma);
+ if (re->rx) {
+ len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
+ dma_free_coherent(&re->netdev->dev, len, re->rx,
+ re->rx_desc_dma);
+ }
- if (re->tx)
- dma_free_coherent(&re->netdev->dev,
- NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- re->tx, re->tx_desc_dma);
+ if (re->tx) {
+ len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
+ dma_free_coherent(&re->netdev->dev, len, re->tx,
+ re->tx_desc_dma);
+ }
}
static int
-ramips_alloc_dma(struct raeth_priv *re)
+ramips_ring_alloc(struct raeth_priv *re)
{
+ int len;
int err = -ENOMEM;
int i;
- re->skb_free_idx = 0;
-
- /* setup tx ring */
- re->tx = dma_alloc_coherent(&re->netdev->dev,
- NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- &re->tx_desc_dma, GFP_ATOMIC);
+ /* allocate tx ring */
+ len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
+ re->tx = dma_alloc_coherent(&re->netdev->dev, len,
+ &re->tx_desc_dma, GFP_ATOMIC);
if (!re->tx)
goto err_cleanup;
- memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
- for (i = 0; i < NUM_TX_DESC; i++) {
- re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
- re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
- }
-
- /* setup rx ring */
- re->rx = dma_alloc_coherent(&re->netdev->dev,
- NUM_RX_DESC * sizeof(struct ramips_rx_dma),
+ /* allocate rx ring */
+ len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
+ re->rx = dma_alloc_coherent(&re->netdev->dev, len,
&re->rx_desc_dma, GFP_ATOMIC);
if (!re->rx)
goto err_cleanup;
- memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
for (i = 0; i < NUM_RX_DESC; i++) {
- dma_addr_t dma_addr;
- struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH +
- NET_IP_ALIGN);
+ struct sk_buff *skb;
- if (!new_skb)
+ skb = ramips_alloc_skb(re);
+ if (!skb)
goto err_cleanup;
- skb_reserve(new_skb, NET_IP_ALIGN);
-
- dma_addr = dma_map_single(&re->netdev->dev, new_skb->data,
- MAX_RX_LENGTH, DMA_FROM_DEVICE);
- re->rx_dma[i] = dma_addr;
- re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
- re->rx[i].rxd2 |= RX_DMA_LSO;
- re->rx_skb[i] = new_skb;
+ re->rx_skb[i] = skb;
}
return 0;
- err_cleanup:
- ramips_cleanup_dma(re);
+err_cleanup:
+ ramips_ring_free(re);
return err;
}
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
unsigned long tx;
unsigned int tx_next;
dma_addr_t mapped_addr;
- if (priv->plat->min_pkt_len) {
- if (skb->len < priv->plat->min_pkt_len) {
- if (skb_padto(skb, priv->plat->min_pkt_len)) {
+ if (re->plat->min_pkt_len) {
+ if (skb->len < re->plat->min_pkt_len) {
+ if (skb_padto(skb, re->plat->min_pkt_len)) {
printk(KERN_ERR
"ramips_eth: skb_padto failed\n");
- kfree_skb(skb);
+ /* skb_padto() frees the skb on failure, don't free it again */
return 0;
}
- skb_put(skb, priv->plat->min_pkt_len - skb->len);
+ skb_put(skb, re->plat->min_pkt_len - skb->len);
}
}
dev->trans_start = jiffies;
- mapped_addr = dma_map_single(&priv->netdev->dev, skb->data, skb->len,
+ mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- spin_lock(&priv->page_lock);
+ spin_lock(&re->page_lock);
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
tx_next = (tx + 1) % NUM_TX_DESC;
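+ /* drop the packet if the current or the next descriptor is still in use */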
- if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
- !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
- !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
+ if ((re->tx_skb[tx]) || (re->tx_skb[tx_next]) ||
+ !(re->tx[tx].txd2 & TX_DMA_DONE) ||
+ !(re->tx[tx_next].txd2 & TX_DMA_DONE))
goto out;
- priv->tx[tx].txd1 = (unsigned int) mapped_addr;
- priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
- priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
+ re->tx[tx].txd1 = (unsigned int) mapped_addr;
+ re->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
+ re->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- priv->tx_skb[tx] = skb;
+ re->tx_skb[tx] = skb;
wmb();
ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
- spin_unlock(&priv->page_lock);
+ spin_unlock(&re->page_lock);
return NETDEV_TX_OK;
out:
- spin_unlock(&priv->page_lock);
+ spin_unlock(&re->page_lock);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
ramips_eth_rx_hw(unsigned long ptr)
{
struct net_device *dev = (struct net_device *) ptr;
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
int rx;
int max_rx = 16;
int pktlen;
rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
- if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
+ if (!(re->rx[rx].rxd2 & RX_DMA_DONE))
break;
max_rx--;
- rx_skb = priv->rx_skb[rx];
- pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);
+ rx_skb = re->rx_skb[rx];
+ pktlen = RX_DMA_PLEN0(re->rx[rx].rxd2);
- new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
+ new_skb = ramips_alloc_skb(re);
/* Reuse the buffer on allocation failures */
if (new_skb) {
dma_addr_t dma_addr;
- dma_unmap_single(&priv->netdev->dev, priv->rx_dma[rx],
+ dma_unmap_single(&re->netdev->dev, re->rx_dma[rx],
MAX_RX_LENGTH, DMA_FROM_DEVICE);
skb_put(rx_skb, pktlen);
dev->stats.rx_bytes += pktlen;
netif_rx(rx_skb);
- priv->rx_skb[rx] = new_skb;
- skb_reserve(new_skb, NET_IP_ALIGN);
+ re->rx_skb[rx] = new_skb;
- dma_addr = dma_map_single(&priv->netdev->dev,
+ dma_addr = dma_map_single(&re->netdev->dev,
new_skb->data,
MAX_RX_LENGTH,
DMA_FROM_DEVICE);
- priv->rx_dma[rx] = dma_addr;
- priv->rx[rx].rxd1 = (unsigned int) dma_addr;
+ re->rx_dma[rx] = dma_addr;
+ re->rx[rx].rxd1 = (unsigned int) dma_addr;
} else {
dev->stats.rx_dropped++;
}
- priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
+ re->rx[rx].rxd2 &= ~RX_DMA_DONE;
wmb();
ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
}
if (max_rx == 0)
- tasklet_schedule(&priv->rx_tasklet);
+ tasklet_schedule(&re->rx_tasklet);
else
ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
ramips_eth_tx_housekeeping(unsigned long ptr)
{
struct net_device *dev = (struct net_device*)ptr;
- struct raeth_priv *priv = netdev_priv(dev);
-
- spin_lock(&priv->page_lock);
- while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
- (priv->tx_skb[priv->skb_free_idx])) {
- dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
- priv->tx_skb[priv->skb_free_idx] = 0;
- priv->skb_free_idx++;
- if (priv->skb_free_idx >= NUM_TX_DESC)
- priv->skb_free_idx = 0;
+ struct raeth_priv *re = netdev_priv(dev);
+
+ spin_lock(&re->page_lock);
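+ /* reclaim skbs for all descriptors the DMA engine has completed */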
+ while ((re->tx[re->skb_free_idx].txd2 & TX_DMA_DONE) &&
+ (re->tx_skb[re->skb_free_idx])) {
+ dev_kfree_skb_irq(re->tx_skb[re->skb_free_idx]);
+ re->tx_skb[re->skb_free_idx] = NULL;
+ re->skb_free_idx++;
+ if (re->skb_free_idx >= NUM_TX_DESC)
+ re->skb_free_idx = 0;
}
- spin_unlock(&priv->page_lock);
+ spin_unlock(&re->page_lock);
ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}
static void
ramips_eth_timeout(struct net_device *dev)
{
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
- tasklet_schedule(&priv->tx_housekeeping_tasklet);
+ tasklet_schedule(&re->tx_housekeeping_tasklet);
}
static irqreturn_t
ramips_eth_irq(int irq, void *dev)
{
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS);
ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);
if (fe_int & RAMIPS_RX_DLY_INT) {
ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
- tasklet_schedule(&priv->rx_tasklet);
+ tasklet_schedule(&re->rx_tasklet);
}
if (fe_int & RAMIPS_TX_DLY_INT) {
ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
- tasklet_schedule(&priv->tx_housekeeping_tasklet);
+ tasklet_schedule(&re->tx_housekeeping_tasklet);
}
return IRQ_HANDLED;
static int
ramips_eth_open(struct net_device *dev)
{
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
int err;
err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
if (err)
return err;
- err = ramips_alloc_dma(priv);
+ err = ramips_ring_alloc(re);
if (err)
goto err_free_irq;
+ ramips_ring_setup(re);
ramips_hw_set_macaddr(dev->dev_addr);
- ramips_setup_dma(priv);
+ ramips_setup_dma(re);
ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
RAMIPS_PDMA_GLO_CFG);
ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
- ((priv->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
+ ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
RAMIPS_FE_GLO_CFG);
- tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
+ tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
(unsigned long)dev);
- tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
+ tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
- ramips_setup_mdio_cfg(priv);
+ ramips_phy_start(re);
ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
static int
ramips_eth_stop(struct net_device *dev)
{
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
/* disable all interrupts in the hw */
ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);
+ ramips_phy_stop(re);
free_irq(dev->irq, dev);
netif_stop_queue(dev);
- tasklet_kill(&priv->tx_housekeeping_tasklet);
- tasklet_kill(&priv->rx_tasklet);
- ramips_cleanup_dma(priv);
+ tasklet_kill(&re->tx_housekeeping_tasklet);
+ tasklet_kill(&re->rx_tasklet);
+ ramips_ring_cleanup(re);
+ ramips_ring_free(re);
RADEBUG("ramips_eth: stopped\n");
return 0;
}
static int __init
ramips_eth_probe(struct net_device *dev)
{
- struct raeth_priv *priv = netdev_priv(dev);
+ struct raeth_priv *re = netdev_priv(dev);
int err;
- BUG_ON(!priv->plat->reset_fe);
- priv->plat->reset_fe();
+ BUG_ON(!re->plat->reset_fe);
+ re->plat->reset_fe();
net_srandom(jiffies);
- memcpy(dev->dev_addr, priv->plat->mac, ETH_ALEN);
+ memcpy(dev->dev_addr, re->plat->mac, ETH_ALEN);
ether_setup(dev);
dev->mtu = 1500;
dev->watchdog_timeo = TX_TIMEOUT;
- spin_lock_init(&priv->page_lock);
+ spin_lock_init(&re->page_lock);
+ spin_lock_init(&re->phy_lock);
+
+ err = ramips_mdio_init(re);
+ if (err)
+ return err;
- err = ramips_mdio_init(priv);
+ err = ramips_phy_connect(re);
+ if (err)
+ goto err_mdio_cleanup;
+
+ return 0;
+
+err_mdio_cleanup:
+ ramips_mdio_cleanup(re);
return err;
}
{
struct raeth_priv *re = netdev_priv(dev);
+ ramips_phy_disconnect(re);
ramips_mdio_cleanup(re);
}
static int
ramips_eth_plat_probe(struct platform_device *plat)
{
- struct raeth_priv *priv;
+ struct raeth_priv *re;
struct ramips_eth_platform_data *data = plat->dev.platform_data;
struct resource *res;
int err;
ramips_dev->base_addr = (unsigned long)ramips_fe_base;
ramips_dev->netdev_ops = &ramips_eth_netdev_ops;
- priv = netdev_priv(ramips_dev);
+ re = netdev_priv(ramips_dev);
- priv->netdev = ramips_dev;
- priv->parent = &plat->dev;
- priv->speed = data->speed;
- priv->duplex = data->duplex;
- priv->rx_fc = data->rx_fc;
- priv->tx_fc = data->tx_fc;
- priv->plat = data;
+ re->netdev = ramips_dev;
+ re->parent = &plat->dev;
+ re->speed = data->speed;
+ re->duplex = data->duplex;
+ re->rx_fc = data->rx_fc;
+ re->tx_fc = data->tx_fc;
+ re->plat = data;
err = register_netdev(ramips_dev);
if (err) {