Diffstat (limited to 'drivers/net/ethernet/sun')
-rw-r--r--	drivers/net/ethernet/sun/cassini.c	121
-rw-r--r--	drivers/net/ethernet/sun/niu.c	17
-rw-r--r--	drivers/net/ethernet/sun/sungem.c	129
3 files changed, 138 insertions(+), 129 deletions(-)
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 015fdb851cdb..e2bc7a25f6d1 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -443,8 +443,8 @@ static void cas_phy_powerdown(struct cas *cp)
 /* cp->lock held. note: the last put_page will free the buffer */
 static int cas_page_free(struct cas *cp, cas_page_t *page)
 {
-	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
-		       PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
+		       DMA_FROM_DEVICE);
 	__free_pages(page->buffer, cp->page_order);
 	kfree(page);
 	return 0;
@@ -474,8 +474,8 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 	page->buffer = alloc_pages(flags, cp->page_order);
 	if (!page->buffer)
 		goto page_err;
-	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
-				      cp->page_size, PCI_DMA_FROMDEVICE);
+	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
+				      cp->page_size, DMA_FROM_DEVICE);
 	return page;

 page_err:
@@ -1863,8 +1863,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
 			daddr = le64_to_cpu(txd->buffer);
 			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
-			pci_unmap_page(cp->pdev, daddr, dlen,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+				       DMA_TO_DEVICE);
 			entry = TX_DESC_NEXT(ring, entry);

 			/* tiny buffer may follow */
@@ -1957,12 +1957,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (!dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-				    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 		addr = cas_page_map(page->buffer);
 		memcpy(p, addr + off, i);
-		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-				    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&cp->pdev->dev,
+					   page->dma_addr + off, i,
+					   DMA_FROM_DEVICE);
 		cas_page_unmap(addr);
 		RX_USED_ADD(page, 0x100);
 		p += hlen;
@@ -1988,16 +1989,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (i == dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-				    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);

 		/* make sure we always copy a header */
 		swivel = 0;
 		if (p == (char *) skb->data) { /* not split */
 			addr = cas_page_map(page->buffer);
 			memcpy(p, addr + off, RX_COPY_MIN);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-					PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr + off, i,
+						   DMA_FROM_DEVICE);
 			cas_page_unmap(addr);
 			off += RX_COPY_MIN;
 			swivel = RX_COPY_MIN;
@@ -2024,12 +2026,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-					    hlen + cp->crc_size,
-					    PCI_DMA_FROMDEVICE);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-					    hlen + cp->crc_size,
-					    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&cp->pdev->dev,
+						page->dma_addr,
+						hlen + cp->crc_size,
+						DMA_FROM_DEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr,
+						   hlen + cp->crc_size,
+						   DMA_FROM_DEVICE);

 			skb_shinfo(skb)->nr_frags++;
 			skb->data_len += hlen;
@@ -2066,12 +2070,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (i == dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-				    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 		addr = cas_page_map(page->buffer);
 		memcpy(p, addr + off, i);
-		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-				    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&cp->pdev->dev,
+					   page->dma_addr + off, i,
+					   DMA_FROM_DEVICE);
 		cas_page_unmap(addr);
 		if (p == (char *) skb->data) /* not split */
 			RX_USED_ADD(page, cp->mtu_stride);
@@ -2083,14 +2088,16 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 			p += hlen;
 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-					    dlen + cp->crc_size,
-					    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&cp->pdev->dev,
+						page->dma_addr,
+						dlen + cp->crc_size,
+						DMA_FROM_DEVICE);
 			addr = cas_page_map(page->buffer);
 			memcpy(p, addr, dlen + cp->crc_size);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-					    dlen + cp->crc_size,
-					    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr,
+						   dlen + cp->crc_size,
+						   DMA_FROM_DEVICE);
 			cas_page_unmap(addr);
 			RX_USED_ADD(page, dlen + cp->crc_size);
 		}
@@ -2766,9 +2773,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	len = skb_headlen(skb);
-	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
-			       offset_in_page(skb->data), len,
-			       PCI_DMA_TODEVICE);
+	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), len, DMA_TO_DEVICE);

 	tentry = entry;
 	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
@@ -3882,8 +3888,8 @@ static void cas_clean_txd(struct cas *cp, int ring)
 			daddr = le64_to_cpu(txd[ent].buffer);
 			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd[ent].control));
-			pci_unmap_page(cp->pdev, daddr, dlen,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+				       DMA_TO_DEVICE);

 			if (frag != skb_shinfo(skb)->nr_frags) {
 				i++;
@@ -4181,9 +4187,8 @@ static void cas_tx_tiny_free(struct cas *cp)
 		if (!cp->tx_tiny_bufs[i])
 			continue;

-		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
-				    cp->tx_tiny_bufs[i],
-				    cp->tx_tiny_dvma[i]);
+		dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+				  cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
 		cp->tx_tiny_bufs[i] = NULL;
 	}
 }
@@ -4195,8 +4200,8 @@ static int cas_tx_tiny_alloc(struct cas *cp)
 	for (i = 0; i < N_TX_RINGS; i++) {
 		cp->tx_tiny_bufs[i] =
-			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
-					     &cp->tx_tiny_dvma[i]);
+			dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
 		if (!cp->tx_tiny_bufs[i]) {
 			cas_tx_tiny_free(cp);
 			return -1;
@@ -4958,10 +4963,9 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Configure DMA attributes.
 	 */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev,
-						  DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (err < 0) {
 			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
 				"for consistent allocations\n");
@@ -4969,7 +4973,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}

 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "No usable DMA configuration, "
 				"aborting\n");
@@ -5048,8 +5052,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		cas_saturn_firmware_init(cp);

 	cp->init_block =
-		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
-				     &cp->block_dvma);
+		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
+				   &cp->block_dvma, GFP_KERNEL);
 	if (!cp->init_block) {
 		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
 		goto err_out_iounmap;
@@ -5109,8 +5113,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;

 err_out_free_consistent:
-	pci_free_consistent(pdev, sizeof(struct cas_init_block),
-			    cp->init_block, cp->block_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+			  cp->init_block, cp->block_dvma);

 err_out_iounmap:
 	mutex_lock(&cp->pm_mutex);
@@ -5164,18 +5168,17 @@ static void cas_remove_one(struct pci_dev *pdev)
					       cp->orig_cacheline_size);
 	}
 #endif
-	pci_free_consistent(pdev, sizeof(struct cas_init_block),
-			    cp->init_block, cp->block_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+			  cp->init_block, cp->block_dvma);
 	pci_iounmap(pdev, cp->regs);
 	free_netdev(dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 }

-#ifdef CONFIG_PM
-static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cas_suspend(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct cas *cp = netdev_priv(dev);
 	unsigned long flags;
@@ -5204,9 +5207,9 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
 	return 0;
 }

-static int cas_resume(struct pci_dev *pdev)
+static int __maybe_unused cas_resume(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct cas *cp = netdev_priv(dev);

 	netdev_info(dev, "resuming\n");
@@ -5227,17 +5230,15 @@
 	mutex_unlock(&cp->pm_mutex);
 	return 0;
 }
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);

 static struct pci_driver cas_driver = {
 	.name		= DRV_MODULE_NAME,
 	.id_table	= cas_pci_tbl,
 	.probe		= cas_init_one,
 	.remove		= cas_remove_one,
-#ifdef CONFIG_PM
-	.suspend	= cas_suspend,
-	.resume		= cas_resume
-#endif
+	.driver.pm	= &cas_pm_ops,
 };

 static int __init cas_init(void)
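The cassini.c hunks above are a mechanical substitution: each pci_map_page(), pci_unmap_page() and pci_dma_sync_single_for_*() call becomes its generic dma_* counterpart, which takes &pdev->dev instead of the struct pci_dev and DMA_TO_DEVICE/DMA_FROM_DEVICE instead of the PCI_DMA_* direction constants. A minimal sketch of the streaming-mapping pattern follows; the example_* names are placeholders rather than symbols from the driver, and the dma_mapping_error() check is good practice rather than something this patch adds:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb head for transmit the way the converted driver does. */
static int example_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t *mapping)
{
	unsigned int len = skb_headlen(skb);

	*mapping = dma_map_page(&pdev->dev, virt_to_page(skb->data),
				offset_in_page(skb->data), len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* The matching unmap, once the hardware is done with the buffer. */
static void example_unmap_tx(struct pci_dev *pdev, dma_addr_t mapping,
			     unsigned int len)
{
	dma_unmap_page(&pdev->dev, mapping, len, DMA_TO_DEVICE);
}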
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 1b697e4cd7dc..9b5effb72657 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9873,9 +9873,9 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
 	}
 }

-static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused niu_suspend(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct niu *np = netdev_priv(dev);
 	unsigned long flags;
@@ -9897,14 +9897,12 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 	niu_stop_hw(np);
 	spin_unlock_irqrestore(&np->lock, flags);

-	pci_save_state(pdev);
-
 	return 0;
 }

-static int niu_resume(struct pci_dev *pdev)
+static int __maybe_unused niu_resume(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct niu *np = netdev_priv(dev);
 	unsigned long flags;
 	int err;
@@ -9912,8 +9910,6 @@
 	if (!netif_running(dev))
 		return 0;

-	pci_restore_state(pdev);
-
 	netif_device_attach(dev);

 	spin_lock_irqsave(&np->lock, flags);
@@ -9930,13 +9926,14 @@
 	return err;
 }

+static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume);
+
 static struct pci_driver niu_pci_driver = {
 	.name		= DRV_MODULE_NAME,
 	.id_table	= niu_pci_tbl,
 	.probe		= niu_pci_init_one,
 	.remove		= niu_pci_remove_one,
-	.suspend	= niu_suspend,
-	.resume		= niu_resume,
+	.driver.pm	= &niu_pm_ops,
 };

 #ifdef CONFIG_SPARC64
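niu.c gets the same power-management conversion as cassini.c: the legacy .suspend/.resume PCI hooks are replaced by a dev_pm_ops built with SIMPLE_DEV_PM_OPS(), the callbacks now take a struct device * and are tagged __maybe_unused so they compile away quietly when CONFIG_PM_SLEEP is disabled, and the explicit pci_save_state()/pci_restore_state() calls are dropped because the PCI core saves and restores config space around the callbacks of drivers that use generic PM. A skeleton of the pattern, with example_* placeholder names:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);

	/* Quiesce the device; no pci_save_state() here, the PCI core
	 * handles config-space save/restore for dev_pm_ops drivers.
	 */
	netif_device_detach(ndev);
	return 0;
}

static int __maybe_unused example_resume(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);

	netif_device_attach(ndev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	/* .id_table, .probe and .remove omitted for brevity */
	.driver.pm	= &example_pm_ops,
};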
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 2d392a7b179a..eeb8518c8a84 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -670,7 +670,8 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
 			dma_addr = le64_to_cpu(txd->buffer);
 			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

-			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+			dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
+				       DMA_TO_DEVICE);
 			entry = NEXT_TX(entry);
 		}
@@ -809,16 +810,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
 				drops++;
 				goto drop_it;
 			}
-			pci_unmap_page(gp->pdev, dma_addr,
-				       RX_BUF_ALLOC_SIZE(gp),
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&gp->pdev->dev, dma_addr,
+				       RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
 			gp->rx_skbs[entry] = new_skb;
 			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
-			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+			rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
-							       PCI_DMA_FROMDEVICE));
+							       DMA_FROM_DEVICE));
 			skb_reserve(new_skb, RX_OFFSET);

 			/* Trim the original skb for the netif. */
@@ -833,9 +833,11 @@
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
-			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
+						DMA_FROM_DEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
-			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
+						   len, DMA_FROM_DEVICE);

 			/* We'll reuse the original ring buffer. */
 			skb = copy_skb;
@@ -1020,10 +1022,10 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 		u32 len;

 		len = skb->len;
-		mapping = pci_map_page(gp->pdev,
+		mapping = dma_map_page(&gp->pdev->dev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
-				       len, PCI_DMA_TODEVICE);
+				       len, DMA_TO_DEVICE);
 		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
 		if (gem_intme(entry))
 			ctrl |= TXDCTRL_INTME;
@@ -1046,9 +1048,10 @@
 		 * Otherwise we could race with the device.
 		 */
 		first_len = skb_headlen(skb);
-		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+		first_mapping = dma_map_page(&gp->pdev->dev,
+					     virt_to_page(skb->data),
					     offset_in_page(skb->data),
-					     first_len, PCI_DMA_TODEVICE);
+					     first_len, DMA_TO_DEVICE);
 		entry = NEXT_TX(entry);

 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -1574,9 +1577,9 @@ static void gem_clean_rings(struct gem *gp)
 		if (gp->rx_skbs[i] != NULL) {
 			skb = gp->rx_skbs[i];
 			dma_addr = le64_to_cpu(rxd->buffer);
-			pci_unmap_page(gp->pdev, dma_addr,
+			dma_unmap_page(&gp->pdev->dev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			gp->rx_skbs[i] = NULL;
 		}
@@ -1598,9 +1601,9 @@
 				txd = &gb->txd[ent];
 				dma_addr = le64_to_cpu(txd->buffer);
-				pci_unmap_page(gp->pdev, dma_addr,
+				dma_unmap_page(&gp->pdev->dev, dma_addr,
					       le64_to_cpu(txd->control_word) &
-					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+					       TXDCTRL_BUFSZ, DMA_TO_DEVICE);

 				if (frag != skb_shinfo(skb)->nr_frags)
 					i++;
@@ -1637,11 +1640,11 @@ static void gem_init_rings(struct gem *gp)
 		gp->rx_skbs[i] = skb;
 		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
-		dma_addr = pci_map_page(gp->pdev,
+		dma_addr = dma_map_page(&gp->pdev->dev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
-					PCI_DMA_FROMDEVICE);
+					DMA_FROM_DEVICE);
 		rxd->buffer = cpu_to_le64(dma_addr);
 		dma_wmb();
 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
@@ -2139,20 +2142,6 @@ static int gem_do_start(struct net_device *dev)
 	struct gem *gp = netdev_priv(dev);
 	int rc;

-	/* Enable the cell */
-	gem_get_cell(gp);
-
-	/* Make sure PCI access and bus master are enabled */
-	rc = pci_enable_device(gp->pdev);
-	if (rc) {
-		netdev_err(dev, "Failed to enable chip on PCI bus !\n");
-
-		/* Put cell and forget it for now, it will be considered as
-		 * still asleep, a new sleep cycle may bring it back
-		 */
-		gem_put_cell(gp);
-		return -ENXIO;
-	}
 	pci_set_master(gp->pdev);

 	/* Init & setup chip hardware */
@@ -2230,13 +2219,6 @@ static void gem_do_stop(struct net_device *dev, int wol)
 	/* Shut the PHY down eventually and setup WOL */
 	gem_stop_phy(gp, wol);
-
-	/* Make sure bus master is disabled */
-	pci_disable_device(gp->pdev);
-
-	/* Cell not needed neither if no WOL */
-	if (!wol)
-		gem_put_cell(gp);
 }
@@ -2288,26 +2270,53 @@ static void gem_reset_task(struct work_struct *work)
 static int gem_open(struct net_device *dev)
 {
+	struct gem *gp = netdev_priv(dev);
+	int rc;
+
 	/* We allow open while suspended, we just do nothing,
 	 * the chip will be initialized in resume()
 	 */
-	if (netif_device_present(dev))
+	if (netif_device_present(dev)) {
+		/* Enable the cell */
+		gem_get_cell(gp);
+
+		/* Make sure PCI access and bus master are enabled */
+		rc = pci_enable_device(gp->pdev);
+		if (rc) {
+			netdev_err(dev, "Failed to enable chip on PCI bus !\n");
+
+			/* Put cell and forget it for now, it will be considered
+			 *as still asleep, a new sleep cycle may bring it back
+			 */
+			gem_put_cell(gp);
+			return -ENXIO;
+		}
 		return gem_do_start(dev);
+	}
+
 	return 0;
 }

 static int gem_close(struct net_device *dev)
 {
-	if (netif_device_present(dev))
+	struct gem *gp = netdev_priv(dev);
+
+	if (netif_device_present(dev)) {
 		gem_do_stop(dev, 0);

+		/* Make sure bus master is disabled */
+		pci_disable_device(gp->pdev);
+
+		/* Cell not needed neither if no WOL */
+		if (!gp->asleep_wol)
+			gem_put_cell(gp);
+	}
 	return 0;
 }

-#ifdef CONFIG_PM
-static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused gem_suspend(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct gem *gp = netdev_priv(dev);

 	/* Lock the network stack first to avoid racing with open/close,
@@ -2336,15 +2345,19 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
 	gp->asleep_wol = !!gp->wake_on_lan;
 	gem_do_stop(dev, gp->asleep_wol);

+	/* Cell not needed neither if no WOL */
+	if (!gp->asleep_wol)
+		gem_put_cell(gp);
+
 	/* Unlock the network stack */
 	rtnl_unlock();

 	return 0;
 }

-static int gem_resume(struct pci_dev *pdev)
+static int __maybe_unused gem_resume(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct gem *gp = netdev_priv(dev);

 	/* See locking comment in gem_suspend */
@@ -2359,6 +2372,9 @@
 		return 0;
 	}

+	/* Enable the cell */
+	gem_get_cell(gp);
+
 	/* Restart chip. If that fails there isn't much we can do, we
 	 * leave things stopped.
 	 */
@@ -2375,7 +2391,6 @@
 	return 0;
 }
-#endif /* CONFIG_PM */

 static struct net_device_stats *gem_get_stats(struct net_device *dev)
 {
@@ -2802,10 +2817,8 @@ static void gem_remove_one(struct pci_dev *pdev)
 		cancel_work_sync(&gp->reset_task);

 		/* Free resources */
-		pci_free_consistent(pdev,
-				    sizeof(struct gem_init_block),
-				    gp->init_block,
-				    gp->gblock_dvma);
+		dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
+				  gp->init_block, gp->gblock_dvma);
 		iounmap(gp->regs);
 		pci_release_regions(pdev);
 		free_netdev(dev);
@@ -2861,10 +2874,10 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			pr_err("No usable DMA configuration, aborting\n");
 			goto err_disable_device;
@@ -2953,8 +2966,8 @@
 	 * PAGE_SIZE aligned.
 	 */
 	gp->init_block = (struct gem_init_block *)
-		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
-				     &gp->gblock_dvma);
+		dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+				   &gp->gblock_dvma, GFP_KERNEL);
 	if (!gp->init_block) {
 		pr_err("Cannot allocate init block, aborting\n");
 		err = -ENOMEM;
@@ -3019,16 +3032,14 @@ err_disable_device:
 }

+static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume);
 static struct pci_driver gem_driver = {
 	.name		= GEM_MODULE_NAME,
 	.id_table	= gem_pci_tbl,
 	.probe		= gem_init_one,
 	.remove		= gem_remove_one,
-#ifdef CONFIG_PM
-	.suspend	= gem_suspend,
-	.resume		= gem_resume,
-#endif /* CONFIG_PM */
+	.driver.pm	= &gem_pm_ops,
 };

 module_pci_driver(gem_driver);
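Across all three drivers the coherent-memory and DMA-mask calls convert the same way: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() with an explicit GFP_KERNEL (the old compat wrapper hard-coded GFP_ATOMIC, so passing GFP_KERNEL in probe context is a mild relaxation), and pci_set_dma_mask()/pci_set_consistent_dma_mask() become dma_set_mask()/dma_set_coherent_mask(). A sketch of the probe-time sequence, using an illustrative example_priv structure and a hypothetical 4 KiB block size:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct example_priv {			/* illustrative, not from the drivers */
	void *init_block;
	dma_addr_t block_dvma;
};

static int example_probe_dma(struct pci_dev *pdev, struct example_priv *p)
{
	int err;

	/* Prefer 64-bit DMA and fall back to 32-bit, as cassini.c does. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err < 0)
			return err;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	/* The GFP flag is now explicit; the pci_alloc_consistent()
	 * wrapper always allocated with GFP_ATOMIC.
	 */
	p->init_block = dma_alloc_coherent(&pdev->dev, 4096,
					   &p->block_dvma, GFP_KERNEL);
	if (!p->init_block)
		return -ENOMEM;
	return 0;
}

static void example_remove_dma(struct pci_dev *pdev, struct example_priv *p)
{
	dma_free_coherent(&pdev->dev, 4096, p->init_block, p->block_dvma);
}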