mirror of https://gitee.com/openkylin/linux.git
staging: qlge: replace deprecated apis pci_dma_*
Replace the legacy/deprecated pci_dma_* functions with the new dma_* functions, and the PCI_DMA_* direction macros with the DMA_* macros.

Signed-off-by: realwakka <realwakka@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20200420154009.21161-1-realwakka@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent 0eb79fd1e9
commit e955a071b9
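The conversion is mechanical: every deprecated pci_* DMA wrapper took a struct pci_dev *, while the generic DMA API takes the embedded struct device * (&pdev->dev), and the PCI_DMA_* direction constants map one-to-one onto enum dma_data_direction values. A minimal sketch of the pattern, not code from this driver (the function and its parameters are hypothetical stand-ins):

	/* Sketch of the pci_dma_* -> dma_* conversion applied throughout
	 * this commit. "example_map", "buf" and "len" are illustrative only.
	 */
	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int example_map(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t addr;

		/* Old (deprecated wrappers):
		 *   addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
		 *   if (pci_dma_mapping_error(pdev, addr)) ...
		 *
		 * New (generic DMA API): pass the embedded struct device and
		 * an enum dma_data_direction value instead of PCI_DMA_*.
		 */
		addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, addr))
			return -ENOMEM;

		dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
		return 0;
	}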
@@ -214,12 +214,13 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 	u32 mask;
 	u32 value;

-	direction =
-		(bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
-		PCI_DMA_FROMDEVICE;
+	if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
+		direction = DMA_TO_DEVICE;
+	else
+		direction = DMA_FROM_DEVICE;

-	map = pci_map_single(qdev->pdev, ptr, size, direction);
-	if (pci_dma_mapping_error(qdev->pdev, map)) {
+	map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
+	if (dma_mapping_error(&qdev->pdev->dev, map)) {
 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 		return -ENOMEM;
 	}
@@ -248,7 +249,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 	status = ql_wait_cfg(qdev, bit);
 exit:
 	ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
-	pci_unmap_single(qdev->pdev, map, size, direction);
+	dma_unmap_single(&qdev->pdev->dev, map, size, direction);
 	return status;
 }

@@ -983,14 +984,14 @@ static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
 {
 	struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);

-	pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
-				    qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
+				qdev->lbq_buf_size, DMA_FROM_DEVICE);

 	if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
 	    ql_lbq_block_size(qdev)) {
 		/* last chunk of the master page */
-		pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
-			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
+			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
 	}

 	return lbq_desc;
@@ -1036,10 +1037,10 @@ static int qlge_refill_sb(struct rx_ring *rx_ring,
 			return -ENOMEM;
 		skb_reserve(skb, QLGE_SB_PAD);

-		sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+		sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
 						    SMALL_BUF_MAP_SIZE,
-						    PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+						    DMA_FROM_DEVICE);
+		if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
 			netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
 			dev_kfree_skb_any(skb);
 			return -EIO;
@@ -1064,10 +1065,10 @@ static int qlge_refill_lb(struct rx_ring *rx_ring,
 		page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
 		if (unlikely(!page))
 			return -ENOMEM;
-		dma_addr = pci_map_page(qdev->pdev, page, 0,
+		dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
 					ql_lbq_block_size(qdev),
-					PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+					DMA_FROM_DEVICE);
+		if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
 			__free_pages(page, qdev->lbq_buf_order);
 			netif_err(qdev, drv, qdev->ndev,
 				  "PCI mapping failed.\n");
@@ -1224,20 +1225,20 @@ static void ql_unmap_send(struct ql_adapter *qdev,
 					     qdev->ndev,
 					     "unmapping OAL area.\n");
 			}
-			pci_unmap_single(qdev->pdev,
+			dma_unmap_single(&qdev->pdev->dev,
 					 dma_unmap_addr(&tx_ring_desc->map[i],
 							mapaddr),
 					 dma_unmap_len(&tx_ring_desc->map[i],
 						       maplen),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		} else {
 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
 				     "unmapping frag %d.\n", i);
-			pci_unmap_page(qdev->pdev,
+			dma_unmap_page(&qdev->pdev->dev,
 				       dma_unmap_addr(&tx_ring_desc->map[i],
 						      mapaddr),
 				       dma_unmap_len(&tx_ring_desc->map[i],
-						     maplen), PCI_DMA_TODEVICE);
+						     maplen), DMA_TO_DEVICE);
 		}
 	}

@@ -1263,9 +1264,9 @@ static int ql_map_send(struct ql_adapter *qdev,
 	/*
 	 * Map the skb buffer first.
 	 */
-	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);

-	err = pci_dma_mapping_error(qdev->pdev, map);
+	err = dma_mapping_error(&qdev->pdev->dev, map);
 	if (err) {
 		netif_err(qdev, tx_queued, qdev->ndev,
 			  "PCI mapping failed with error: %d\n", err);
@@ -1310,10 +1311,10 @@ static int ql_map_send(struct ql_adapter *qdev,
 		 * etc...
 		 */
 		/* Tack on the OAL in the eighth segment of IOCB. */
-		map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+		map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
 				     sizeof(struct oal),
-				     PCI_DMA_TODEVICE);
-		err = pci_dma_mapping_error(qdev->pdev, map);
+				     DMA_TO_DEVICE);
+		err = dma_mapping_error(&qdev->pdev->dev, map);
 		if (err) {
 			netif_err(qdev, tx_queued, qdev->ndev,
 				  "PCI mapping outbound address list with error: %d\n",
@@ -1584,8 +1585,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	}
 	skb_reserve(new_skb, NET_IP_ALIGN);

-	pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
-				    SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
+				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);

 	skb_put_data(new_skb, skb->data, length);

@@ -1707,8 +1708,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		 * Headers fit nicely into a small buffer.
 		 */
 		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
-		pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
-				 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
 		skb = sbq_desc->p.skb;
 		ql_realign_skb(skb, hdr_len);
 		skb_put(skb, hdr_len);
@@ -1737,10 +1738,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			 * buffer.
 			 */
 			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
-			pci_dma_sync_single_for_cpu(qdev->pdev,
+			dma_sync_single_for_cpu(&qdev->pdev->dev,
 						sbq_desc->dma_addr,
 						SMALL_BUF_MAP_SIZE,
-						PCI_DMA_FROMDEVICE);
+						DMA_FROM_DEVICE);
 			skb_put_data(skb, sbq_desc->p.skb->data, length);
 		} else {
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1750,9 +1751,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			skb = sbq_desc->p.skb;
 			ql_realign_skb(skb, length);
 			skb_put(skb, length);
-			pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
 					 SMALL_BUF_MAP_SIZE,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			sbq_desc->p.skb = NULL;
 		}
 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
@@ -1787,9 +1788,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 				  "No skb available, drop the packet.\n");
 			return NULL;
 		}
-		pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
 			       qdev->lbq_buf_size,
-			       PCI_DMA_FROMDEVICE);
+			       DMA_FROM_DEVICE);
 		skb_reserve(skb, NET_IP_ALIGN);
 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 			     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
@@ -1820,8 +1821,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		int size, i = 0;

 		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
-		pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
-				 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
 			/*
 			 * This is an non TCP/UDP IP frame, so
@@ -2636,14 +2637,14 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 static void ql_free_shadow_space(struct ql_adapter *qdev)
 {
 	if (qdev->rx_ring_shadow_reg_area) {
-		pci_free_consistent(qdev->pdev,
+		dma_free_coherent(&qdev->pdev->dev,
 				    PAGE_SIZE,
 				    qdev->rx_ring_shadow_reg_area,
 				    qdev->rx_ring_shadow_reg_dma);
 		qdev->rx_ring_shadow_reg_area = NULL;
 	}
 	if (qdev->tx_ring_shadow_reg_area) {
-		pci_free_consistent(qdev->pdev,
+		dma_free_coherent(&qdev->pdev->dev,
 				    PAGE_SIZE,
 				    qdev->tx_ring_shadow_reg_area,
 				    qdev->tx_ring_shadow_reg_dma);
@@ -2654,8 +2655,8 @@ static void ql_free_shadow_space(struct ql_adapter *qdev)
 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
 {
 	qdev->rx_ring_shadow_reg_area =
-		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
-				      &qdev->rx_ring_shadow_reg_dma);
+		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+				   &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
 	if (!qdev->rx_ring_shadow_reg_area) {
 		netif_err(qdev, ifup, qdev->ndev,
 			  "Allocation of RX shadow space failed.\n");
@@ -2663,8 +2664,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
 	}

 	qdev->tx_ring_shadow_reg_area =
-		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
-				      &qdev->tx_ring_shadow_reg_dma);
+		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+				   &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
 	if (!qdev->tx_ring_shadow_reg_area) {
 		netif_err(qdev, ifup, qdev->ndev,
 			  "Allocation of TX shadow space failed.\n");
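Two details in the two hunks above are worth calling out, stated here as editorial notes rather than part of the patch. First, the pci_*_consistent() compat wrappers hard-coded GFP_ATOMIC internally, which is why GFP_ATOMIC is passed explicitly to dma_alloc_coherent() here. Second, dma_alloc_coherent() returns zeroed memory, so the "zalloc" semantics of pci_zalloc_consistent() are preserved without an explicit memset(). A sketch of the equivalence:

	/* Editorial sketch, not driver code: the two calls below are
	 * equivalent, assuming a non-sleeping context (the old wrapper
	 * always allocated with GFP_ATOMIC).
	 *
	 *   old: p = pci_zalloc_consistent(pdev, PAGE_SIZE, &dma);
	 *   new: p = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma,
	 *                               GFP_ATOMIC);
	 *
	 * dma_alloc_coherent() hands back zeroed memory, so no memset()
	 * is needed to keep the zalloc behavior.
	 */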
@@ -2673,7 +2674,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
 	return 0;

err_wqp_sh_area:
-	pci_free_consistent(qdev->pdev,
+	dma_free_coherent(&qdev->pdev->dev,
 			    PAGE_SIZE,
 			    qdev->rx_ring_shadow_reg_area,
 			    qdev->rx_ring_shadow_reg_dma);
@@ -2702,7 +2703,7 @@ static void ql_free_tx_resources(struct ql_adapter *qdev,
 			     struct tx_ring *tx_ring)
 {
 	if (tx_ring->wq_base) {
-		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
 				    tx_ring->wq_base, tx_ring->wq_base_dma);
 		tx_ring->wq_base = NULL;
 	}
@@ -2714,8 +2715,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
 			     struct tx_ring *tx_ring)
 {
 	tx_ring->wq_base =
-		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
-				     &tx_ring->wq_base_dma);
+		dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+				   &tx_ring->wq_base_dma, GFP_ATOMIC);

 	if (!tx_ring->wq_base ||
 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
@@ -2729,7 +2730,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,

 	return 0;
 err:
-	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+	dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
 			    tx_ring->wq_base, tx_ring->wq_base_dma);
 	tx_ring->wq_base = NULL;
 pci_alloc_err:
@@ -2748,17 +2749,17 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			&lbq->queue[lbq->next_to_clean];

 		if (lbq_desc->p.pg_chunk.offset == last_offset)
-			pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
 				       ql_lbq_block_size(qdev),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		put_page(lbq_desc->p.pg_chunk.page);

 		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
 	}

 	if (rx_ring->master_chunk.page) {
-		pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
-			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
+			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
 		put_page(rx_ring->master_chunk.page);
 		rx_ring->master_chunk.page = NULL;
 	}
@@ -2777,9 +2778,9 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			return;
 		}
 		if (sbq_desc->p.skb) {
-			pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
 					 SMALL_BUF_MAP_SIZE,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			dev_kfree_skb(sbq_desc->p.skb);
 			sbq_desc->p.skb = NULL;
 		}
@@ -2820,8 +2821,8 @@ static int qlge_init_bq(struct qlge_bq *bq)
 	__le64 *buf_ptr;
 	int i;

-	bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
-					&bq->base_dma);
+	bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+				      &bq->base_dma, GFP_ATOMIC);
 	if (!bq->base) {
 		netif_err(qdev, ifup, qdev->ndev,
 			  "ring %u %s allocation failed.\n", rx_ring->cq_id,
@@ -2850,7 +2851,7 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
 {
 	/* Free the small buffer queue. */
 	if (rx_ring->sbq.base) {
-		pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
 				    rx_ring->sbq.base, rx_ring->sbq.base_dma);
 		rx_ring->sbq.base = NULL;
 	}
@@ -2861,7 +2862,7 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,

 	/* Free the large buffer queue. */
 	if (rx_ring->lbq.base) {
-		pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
 				    rx_ring->lbq.base, rx_ring->lbq.base_dma);
 		rx_ring->lbq.base = NULL;
 	}
@@ -2872,7 +2873,7 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,

 	/* Free the rx queue. */
 	if (rx_ring->cq_base) {
-		pci_free_consistent(qdev->pdev,
+		dma_free_coherent(&qdev->pdev->dev,
 				    rx_ring->cq_size,
 				    rx_ring->cq_base, rx_ring->cq_base_dma);
 		rx_ring->cq_base = NULL;
@@ -2890,8 +2891,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
 	 * Allocate the completion queue for this rx_ring.
 	 */
 	rx_ring->cq_base =
-		pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
-				     &rx_ring->cq_base_dma);
+		dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
+				   &rx_ring->cq_base_dma, GFP_ATOMIC);

 	if (!rx_ring->cq_base) {
 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
@@ -4430,13 +4431,13 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
 	}

 	pci_set_master(pdev);
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		set_bit(QL_DMA64, &qdev->flags);
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (!err)
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	}

 	if (err) {
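Editorial side note, not part of this patch: the generic DMA API also provides dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call and would let the hunk above collapse further. A sketch of what that would look like in this spot:

	/* Hypothetical further simplification, not what the commit does:
	 * dma_set_mask_and_coherent() sets both masks together and
	 * returns 0 on success.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		set_bit(QL_DMA64, &qdev->flags);
	else
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));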