net/mlx4_en: do batched put_page using atomic_sub
This patch fixes a couple of error paths after allocation failures. An atomic set of the page reference counter is safe only if it is zero; otherwise the set can race with any speculative get_page_unless_zero. Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
04aeb56a17
commit
851b10d608
|
@ -126,7 +126,9 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
|
|||
dma_unmap_page(priv->ddev, page_alloc[i].dma,
|
||||
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
|
||||
page = page_alloc[i].page;
|
||||
set_page_count(page, 1);
|
||||
/* Revert changes done by mlx4_alloc_pages */
|
||||
page_ref_sub(page, page_alloc[i].page_size /
|
||||
priv->frag_info[i].frag_stride - 1);
|
||||
put_page(page);
|
||||
}
|
||||
}
|
||||
|
@ -176,7 +178,9 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
|
|||
dma_unmap_page(priv->ddev, page_alloc->dma,
|
||||
page_alloc->page_size, PCI_DMA_FROMDEVICE);
|
||||
page = page_alloc->page;
|
||||
set_page_count(page, 1);
|
||||
/* Revert changes done by mlx4_alloc_pages */
|
||||
page_ref_sub(page, page_alloc->page_size /
|
||||
priv->frag_info[i].frag_stride - 1);
|
||||
put_page(page);
|
||||
page_alloc->page = NULL;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue