mirror of https://gitee.com/openkylin/linux.git
Merge branch 'dma_rmb_wmb'
Alexander Duyck says:

====================
Replace wmb()/rmb() with dma_wmb()/dma_rmb() where appropriate, round 2

More cleanup of drivers in order to start making use of dma_rmb and
dma_wmb calls. This is another pass of what I would consider to be
low-hanging fruit. There may be other opportunities to make use of the
barriers in the Mellanox and Chelsio drivers, but I didn't want to risk
meddling with code I was not completely familiar with, so I am leaving
that for future work.

I have revisited the Mellanox driver changes. This time around I went
only for the sections with a clearly defined pattern. For dma_wmb I
used it between the writes of the descriptor fields and the write of
the owner or size field. For dma_rmb I used it to replace the rmb
following a read of the ownership bit in the descriptor.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 6fb8c381a6
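Every write-side change in the diff below has the same shape: the driver fills in the descriptor fields the device will parse, issues a barrier, and only then writes the field that hands the descriptor to hardware (a generation bit, size, or owner flag). Because both sides of that barrier are stores to coherent DMA memory, the lighter dma_wmb() is sufficient; a full wmb() is kept only where the following store is MMIO, such as ringing a doorbell. A minimal sketch of the producer-side pattern, with a hypothetical descriptor layout (struct demo_desc and demo_post_buf are illustrative, not taken from any driver in this merge):

	/* Hypothetical descriptor: the device begins parsing once len_gen
	 * carries the current generation bit.
	 */
	struct demo_desc {
		__be32 addr_lo;
		__be32 addr_hi;
		__be32 len_gen;
	};

	static void demo_post_buf(struct demo_desc *d, dma_addr_t mapping,
				  u32 len_gen)
	{
		d->addr_lo = cpu_to_be32(lower_32_bits(mapping));
		d->addr_hi = cpu_to_be32(upper_32_bits(mapping));

		/* Order the address stores before the store that makes the
		 * descriptor visible to the device. Both sides are coherent
		 * DMA memory, so dma_wmb() is enough here; a subsequent
		 * doorbell write to an MMIO register would still need wmb().
		 */
		dma_wmb();

		d->len_gen = cpu_to_be32(len_gen);
	}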
drivers/net/ethernet/chelsio/cxgb3/sge.c

@@ -422,7 +422,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 
 	d->addr_lo = cpu_to_be32(mapping);
 	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
-	wmb();
+	dma_wmb();
 	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
 	return 0;
@@ -433,7 +433,7 @@ static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
 {
 	d->addr_lo = cpu_to_be32(mapping);
 	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
-	wmb();
+	dma_wmb();
 	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
 	return 0;
@@ -579,7 +579,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
 	q->sdesc[q->pidx] = q->sdesc[idx];
 	to->addr_lo = from->addr_lo;	/* already big endian */
 	to->addr_hi = from->addr_hi;	/* likewise */
-	wmb();
+	dma_wmb();
 	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
 	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
@@ -1068,7 +1068,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
 		sd->eop = 1;
 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
 				   V_WR_SGLSFLT(flits)) | wr_hi;
-		wmb();
+		dma_wmb();
 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
 				   V_WR_GEN(gen)) | wr_lo;
 		wr_gen2(d, gen);
@@ -1114,7 +1114,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
 		}
 		sd->eop = 1;
 		wrp->wr_hi |= htonl(F_WR_EOP);
-		wmb();
+		dma_wmb();
 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
 		wr_gen2((struct tx_desc *)wp, ogen);
 		WARN_ON(ndesc != 0);
@@ -1184,7 +1184,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
 			      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
 			      | F_WR_SOP | F_WR_EOP | compl);
-	wmb();
+	dma_wmb();
 	cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
 			      V_WR_TID(q->token));
 	wr_gen2(d, gen);
@@ -1342,7 +1342,7 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
 
 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
 					V_WR_BCNTLFLT(len & 7));
-	wmb();
+	dma_wmb();
 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
 					V_WR_LEN((len + 7) / 8));
 	wr_gen2(d, gen);
@@ -2271,7 +2271,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 		u32 len, flags;
 		__be32 rss_hi, rss_lo;
 
-		rmb();
+		dma_rmb();
 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
 		rss_hi = *(const __be32 *)r;
 		rss_lo = r->rss_hdr.rss_hash_val;
@@ -2488,7 +2488,7 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
 		}
 		if (!is_new_response(r, q))
 			break;
-		rmb();
+		dma_rmb();
 	} while (is_pure_response(r));
 
 	if (sleeping)
@@ -2523,7 +2523,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
 
 	if (!is_new_response(r, q))
 		return -1;
-	rmb();
+	dma_rmb();
 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
drivers/net/ethernet/chelsio/cxgb4/sge.c

@@ -1968,7 +1968,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 		if (!is_new_response(rc, q))
 			break;
 
-		rmb();
+		dma_rmb();
 		rsp_type = RSPD_TYPE(rc->type_gen);
 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
 			struct page_frag *fp;
@@ -2160,7 +2160,7 @@ static unsigned int process_intrq(struct adapter *adap)
 		if (!is_new_response(rc, q))
 			break;
 
-		rmb();
+		dma_rmb();
 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
 			unsigned int qid = ntohl(rc->pldbuflen_qid);
 
drivers/net/ethernet/chelsio/cxgb4vf/sge.c

@@ -1751,7 +1751,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 		 * Figure out what kind of response we've received from the
 		 * SGE.
 		 */
-		rmb();
+		dma_rmb();
 		rsp_type = RSPD_TYPE(rc->type_gen);
 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
 			struct page_frag *fp;
@@ -1935,7 +1935,7 @@ static unsigned int process_intrq(struct adapter *adapter)
 		 * error and go on to the next response message. This should
 		 * never happen ...
 		 */
-		rmb();
+		dma_rmb();
 		if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
 			dev_err(adapter->pdev_dev,
 				"Unexpected INTRQ response type %d\n",
drivers/net/ethernet/intel/e100.c

@@ -899,7 +899,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 	/* Order is important otherwise we'll be in a race with h/w:
 	 * set S-bit in current first, then clear S-bit in previous. */
 	cb->command |= cpu_to_le16(cb_s);
-	wmb();
+	dma_wmb();
 	cb->prev->command &= cpu_to_le16(~cb_s);
 
 	while (nic->cb_to_send != nic->cb_to_use) {
@@ -1843,7 +1843,7 @@ static int e100_tx_clean(struct nic *nic)
 	for (cb = nic->cb_to_clean;
 	     cb->status & cpu_to_le16(cb_complete);
 	     cb = nic->cb_to_clean = cb->next) {
-		rmb(); /* read skb after status */
+		dma_rmb(); /* read skb after status */
 		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
 			     "cb[%d]->status = 0x%04X\n",
 			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
@@ -1993,7 +1993,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
 	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
 		     "status=0x%04X\n", rfd_status);
-	rmb(); /* read size after status bit */
+	dma_rmb(); /* read size after status bit */
 
 	/* If data isn't ready, nothing to indicate */
 	if (unlikely(!(rfd_status & cb_complete))) {
drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -1554,7 +1554,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		 * any other fields out of the rx_desc until we know the
 		 * DD bit is set.
 		 */
-		rmb();
+		dma_rmb();
 		if (i40e_rx_is_programming_status(qword)) {
 			i40e_clean_programming_status(rx_ring, rx_desc);
 			I40E_RX_INCREMENT(rx_ring, i);
@@ -1745,7 +1745,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		 * any other fields out of the rx_desc until we know the
 		 * DD bit is set.
 		 */
-		rmb();
+		dma_rmb();
 
 		if (i40e_rx_is_programming_status(qword)) {
 			i40e_clean_programming_status(rx_ring, rx_desc);
drivers/net/ethernet/intel/i40evf/i40e_txrx.c

@@ -1034,7 +1034,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		 * any other fields out of the rx_desc until we know the
 		 * DD bit is set.
 		 */
-		rmb();
+		dma_rmb();
 		rx_bi = &rx_ring->rx_bi[i];
 		skb = rx_bi->skb;
 		if (likely(!skb)) {
@@ -1213,7 +1213,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		 * any other fields out of the rx_desc until we know the
 		 * DD bit is set.
 		 */
-		rmb();
+		dma_rmb();
 
 		rx_bi = &rx_ring->rx_bi[i];
 		skb = rx_bi->skb;
drivers/net/ethernet/mellanox/mlx4/en_rx.c

@@ -771,7 +771,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/*
 		 * make sure we read the CQE after we read the ownership bit
 		 */
-		rmb();
+		dma_rmb();
 
 		/* Drop packet on bad receive or bad checksum */
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
drivers/net/ethernet/mellanox/mlx4/en_tx.c

@@ -416,7 +416,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 		 * make sure we read the CQE after we read the
 		 * ownership bit
 		 */
-		rmb();
+		dma_rmb();
 
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 			     MLX4_CQE_OPCODE_ERROR)) {
@@ -667,7 +667,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 			       skb_frag_size(&shinfo->frags[0]));
 		}
 
-		wmb();
+		dma_wmb();
 		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
 	}
 }
@@ -804,7 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			data->addr = cpu_to_be64(dma);
 			data->lkey = ring->mr_key;
-			wmb();
+			dma_wmb();
 			data->byte_count = cpu_to_be32(byte_count);
 			--data;
 		}
@@ -821,7 +821,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			data->addr = cpu_to_be64(dma);
 			data->lkey = ring->mr_key;
-			wmb();
+			dma_wmb();
 			data->byte_count = cpu_to_be32(byte_count);
 		}
 		/* tx completion can avoid cache line miss for common cases */
@@ -938,7 +938,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Ensure new descriptor hits memory
 		 * before setting ownership of this descriptor to HW
 		 */
-		wmb();
+		dma_wmb();
 		tx_desc->ctrl.owner_opcode = op_own;
 
 		wmb();
@@ -958,7 +958,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Ensure new descriptor hits memory
 		 * before setting ownership of this descriptor to HW
 		 */
-		wmb();
+		dma_wmb();
 		tx_desc->ctrl.owner_opcode = op_own;
 		if (send_doorbell) {
 			wmb();
drivers/net/ethernet/mellanox/mlx4/eq.c

@@ -188,7 +188,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
 	s_eqe->slave_id = slave;
 	/* ensure all information is written before setting the ownersip bit */
-	wmb();
+	dma_wmb();
 	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
 	++slave_eq->prod;
 
@@ -473,7 +473,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
 		 */
-		rmb();
+		dma_rmb();
 
 		switch (eqe->type) {
 		case MLX4_EVENT_TYPE_COMP:
drivers/net/ethernet/mellanox/mlx5/core/eq.c

@@ -208,7 +208,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
 		 */
-		rmb();
+		dma_rmb();
 
 		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
 			      eq->eqn, eqe_type_str(eqe->type));
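The read-side changes above are the mirror image: a completion or event entry is claimed by an ownership/valid bit, and dma_rmb() keeps reads of the remaining fields from being satisfied before that bit was observed. A minimal sketch of the consumer-side polling pattern, again with hypothetical names (struct demo_cqe and demo_poll_one are illustrative, not from any driver in this merge):

	/* Hypothetical completion entry: hardware writes the payload
	 * first, then flips the owner bit.
	 */
	struct demo_cqe {
		__be32 byte_count;
		u8     owner;
	};

	static int demo_poll_one(struct demo_cqe *cqe, u8 sw_owner)
	{
		if ((cqe->owner & 0x80) != sw_owner)
			return -EAGAIN;	/* not written by hardware yet */

		/* The owner bit says the entry is valid; make sure the read
		 * below is not reordered (or speculated) ahead of that check.
		 */
		dma_rmb();

		return be32_to_cpu(cqe->byte_count);
	}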