mirror of https://gitee.com/openkylin/linux.git
octeontx2-af: Relax resource lock into mutex
Mailbox message handling is done in a workqueue context scheduled from the interrupt handler, so the resource locks do not need to be spinlocks. Therefore relax them into mutexes so that later on we may use them in routines that might sleep.

Signed-off-by: Stanislaw Kardach <skardach@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0964fc8f5f
parent 34425e8c75
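The rationale is easiest to see in code: once the handlers run from a workqueue (process context), the sections protected by rsrc_lock and mce_lock are allowed to sleep, which is what the GFP_ATOMIC to GFP_KERNEL change in nix_update_mce_list() below relies on. The following stand-alone sketch is illustrative only and is not part of the patch; demo_rsrc, demo_entry and the demo_* functions are invented names used to show the pattern of holding a mutex around an allocation that may sleep.

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>

/* Hypothetical resource table mirroring the rsrc_lock pattern in this patch. */
struct demo_rsrc {
        struct mutex lock;              /* serializes alloc/free; taken in workqueue context only */
        struct list_head entries;
};

struct demo_entry {
        struct list_head node;
        u16 pcifunc;
};

static void demo_rsrc_init(struct demo_rsrc *rsrc)
{
        mutex_init(&rsrc->lock);        /* counterpart: mutex_destroy() on teardown */
        INIT_LIST_HEAD(&rsrc->entries);
}

/* Runs from a mailbox workqueue handler, i.e. process context. Because a mutex
 * rather than a spinlock is held, the critical section may block, so the entry
 * can be allocated with GFP_KERNEL instead of GFP_ATOMIC.
 */
static int demo_add_entry(struct demo_rsrc *rsrc, u16 pcifunc)
{
        struct demo_entry *entry;
        int err = 0;

        mutex_lock(&rsrc->lock);
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                err = -ENOMEM;
                goto unlock;
        }
        entry->pcifunc = pcifunc;
        list_add_tail(&entry->node, &rsrc->entries);
unlock:
        mutex_unlock(&rsrc->lock);
        return err;
}

With a spinlock held, the allocation would have to stay GFP_ATOMIC and the section could not block; the mutex removes that restriction, at the cost that the lock may only be taken from contexts that are allowed to sleep, which the commit message states is the case for the mailbox handlers.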
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -153,17 +153,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
         u16 match = 0;
         int lf;
 
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
         for (lf = 0; lf < block->lf.max; lf++) {
                 if (block->fn_map[lf] == pcifunc) {
                         if (slot == match) {
-                                spin_unlock(&rvu->rsrc_lock);
+                                mutex_unlock(&rvu->rsrc_lock);
                                 return lf;
                         }
                         match++;
                 }
         }
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
         return -ENODEV;
 }
 
@@ -597,6 +597,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
         dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
                            max_msix * PCI_MSIX_ENTRY_SIZE,
                            DMA_BIDIRECTIONAL, 0);
+
+        mutex_destroy(&rvu->rsrc_lock);
 }
 
 static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +754,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
         if (!rvu->hwvf)
                 return -ENOMEM;
 
-        spin_lock_init(&rvu->rsrc_lock);
+        mutex_init(&rvu->rsrc_lock);
 
         err = rvu_setup_msix_resources(rvu);
         if (err)
@@ -926,7 +928,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
         struct rvu_block *block;
         int blkid;
 
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
 
         /* Check for partial resource detach */
         if (detach && detach->partial)
@@ -956,7 +958,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
                 rvu_detach_block(rvu, pcifunc, block->type);
         }
 
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
         return 0;
 }
 
@@ -1119,7 +1121,7 @@ static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
         if (!attach->modify)
                 rvu_detach_rsrcs(rvu, NULL, pcifunc);
 
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
 
         /* Check if the request can be accommodated */
         err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1165,7 @@ static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
         }
 
 exit:
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
         return err;
 }
 
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -64,7 +64,7 @@ struct nix_mcast {
         struct qmem *mcast_buf;
         int replay_pkind;
         int next_free_mce;
-        spinlock_t mce_lock; /* Serialize MCE updates */
+        struct mutex mce_lock; /* Serialize MCE updates */
 };
 
 struct nix_mce_list {
@@ -74,7 +74,7 @@ struct nix_mce_list {
 };
 
 struct npc_mcam {
-        spinlock_t lock; /* MCAM entries and counters update lock */
+        struct mutex lock; /* MCAM entries and counters update lock */
         u8 keysize; /* MCAM keysize 112/224/448 bits */
         u8 banks; /* Number of MCAM banks */
         u8 banks_per_entry;/* Number of keywords in key */
@@ -174,7 +174,7 @@ struct rvu {
         struct rvu_hwinfo *hw;
         struct rvu_pfvf *pf;
         struct rvu_pfvf *hwvf;
-        spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+        struct mutex rsrc_lock; /* Serialize resource alloc/free */
 
         /* Mbox */
         struct otx2_mbox mbox;
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -109,12 +109,12 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
         if (schq >= txsch->schq.max)
                 return false;
 
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
         if (txsch->pfvf_map[schq] != pcifunc) {
-                spin_unlock(&rvu->rsrc_lock);
+                mutex_unlock(&rvu->rsrc_lock);
                 return false;
         }
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
         return true;
 }
 
@@ -953,7 +953,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
         if (!nix_hw)
                 return -EINVAL;
 
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                 txsch = &nix_hw->txsch[lvl];
                 req_schq = req->schq_contig[lvl] + req->schq[lvl];
@@ -1009,7 +1009,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 err:
         rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
 exit:
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
         return rc;
 }
 
@@ -1034,7 +1034,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
                 return NIX_AF_ERR_AF_LF_INVALID;
 
         /* Disable TL2/3 queue links before SMQ flush*/
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
         for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
                         continue;
@@ -1076,7 +1076,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
                         txsch->pfvf_map[schq] = 0;
                 }
         }
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
 
         /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
         rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
@@ -1308,7 +1308,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
                 return 0;
 
         /* Add a new one to the list, at the tail */
-        mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+        mce = kzalloc(sizeof(*mce), GFP_KERNEL);
         if (!mce)
                 return -ENOMEM;
         mce->idx = idx;
@@ -1354,7 +1354,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
                 return -EINVAL;
         }
 
-        spin_lock(&mcast->mce_lock);
+        mutex_lock(&mcast->mce_lock);
 
         err = nix_update_mce_list(mce_list, pcifunc, idx, add);
         if (err)
@@ -1384,7 +1384,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
         }
 
 end:
-        spin_unlock(&mcast->mce_lock);
+        mutex_unlock(&mcast->mce_lock);
         return err;
 }
 
@@ -1469,7 +1469,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
                     BIT_ULL(63) | (mcast->replay_pkind << 24) |
                     BIT_ULL(20) | MC_BUF_CNT);
 
-        spin_lock_init(&mcast->mce_lock);
+        mutex_init(&mcast->mce_lock);
 
         return nix_setup_bcast_tables(rvu, nix_hw);
 }
@@ -1869,7 +1869,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
 
         /* Update min/maxlen in each of the SMQ attached to this PF/VF */
         txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
-        spin_lock(&rvu->rsrc_lock);
+        mutex_lock(&rvu->rsrc_lock);
         for (schq = 0; schq < txsch->schq.max; schq++) {
                 if (txsch->pfvf_map[schq] != pcifunc)
                         continue;
@@ -1879,7 +1879,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
                 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
                 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
         }
-        spin_unlock(&rvu->rsrc_lock);
+        mutex_unlock(&rvu->rsrc_lock);
 
 rx_frscfg:
         /* Check if config is for SDP link */
@@ -2162,5 +2162,6 @@ void rvu_nix_freemem(struct rvu *rvu)
                 mcast = &nix_hw->mcast;
                 qmem_free(rvu->dev, mcast->mce_ctx);
                 qmem_free(rvu->dev, mcast->mcast_buf);
+                mutex_destroy(&mcast->mce_lock);
         }
 }
 
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -732,7 +732,7 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
         mcam->nixlf_offset = mcam->entries;
         mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
 
-        spin_lock_init(&mcam->lock);
+        mutex_init(&mcam->lock);
 
         return 0;
 }
@@ -811,6 +811,8 @@ int rvu_npc_init(struct rvu *rvu)
 void rvu_npc_freemem(struct rvu *rvu)
 {
         struct npc_pkind *pkind = &rvu->hw->pkind;
+        struct npc_mcam *mcam = &rvu->hw->mcam;
 
         kfree(pkind->rsrc.bmap);
+        mutex_destroy(&mcam->lock);
 }