crypto: octeontx2 - add support for OcteonTX2 98xx CPT block.

The OcteonTX2 series of silicon has multiple variants; the
98xx variant has two crypto blocks (CPT0 & CPT1). This patch
adds support for firmware load on the new CPT block (CPT1).

Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Srujana Challa 2021-04-15 17:58:37 +05:30 committed by Herbert Xu
parent 66810912fd
commit b2d17df35e
7 changed files with 153 additions and 58 deletions

View File

@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
struct pci_dev *pdev, u64 reg, u64 *val);
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val);
u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val);
u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val);
u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);

View File

@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val)
u64 reg, u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 0;
reg_msg->reg_offset = reg;
reg_msg->ret_val = val;
reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val)
u64 reg, u64 val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 1;
reg_msg->reg_offset = reg;
reg_msg->val = val;
reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val)
u64 reg, u64 *val, int blkaddr)
{
int ret;
ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
@ -103,11 +105,11 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
}
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val)
u64 reg, u64 val, int blkaddr)
{
int ret;
ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;

View File

@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
&lf_ctrl.u);
&lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
lf_ctrl.u);
lf_ctrl.u, lfs->blkaddr);
return ret;
}
@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
&lf_ctrl.u);
&lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
lf_ctrl.u);
lf_ctrl.u, lfs->blkaddr);
return ret;
}

View File

@ -95,6 +95,7 @@ struct otx2_cptlfs_info {
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
};
static inline void otx2_cpt_free_instruction_queues(

View File

@ -51,6 +51,7 @@ struct otx2_cptpf_dev {
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
bool has_cpt1;
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);

View File

@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
int timeout = 10, ret;
u64 reg = 0;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_BLK_RST, 0x1);
CPT_AF_BLK_RST, 0x1, blkaddr);
if (ret)
return ret;
do {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_BLK_RST, &reg);
CPT_AF_BLK_RST, &reg, blkaddr);
if (ret)
return ret;
@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
return ret;
}
static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
int ret = 0;
if (cptpf->has_cpt1) {
ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_device_reset(cptpf, BLKADDR_CPT0);
}
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
u64 cfg;
cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
if (cfg & BIT_ULL(11))
cptpf->has_cpt1 = true;
}
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
union otx2_cptx_af_constants1 af_cnsts1 = {0};
int ret = 0;
/* check if 'implemented' bit is set for block BLKADDR_CPT1 */
cptpf_check_block_implemented(cptpf);
/* Reset the CPT PF device */
ret = cptpf_device_reset(cptpf);
if (ret)
@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_CONSTANTS1, &af_cnsts1.u);
CPT_AF_CONSTANTS1, &af_cnsts1.u,
BLKADDR_CPT0);
if (ret)
return ret;

View File

@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev,
}
static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
dma_addr_t dma_addr)
dma_addr_t dma_addr, int blkaddr)
{
return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_UCODE_BASE(eng),
(u64)dma_addr);
(u64)dma_addr, blkaddr);
}
static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
struct otx2_cptpf_dev *cptpf, int blkaddr)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_engs_rsvd *engs;
dma_addr_t dma_addr;
int i, bit, ret;
@ -170,7 +170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
cptpf->pf_id << RVU_PFVF_PF_SHIFT);
cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
if (ret)
return ret;
@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
*/
for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
if (!eng_grp->g->eng_ref_cnt[bit]) {
ret = __write_ucode_base(cptpf, bit, dma_addr);
ret = __write_ucode_base(cptpf, bit, dma_addr,
blkaddr);
if (ret)
return ret;
}
@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
return 0;
}
static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
int ret;
if (cptpf->has_cpt1) {
ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_bitmap bmap,
int blkaddr)
{
int i, timeout = 10;
int busy, ret;
u64 reg = 0;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
/* Detach the cores from group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), &reg);
CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL2(i), reg);
CPT_AF_EXEX_CTL2(i), reg,
blkaddr);
if (ret)
return ret;
}
@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_STS(i), &reg);
CPT_AF_EXEX_STS(i), &reg,
blkaddr);
if (ret)
return ret;
@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
if (!eng_grp->g->eng_ref_cnt[i]) {
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x0);
CPT_AF_EXEX_CTL(i), 0x0,
blkaddr);
if (ret)
return ret;
}
@ -262,22 +275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
return 0;
}
static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
u64 reg = 0;
int i, ret;
int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
if (cptpf->has_cpt1) {
ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT0);
}
static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_bitmap bmap,
int blkaddr)
{
u64 reg = 0;
int i, ret;
/* Attach the cores to the group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), &reg);
CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL2(i), reg);
CPT_AF_EXEX_CTL2(i), reg,
blkaddr);
if (ret)
return ret;
}
@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
/* Enable the cores */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x1);
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x1,
blkaddr);
if (ret)
return ret;
}
ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
return ret;
static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
if (cptpf->has_cpt1) {
ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
@ -1140,20 +1189,18 @@ int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
return ret;
}
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
int blkaddr)
{
int i, ret, busy, total_cores;
int timeout = 10;
u64 reg = 0;
total_cores = cptpf->eng_grps.avail.max_se_cnt +
cptpf->eng_grps.avail.max_ie_cnt +
cptpf->eng_grps.avail.max_ae_cnt;
int timeout = 10, ret;
int i, busy;
u64 reg;
/* Disengage the cores from groups */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), 0x0);
CPT_AF_EXEX_CTL2(i), 0x0,
blkaddr);
if (ret)
return ret;
@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_STS(i), &reg);
CPT_AF_EXEX_STS(i), &reg,
blkaddr);
if (ret)
return ret;
@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
/* Disable the cores */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x0);
CPT_AF_EXEX_CTL(i), 0x0,
blkaddr);
if (ret)
return ret;
}
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
int total_cores, ret;
total_cores = cptpf->eng_grps.avail.max_se_cnt +
cptpf->eng_grps.avail.max_ie_cnt +
cptpf->eng_grps.avail.max_ae_cnt;
if (cptpf->has_cpt1) {
ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
if (ret)
return ret;
}
return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
lfs->pdev = pdev;
lfs->reg_base = cptpf->reg_base;
lfs->mbox = &cptpf->afpf_mbox;
lfs->blkaddr = BLKADDR_CPT0;
ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)