mirror of https://gitee.com/openkylin/linux.git
Merge candidates for 4.17-rc
- bnxt netdev changes merged this cycle caused the bnxt RDMA driver to crash under certain situations - Arnd found (several, unfortunately) kconfig problems with the patches adding INFINIBAND_ADDR_TRANS. Reverting this last part, will fix it more fully outside -rc. - Subtle change in error code for a uapi function caused breakage in userspace. This was bug was subtly introduced cycle -----BEGIN PGP SIGNATURE----- Version: GnuPG v2 iQIcBAABCgAGBQJbEXg1AAoJEDht9xV+IJsax2YQAJjXnfQ9+QsbtT8sqFzGFLo3 r5aqqwF6RkGDFsovVDRH/9S62JjeqJRQA+4ykooajD+6mXU06Sf3p4tcoco0Dqhn 66b/lkdPFzXSytlne7AnUnA3xkKG4u5jYGReSryIQjXu29iwt8scgiiqt8nX9Gzi eC9U2UQn5ZF65yRo4V/UGuHjdnUXiPYfg2Ff5YqLUxdL0XE42ftpuiR3Xuzgfj8c /rdqDwnvdViQwPeTSNTJoZzeV+49WKp9BP+lzsCeIXvzuzY1aOd94z0i7fq342hB jXpz6PtRTSBkZ4xBuvtopnoz0HQXMv7kQFMobkyjaP3qXcKz6Dx9d3QvWkLq8qdQ D4MPYjVCONJAJvXppxuNSzyz0lg5EaSICWeZhkr5P68Ja0fptiXAumVCTr/kEifV Yz8y0xsEVMIxBy3C9HOCe4khzWKqd9Uoo4/VDM+GdhKHIpUCnnKTte9vIZcYg1JL 1doCithudNX7KF4K79JJqADwtFhXoPaHt3XF9YRhgouDN9gC+/hB5ZZvD4jjcqhF tLfRh1+LeK6QuVTE//ON/OS5xGgEzcZVLUJSUs/NdB+5YAXREz/2+IoK/0+rFiP0 wlHlgUk9ZQx3j7La5iiPrRdK+h6u0LUYd02XuMevnnswGqv+DWrxDnFy/icovPCt AxHZ0KxdLJeo2euOd755 =Ppe+ -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma Pull rdma fixes from Jason Gunthorpe: "Just three small last minute regressions that were found in the last week. The Broadcom fix is a bit big for rc7, but since it is fixing driver crash regressions that were merged via netdev into rc1, I am sending it. - bnxt netdev changes merged this cycle caused the bnxt RDMA driver to crash under certain situations - Arnd found (several, unfortunately) kconfig problems with the patches adding INFINIBAND_ADDR_TRANS. Reverting this last part, will fix it more fully outside -rc. - Subtle change in error code for a uapi function caused breakage in userspace. 
This bug was subtly introduced this cycle" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: IB/core: Fix error code for invalid GID entry IB: Revert "remove redundant INFINIBAND kconfig dependencies" RDMA/bnxt_re: Fix broken RoCE driver due to recent L2 driver changes
This commit is contained in:
commit
7fdf3e8616
|
@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
|
|||
return -EINVAL;
|
||||
|
||||
if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
|
||||
return -EAGAIN;
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
|
||||
if (attr) {
|
||||
|
|
|
@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
|
|||
bnxt_re_ib_unreg(rdev, false);
|
||||
}
|
||||
|
||||
static void bnxt_re_stop_irq(void *handle)
|
||||
{
|
||||
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
|
||||
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
|
||||
struct bnxt_qplib_nq *nq;
|
||||
int indx;
|
||||
|
||||
for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
|
||||
nq = &rdev->nq[indx - 1];
|
||||
bnxt_qplib_nq_stop_irq(nq, false);
|
||||
}
|
||||
|
||||
bnxt_qplib_rcfw_stop_irq(rcfw, false);
|
||||
}
|
||||
|
||||
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
|
||||
{
|
||||
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
|
||||
struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
|
||||
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
|
||||
struct bnxt_qplib_nq *nq;
|
||||
int indx, rc;
|
||||
|
||||
if (!ent) {
|
||||
/* Not setting the f/w timeout bit in rcfw.
|
||||
* During the driver unload the first command
|
||||
* to f/w will timeout and that will set the
|
||||
* timeout bit.
|
||||
*/
|
||||
dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Vectors may change after restart, so update with new vectors
|
||||
* in device sctructure.
|
||||
*/
|
||||
for (indx = 0; indx < rdev->num_msix; indx++)
|
||||
rdev->msix_entries[indx].vector = ent[indx].vector;
|
||||
|
||||
bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
|
||||
false);
|
||||
for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
|
||||
nq = &rdev->nq[indx - 1];
|
||||
rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
|
||||
msix_ent[indx].vector, false);
|
||||
if (rc)
|
||||
dev_warn(rdev_to_dev(rdev),
|
||||
"Failed to reinit NQ index %d\n", indx - 1);
|
||||
}
|
||||
}
|
||||
|
||||
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
|
||||
.ulp_async_notifier = NULL,
|
||||
.ulp_stop = bnxt_re_stop,
|
||||
.ulp_start = bnxt_re_start,
|
||||
.ulp_sriov_config = bnxt_re_sriov_config,
|
||||
.ulp_shutdown = bnxt_re_shutdown
|
||||
.ulp_shutdown = bnxt_re_shutdown,
|
||||
.ulp_irq_stop = bnxt_re_stop_irq,
|
||||
.ulp_irq_restart = bnxt_re_start_irq
|
||||
};
|
||||
|
||||
/* RoCE -> Net driver */
|
||||
|
|
|
@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
|
||||
{
|
||||
tasklet_disable(&nq->worker);
|
||||
/* Mask h/w interrupt */
|
||||
NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
|
||||
/* Sync with last running IRQ handler */
|
||||
synchronize_irq(nq->vector);
|
||||
if (kill)
|
||||
tasklet_kill(&nq->worker);
|
||||
if (nq->requested) {
|
||||
irq_set_affinity_hint(nq->vector, NULL);
|
||||
free_irq(nq->vector, nq);
|
||||
nq->requested = false;
|
||||
}
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
|
||||
{
|
||||
if (nq->cqn_wq) {
|
||||
destroy_workqueue(nq->cqn_wq);
|
||||
nq->cqn_wq = NULL;
|
||||
}
|
||||
/* Make sure the HW is stopped! */
|
||||
synchronize_irq(nq->vector);
|
||||
tasklet_disable(&nq->worker);
|
||||
tasklet_kill(&nq->worker);
|
||||
|
||||
if (nq->requested) {
|
||||
irq_set_affinity_hint(nq->vector, NULL);
|
||||
free_irq(nq->vector, nq);
|
||||
nq->requested = false;
|
||||
}
|
||||
/* Make sure the HW is stopped! */
|
||||
bnxt_qplib_nq_stop_irq(nq, true);
|
||||
|
||||
if (nq->bar_reg_iomem)
|
||||
iounmap(nq->bar_reg_iomem);
|
||||
nq->bar_reg_iomem = NULL;
|
||||
|
@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
|
|||
nq->vector = 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
int msix_vector, bool need_init)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (nq->requested)
|
||||
return -EFAULT;
|
||||
|
||||
nq->vector = msix_vector;
|
||||
if (need_init)
|
||||
tasklet_init(&nq->worker, bnxt_qplib_service_nq,
|
||||
(unsigned long)nq);
|
||||
else
|
||||
tasklet_enable(&nq->worker);
|
||||
|
||||
snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
|
||||
rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
cpumask_clear(&nq->mask);
|
||||
cpumask_set_cpu(nq_indx, &nq->mask);
|
||||
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
|
||||
if (rc) {
|
||||
dev_warn(&nq->pdev->dev,
|
||||
"QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
|
||||
nq->vector, nq_indx);
|
||||
}
|
||||
nq->requested = true;
|
||||
NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
||||
int nq_idx, int msix_vector, int bar_reg_offset,
|
||||
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
|
||||
|
@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
|||
resource_size_t nq_base;
|
||||
int rc = -1;
|
||||
|
||||
nq->pdev = pdev;
|
||||
nq->vector = msix_vector;
|
||||
if (cqn_handler)
|
||||
nq->cqn_handler = cqn_handler;
|
||||
|
||||
if (srqn_handler)
|
||||
nq->srqn_handler = srqn_handler;
|
||||
|
||||
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
|
||||
|
||||
/* Have a task to schedule CQ notifiers in post send case */
|
||||
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
|
||||
if (!nq->cqn_wq)
|
||||
goto fail;
|
||||
return -ENOMEM;
|
||||
|
||||
nq->requested = false;
|
||||
memset(nq->name, 0, 32);
|
||||
sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
|
||||
rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
|
||||
if (rc) {
|
||||
dev_err(&nq->pdev->dev,
|
||||
"Failed to request IRQ for NQ: %#x", rc);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
cpumask_clear(&nq->mask);
|
||||
cpumask_set_cpu(nq_idx, &nq->mask);
|
||||
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
|
||||
if (rc) {
|
||||
dev_warn(&nq->pdev->dev,
|
||||
"QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
|
||||
nq->vector, nq_idx);
|
||||
}
|
||||
|
||||
nq->requested = true;
|
||||
nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
|
||||
nq->bar_reg_off = bar_reg_offset;
|
||||
nq_base = pci_resource_start(pdev, nq->bar_reg);
|
||||
|
@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
|||
rc = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
|
||||
|
||||
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
|
||||
if (rc) {
|
||||
dev_err(&nq->pdev->dev,
|
||||
"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
|
|
|
@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
|
|||
struct bnxt_qplib_cq *cq;
|
||||
};
|
||||
|
||||
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
|
||||
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
|
||||
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
int msix_vector, bool need_init);
|
||||
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
||||
int nq_idx, int msix_vector, int bar_reg_offset,
|
||||
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
|
||||
|
|
|
@ -582,19 +582,29 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
||||
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
|
||||
{
|
||||
unsigned long indx;
|
||||
|
||||
/* Make sure the HW channel is stopped! */
|
||||
synchronize_irq(rcfw->vector);
|
||||
tasklet_disable(&rcfw->worker);
|
||||
tasklet_kill(&rcfw->worker);
|
||||
/* Mask h/w interrupts */
|
||||
CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
|
||||
rcfw->creq.max_elements);
|
||||
/* Sync with last running IRQ-handler */
|
||||
synchronize_irq(rcfw->vector);
|
||||
if (kill)
|
||||
tasklet_kill(&rcfw->worker);
|
||||
|
||||
if (rcfw->requested) {
|
||||
free_irq(rcfw->vector, rcfw);
|
||||
rcfw->requested = false;
|
||||
}
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
unsigned long indx;
|
||||
|
||||
bnxt_qplib_rcfw_stop_irq(rcfw, true);
|
||||
|
||||
if (rcfw->cmdq_bar_reg_iomem)
|
||||
iounmap(rcfw->cmdq_bar_reg_iomem);
|
||||
rcfw->cmdq_bar_reg_iomem = NULL;
|
||||
|
@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
|||
rcfw->vector = 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
|
||||
bool need_init)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (rcfw->requested)
|
||||
return -EFAULT;
|
||||
|
||||
rcfw->vector = msix_vector;
|
||||
if (need_init)
|
||||
tasklet_init(&rcfw->worker,
|
||||
bnxt_qplib_service_creq, (unsigned long)rcfw);
|
||||
else
|
||||
tasklet_enable(&rcfw->worker);
|
||||
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
|
||||
"bnxt_qplib_creq", rcfw);
|
||||
if (rc)
|
||||
return rc;
|
||||
rcfw->requested = true;
|
||||
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
|
||||
rcfw->creq.max_elements);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw,
|
||||
int msix_vector,
|
||||
|
@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
|||
rcfw->creq_qp_event_processed = 0;
|
||||
rcfw->creq_func_event_processed = 0;
|
||||
|
||||
rcfw->vector = msix_vector;
|
||||
if (aeq_handler)
|
||||
rcfw->aeq_handler = aeq_handler;
|
||||
init_waitqueue_head(&rcfw->waitq);
|
||||
|
||||
tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
|
||||
(unsigned long)rcfw);
|
||||
|
||||
rcfw->requested = false;
|
||||
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
|
||||
"bnxt_qplib_creq", rcfw);
|
||||
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
|
||||
if (rc) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
|
||||
bnxt_qplib_disable_rcfw_channel(rcfw);
|
||||
return rc;
|
||||
}
|
||||
rcfw->requested = true;
|
||||
|
||||
init_waitqueue_head(&rcfw->waitq);
|
||||
|
||||
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
|
||||
|
||||
init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
init.cmdq_size_cmdq_lvl = cpu_to_le16(
|
||||
|
|
|
@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
|
|||
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
|
||||
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
|
||||
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
|
||||
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
|
||||
bool need_init);
|
||||
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw,
|
||||
int msix_vector,
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
config INFINIBAND_SRPT
|
||||
tristate "InfiniBand SCSI RDMA Protocol target support"
|
||||
depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
|
||||
depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
|
||||
---help---
|
||||
|
||||
Support for the SCSI RDMA Protocol (SRP) Target driver. The
|
||||
|
|
|
@ -27,7 +27,7 @@ config NVME_FABRICS
|
|||
|
||||
config NVME_RDMA
|
||||
tristate "NVM Express over Fabrics RDMA host driver"
|
||||
depends on INFINIBAND_ADDR_TRANS && BLOCK
|
||||
depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
|
||||
select NVME_CORE
|
||||
select NVME_FABRICS
|
||||
select SG_POOL
|
||||
|
|
|
@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
|
|||
|
||||
config NVME_TARGET_RDMA
|
||||
tristate "NVMe over Fabrics RDMA target support"
|
||||
depends on INFINIBAND_ADDR_TRANS
|
||||
depends on INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
depends on NVME_TARGET
|
||||
select SGL_ALLOC
|
||||
help
|
||||
|
|
|
@ -34,7 +34,7 @@ config LNET_SELFTEST
|
|||
|
||||
config LNET_XPRT_IB
|
||||
tristate "LNET infiniband support"
|
||||
depends on LNET && PCI && INFINIBAND_ADDR_TRANS
|
||||
depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
default LNET && INFINIBAND
|
||||
help
|
||||
This option allows the LNET users to use infiniband as an
|
||||
|
|
|
@ -197,7 +197,7 @@ config CIFS_SMB311
|
|||
|
||||
config CIFS_SMB_DIRECT
|
||||
bool "SMB Direct support (Experimental)"
|
||||
depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
|
||||
depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
|
||||
help
|
||||
Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
|
||||
SMB Direct allows transferring SMB packets over RDMA. If unsure,
|
||||
|
|
|
@ -32,7 +32,7 @@ config NET_9P_XEN
|
|||
|
||||
|
||||
config NET_9P_RDMA
|
||||
depends on INET && INFINIBAND_ADDR_TRANS
|
||||
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
tristate "9P RDMA Transport (Experimental)"
|
||||
help
|
||||
This builds support for an RDMA transport.
|
||||
|
|
|
@ -8,7 +8,7 @@ config RDS
|
|||
|
||||
config RDS_RDMA
|
||||
tristate "RDS over Infiniband"
|
||||
depends on RDS && INFINIBAND_ADDR_TRANS
|
||||
depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
---help---
|
||||
Allow RDS to use Infiniband as a transport.
|
||||
This transport supports RDMA operations.
|
||||
|
|
|
@ -50,7 +50,7 @@ config SUNRPC_DEBUG
|
|||
|
||||
config SUNRPC_XPRT_RDMA
|
||||
tristate "RPC-over-RDMA transport"
|
||||
depends on SUNRPC && INFINIBAND_ADDR_TRANS
|
||||
depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
default SUNRPC && INFINIBAND
|
||||
select SG_POOL
|
||||
help
|
||||
|
|
Loading…
Reference in New Issue