Merge of primary rdma-core code for 4.9
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull main rdma updates from Doug Ledford:
 "This is the main pull request for the rdma stack this release. The
  code has been through 0day and I had it tagged for linux-next testing
  for a couple days.

  Summary:

   - updates to mlx5

   - updates to mlx4 (two conflicts, both minor and easily resolved)

   - updates to iw_cxgb4 (one conflict, not so obvious to resolve,
     proper resolution is to keep the code in cxgb4_main.c as it is in
     Linus' tree as attach_uld was refactored and moved into
     cxgb4_uld.c)

   - improvements to uAPI (moved vendor specific API elements to uAPI
     area)

   - add hns-roce driver and hns and hns-roce ACPI reset support

   - conversion of all rdma code away from deprecated
     create_singlethread_workqueue

   - security improvement: remove unsafe ib_get_dma_mr (breaks lustre
     in staging)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (75 commits)
  staging/lustre: Disable InfiniBand support
  iw_cxgb4: add fast-path for small REG_MR operations
  cxgb4: advertise support for FR_NSMR_TPTE_WR
  IB/core: correctly handle rdma_rw_init_mrs() failure
  IB/srp: Fix infinite loop when FMR sg[0].offset != 0
  IB/srp: Remove an unused argument
  IB/core: Improve ib_map_mr_sg() documentation
  IB/mlx4: Fix possible vl/sl field mismatch in LRH header in QP1 packets
  IB/mthca: Move user vendor structures
  IB/nes: Move user vendor structures
  IB/ocrdma: Move user vendor structures
  IB/mlx4: Move user vendor structures
  IB/cxgb4: Move user vendor structures
  IB/cxgb3: Move user vendor structures
  IB/mlx5: Move and decouple user vendor structures
  IB/{core,hw}: Add constant for node_desc
  ipoib: Make ipoib_warn ratelimited
  IB/mlx4/alias_GUID: Remove deprecated create_singlethread_workqueue
  IB/ipoib_verbs: Remove deprecated create_singlethread_workqueue
  IB/ipoib: Remove deprecated create_singlethread_workqueue
  ...
commit b9044ac829

Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt (new file)
@@ -0,0 +1,107 @@
Hisilicon RoCE DT description

The Hisilicon RoCE engine is a part of the network subsystem.
It depends on other parts of the network subsystem, such as the gmac and
dsaf fabric.

Additional properties are described here:

Required properties:
- compatible: Should contain "hisilicon,hns-roce-v1".
- reg: Physical base address of the RoCE engine and
  length of the memory mapped region.
- eth-handle: phandle, specifies a reference to a node
  representing an ethernet device.
- dsaf-handle: phandle, specifies a reference to a node
  representing a dsaf device.
- #address-cells: must be 2
- #size-cells: must be 2
Optional properties:
- dma-coherent: Present if DMA operations are coherent.
- interrupt-parent: the interrupt parent of this device.
- interrupts: should contain 32 completion event IRQs, 1 async event IRQ
  and 1 event overflow IRQ.
- interrupt-names: should name all 34 IRQs of the RoCE device:
  - hns-roce-comp-0 ~ hns-roce-comp-31: the 32 completion event IRQs
  - hns-roce-async: the async event IRQ
  - hns-roce-common: the common exception warning (event overflow) IRQ
Example:
	infiniband@c4000000 {
		compatible = "hisilicon,hns-roce-v1";
		reg = <0x0 0xc4000000 0x0 0x100000>;
		dma-coherent;
		eth-handle = <&eth2 &eth3 &eth4 &eth5 &eth6 &eth7>;
		dsaf-handle = <&soc0_dsa>;
		#address-cells = <2>;
		#size-cells = <2>;
		interrupt-parent = <&mbigen_dsa>;
		interrupts = <722 1>, <723 1>, <724 1>, <725 1>, <726 1>,
			     <727 1>, <728 1>, <729 1>, <730 1>, <731 1>,
			     <732 1>, <733 1>, <734 1>, <735 1>, <736 1>,
			     <737 1>, <738 1>, <739 1>, <740 1>, <741 1>,
			     <742 1>, <743 1>, <744 1>, <745 1>, <746 1>,
			     <747 1>, <748 1>, <749 1>, <750 1>, <751 1>,
			     <752 1>, <753 1>, <785 1>, <754 4>;

		interrupt-names = "hns-roce-comp-0", "hns-roce-comp-1",
				  "hns-roce-comp-2", "hns-roce-comp-3",
				  "hns-roce-comp-4", "hns-roce-comp-5",
				  "hns-roce-comp-6", "hns-roce-comp-7",
				  "hns-roce-comp-8", "hns-roce-comp-9",
				  "hns-roce-comp-10", "hns-roce-comp-11",
				  "hns-roce-comp-12", "hns-roce-comp-13",
				  "hns-roce-comp-14", "hns-roce-comp-15",
				  "hns-roce-comp-16", "hns-roce-comp-17",
				  "hns-roce-comp-18", "hns-roce-comp-19",
				  "hns-roce-comp-20", "hns-roce-comp-21",
				  "hns-roce-comp-22", "hns-roce-comp-23",
				  "hns-roce-comp-24", "hns-roce-comp-25",
				  "hns-roce-comp-26", "hns-roce-comp-27",
				  "hns-roce-comp-28", "hns-roce-comp-29",
				  "hns-roce-comp-30", "hns-roce-comp-31",
				  "hns-roce-async", "hns-roce-common";
	};
MAINTAINERS
@@ -3509,6 +3509,7 @@ L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org
 S:	Supported
 F:	drivers/infiniband/hw/cxgb3/
+F:	include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
 M:	Hariprasad S <hariprasad@chelsio.com>
@@ -3530,6 +3531,7 @@ L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org
 S:	Supported
 F:	drivers/infiniband/hw/cxgb4/
+F:	include/uapi/rdma/cxgb4-abi.h
 
 CXGB4VF ETHERNET DRIVER (CXGB4VF)
 M:	Casey Leedom <leedom@chelsio.com>
@@ -5712,6 +5714,14 @@ S:	Maintained
 F:	drivers/net/ethernet/hisilicon/
 F:	Documentation/devicetree/bindings/net/hisilicon*.txt
 
+HISILICON ROCE DRIVER
+M:	Lijun Ou <oulijun@huawei.com>
+M:	Wei Hu(Xavier) <xavier.huwei@huawei.com>
+L:	linux-rdma@vger.kernel.org
+S:	Maintained
+F:	drivers/infiniband/hw/hns/
+F:	Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
+
 HISILICON SAS Controller
 M:	John Garry <john.garry@huawei.com>
 W:	http://www.hisilicon.com
@@ -7970,6 +7980,7 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx4/
 F:	include/linux/mlx4/
+F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:	Yishai Hadas <yishaih@mellanox.com>
@@ -7990,6 +8001,7 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
+F:	include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:	Matan Barak <matanb@mellanox.com>
@@ -8262,6 +8274,7 @@ L:	linux-rdma@vger.kernel.org
 W:	http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm
 S:	Supported
 F:	drivers/infiniband/hw/nes/
+F:	include/uapi/rdma/nes-abi.h
 
 NETEM NETWORK EMULATOR
 M:	Stephen Hemminger <stephen@networkplumber.org>
@@ -10821,6 +10834,7 @@ L:	linux-rdma@vger.kernel.org
 W:	http://www.emulex.com
 S:	Supported
 F:	drivers/infiniband/hw/ocrdma/
+F:	include/uapi/rdma/ocrdma-abi.h
 
 SFC NETWORK DRIVER
 M:	Solarflare linux maintainers <linux-net-drivers@solarflare.com>
drivers/infiniband/Kconfig
@@ -74,6 +74,7 @@ source "drivers/infiniband/hw/mlx5/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
 source "drivers/infiniband/hw/ocrdma/Kconfig"
 source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
 
 source "drivers/infiniband/ulp/ipoib/Kconfig"
drivers/infiniband/core/addr.c
@@ -800,7 +800,7 @@ static struct notifier_block nb = {
 
 int addr_init(void)
 {
-	addr_wq = create_singlethread_workqueue("ib_addr");
+	addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
 	if (!addr_wq)
 		return -ENOMEM;
 
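All the workqueue conversions in this pull follow one of two shapes, so it is worth stating the pattern once. A minimal sketch, using only names that appear in the hunks themselves (the cma.c hunk follows right below): alloc_ordered_workqueue() preserves the one-item-at-a-time execution that create_singlethread_workqueue() guaranteed, and WQ_MEM_RECLAIM keeps a rescuer thread so the queue can still make forward progress under memory pressure. ib_addr above is the one caller that needs the reclaim guarantee but not strict ordering, hence plain alloc_workqueue().

	#include <linux/workqueue.h>

	/* Conversion pattern sketch; names from the cma.c hunk below. */
	static struct workqueue_struct *cma_wq;

	static int __init example_cma_wq_init(void)
	{
		cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
		if (!cma_wq)
			return -ENOMEM;
		return 0;
	}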
drivers/infiniband/core/cma.c
@@ -4369,7 +4369,7 @@ static int __init cma_init(void)
 {
 	int ret;
 
-	cma_wq = create_singlethread_workqueue("rdma_cm");
+	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
 	if (!cma_wq)
 		return -ENOMEM;
 
drivers/infiniband/core/iwcm.c
@@ -1160,7 +1160,7 @@ static int __init iw_cm_init(void)
 	if (ret)
 		pr_err("iw_cm: couldn't register netlink callbacks\n");
 
-	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
+	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
 	if (!iwcm_wq)
 		return -ENOMEM;
 
drivers/infiniband/core/mad.c
@@ -3160,7 +3160,7 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error3;
 	}
 
-	port_priv->pd = ib_alloc_pd(device);
+	port_priv->pd = ib_alloc_pd(device, 0);
 	if (IS_ERR(port_priv->pd)) {
 		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
@@ -3177,7 +3177,7 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error7;
 
 	snprintf(name, sizeof name, "ib_mad%d", port_num);
-	port_priv->wq = create_singlethread_workqueue(name);
+	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
 	if (!port_priv->wq) {
 		ret = -ENOMEM;
 		goto error8;
drivers/infiniband/core/multicast.c
@@ -873,7 +873,7 @@ int mcast_init(void)
 {
 	int ret;
 
-	mcast_wq = create_singlethread_workqueue("ib_mcast");
+	mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
 	if (!mcast_wq)
 		return -ENOMEM;
 
drivers/infiniband/core/sa_query.c
@@ -2015,7 +2015,7 @@ int ib_sa_init(void)
 		goto err2;
 	}
 
-	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
+	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
 	if (!ib_nl_wq) {
 		ret = -ENOMEM;
 		goto err3;
drivers/infiniband/core/sysfs.c
@@ -1193,7 +1193,7 @@ static ssize_t set_node_desc(struct device *device,
 	if (!dev->modify_device)
 		return -EIO;
 
-	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+	memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
 	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
 	if (ret)
 		return ret;
drivers/infiniband/core/ucma.c
@@ -1638,7 +1638,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
 	if (!file)
 		return -ENOMEM;
 
-	file->close_wq = create_singlethread_workqueue("ucma_close_id");
+	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
+						 WQ_MEM_RECLAIM);
 	if (!file->close_wq) {
 		kfree(file);
 		return -ENOMEM;
drivers/infiniband/core/uverbs_cmd.c
@@ -571,7 +571,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 
 	pd->device  = ib_dev;
 	pd->uobject = uobj;
-	pd->local_mr = NULL;
+	pd->__internal_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
 
 	uobj->object = pd;
@@ -3078,51 +3078,102 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
 	return ret ? ret : in_len;
 }
 
+static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
+{
+	/* Returns user space filter size, includes padding */
+	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+}
+
+static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
+				u16 ib_real_filter_sz)
+{
+	/*
+	 * User space filter structures must be 64 bit aligned, otherwise this
+	 * may pass, but we won't handle additional new attributes.
+	 */
+
+	if (kern_filter_size > ib_real_filter_sz) {
+		if (memchr_inv(kern_spec_filter +
+			       ib_real_filter_sz, 0,
+			       kern_filter_size - ib_real_filter_sz))
+			return -EINVAL;
+		return ib_real_filter_sz;
+	}
+	return kern_filter_size;
+}
+
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
+	ssize_t actual_filter_sz;
+	ssize_t kern_filter_sz;
+	ssize_t ib_filter_sz;
+	void *kern_spec_mask;
+	void *kern_spec_val;
+
 	if (kern_spec->reserved)
 		return -EINVAL;
 
 	ib_spec->type = kern_spec->type;
 
+	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+	/* User flow spec size must be aligned to 4 bytes */
+	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
+		return -EINVAL;
+
+	kern_spec_val = (void *)kern_spec +
+		sizeof(struct ib_uverbs_flow_spec_hdr);
+	kern_spec_mask = kern_spec_val + kern_filter_sz;
+
 	switch (ib_spec->type) {
 	case IB_FLOW_SPEC_ETH:
-		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
-		if (ib_spec->eth.size != kern_spec->eth.size)
+		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
-		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
-		       sizeof(struct ib_flow_eth_filter));
-		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
-		       sizeof(struct ib_flow_eth_filter));
+		ib_spec->size = sizeof(struct ib_flow_spec_eth);
+		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
 		break;
 	case IB_FLOW_SPEC_IPV4:
-		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
-		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
-		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
-		       sizeof(struct ib_flow_ipv4_filter));
-		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
-		       sizeof(struct ib_flow_ipv4_filter));
+		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
+		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
 		break;
 	case IB_FLOW_SPEC_IPV6:
-		ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6);
-		if (ib_spec->ipv6.size != kern_spec->ipv6.size)
+		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
+		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
+		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
+
+		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
+		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
+			return -EINVAL;
-		memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val,
-		       sizeof(struct ib_flow_ipv6_filter));
-		memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask,
-		       sizeof(struct ib_flow_ipv6_filter));
 		break;
 	case IB_FLOW_SPEC_TCP:
 	case IB_FLOW_SPEC_UDP:
-		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
-		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
-		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
-		       sizeof(struct ib_flow_tcp_udp_filter));
-		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
-		       sizeof(struct ib_flow_tcp_udp_filter));
+		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
+		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
 		break;
 	default:
 		return -EINVAL;
@@ -3654,7 +3705,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		goto err_uobj;
 	}
 
-	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
+	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
+			    sizeof(union ib_flow_spec), GFP_KERNEL);
 	if (!flow_attr) {
 		err = -ENOMEM;
 		goto err_put;
@@ -4173,6 +4225,23 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 
 	resp.device_cap_flags_ex = attr.device_cap_flags;
 	resp.response_length += sizeof(resp.device_cap_flags_ex);
+
+	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
+		goto end;
+
+	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
+	resp.rss_caps.max_rwq_indirection_tables =
+		attr.rss_caps.max_rwq_indirection_tables;
+	resp.rss_caps.max_rwq_indirection_table_size =
+		attr.rss_caps.max_rwq_indirection_table_size;
+
+	resp.response_length += sizeof(resp.rss_caps);
+
+	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
+		goto end;
+
+	resp.max_wq_type_rq = attr.max_wq_type_rq;
+	resp.response_length += sizeof(resp.max_wq_type_rq);
 end:
 	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
 	return err;
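The rss_caps and max_wq_type_rq additions follow the standard extensible-response pattern for uverbs commands; in outline (new_field is illustrative, not a real field):

	/*
	 * Each new response field is copied only if the caller's buffer
	 * (ucore->outlen) can hold everything reported so far plus that
	 * field, and response_length grows accordingly:
	 *
	 *	if (ucore->outlen < resp.response_length + sizeof(resp.new_field))
	 *		goto end;			// old userspace stops here
	 *	resp.new_field = attr.new_field;
	 *	resp.response_length += sizeof(resp.new_field);
	 *
	 * User space thus receives exactly as many trailing fields as its
	 * buffer admits, and response_length tells it which ones are valid.
	 */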
drivers/infiniband/core/verbs.c
@@ -227,9 +227,11 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
  * Every PD has a local_dma_lkey which can be used as the lkey value for local
  * memory operations.
  */
-struct ib_pd *ib_alloc_pd(struct ib_device *device)
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+			    const char *caller)
 {
 	struct ib_pd *pd;
+	int mr_access_flags = 0;
 
 	pd = device->alloc_pd(device, NULL, NULL);
 	if (IS_ERR(pd))
@@ -237,26 +239,46 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
 
 	pd->device = device;
 	pd->uobject = NULL;
-	pd->local_mr = NULL;
+	pd->__internal_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
+	pd->flags = flags;
 
 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
 		pd->local_dma_lkey = device->local_dma_lkey;
-	else {
+	else
+		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
+
+	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+		pr_warn("%s: enabling unsafe global rkey\n", caller);
+		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
+	}
+
+	if (mr_access_flags) {
 		struct ib_mr *mr;
 
-		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+		mr = pd->device->get_dma_mr(pd, mr_access_flags);
 		if (IS_ERR(mr)) {
 			ib_dealloc_pd(pd);
-			return (struct ib_pd *)mr;
+			return ERR_CAST(mr);
 		}
 
-		pd->local_mr = mr;
-		pd->local_dma_lkey = pd->local_mr->lkey;
+		mr->device	= pd->device;
+		mr->pd		= pd;
+		mr->uobject	= NULL;
+		mr->need_inval	= false;
+
+		pd->__internal_mr = mr;
+
+		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
+			pd->local_dma_lkey = pd->__internal_mr->lkey;
+
+		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
 	}
+
 	return pd;
 }
-EXPORT_SYMBOL(ib_alloc_pd);
+EXPORT_SYMBOL(__ib_alloc_pd);
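From a consumer's point of view the change looks as follows. A hedged sketch: the diff only shows __ib_alloc_pd(); the assumption here is that callers use an ib_alloc_pd(device, flags) wrapper, as the mad.c hunk above suggests.

	/* Hedged usage sketch; the ib_alloc_pd() wrapper form is assumed. */
	static int example_ulp_init(struct ib_device *device, struct ib_pd **pd)
	{
		*pd = ib_alloc_pd(device, 0);	/* cf. the mad.c hunk above */
		if (IS_ERR(*pd))
			return PTR_ERR(*pd);

		/*
		 * With ib_get_dma_mr() gone (see the removal hunk below),
		 * legacy ULPs that still rely on an all-physical rkey must
		 * now opt in explicitly - and get a pr_warn() naming them:
		 *
		 *	ib_alloc_pd(device, IB_PD_UNSAFE_GLOBAL_RKEY);
		 */
		return 0;
	}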
 
 /**
  * ib_dealloc_pd - Deallocates a protection domain.
@@ -270,10 +292,10 @@ void ib_dealloc_pd(struct ib_pd *pd)
 {
 	int ret;
 
-	if (pd->local_mr) {
-		ret = ib_dereg_mr(pd->local_mr);
+	if (pd->__internal_mr) {
+		ret = pd->device->dereg_mr(pd->__internal_mr);
 		WARN_ON(ret);
-		pd->local_mr = NULL;
+		pd->__internal_mr = NULL;
 	}
 
 	/* uverbs manipulates usecnt with proper locking, while the kabi
@@ -821,7 +843,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 		if (ret) {
 			pr_err("failed to init MR pool ret= %d\n", ret);
 			ib_destroy_qp(qp);
-			qp = ERR_PTR(ret);
+			return ERR_PTR(ret);
 		}
 	}
 
@@ -1391,29 +1413,6 @@ EXPORT_SYMBOL(ib_resize_cq);
 
 /* Memory regions */
 
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
-	struct ib_mr *mr;
-	int err;
-
-	err = ib_check_mr_access(mr_access_flags);
-	if (err)
-		return ERR_PTR(err);
-
-	mr = pd->device->get_dma_mr(pd, mr_access_flags);
-
-	if (!IS_ERR(mr)) {
-		mr->device  = pd->device;
-		mr->pd      = pd;
-		mr->uobject = NULL;
-		atomic_inc(&pd->usecnt);
-		mr->need_inval = false;
-	}
-
-	return mr;
-}
-EXPORT_SYMBOL(ib_get_dma_mr);
-
 int ib_dereg_mr(struct ib_mr *mr)
 {
 	struct ib_pd *pd = mr->pd;
@@ -1812,13 +1811,13 @@ EXPORT_SYMBOL(ib_set_vf_guid);
  *
  * Constraints:
  * - The first sg element is allowed to have an offset.
- * - Each sg element must be aligned to page_size (or physically
- *   contiguous to the previous element). In case an sg element has a
- *   non contiguous offset, the mapping prefix will not include it.
+ * - Each sg element must either be aligned to page_size or virtually
+ *   contiguous to the previous element. In case an sg element has a
+ *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
 *   then only max_num_sg entries will be mapped.
- * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, non of these
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
drivers/infiniband/hw/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_INFINIBAND_NES)		+= nes/
 obj-$(CONFIG_INFINIBAND_OCRDMA)		+= ocrdma/
 obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
 obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
+obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
drivers/infiniband/hw/cxgb3 (user ABI header moved to include/uapi/rdma/cxgb3-abi.h):
@@ -36,7 +36,7 @@
 
 #include "cxgb3_offload.h"
 #include "iwch_provider.h"
-#include "iwch_user.h"
+#include <rdma/cxgb3-abi.h>
 #include "iwch.h"
 #include "iwch_cm.h"
 
drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -2258,7 +2258,7 @@ int __init iwch_cm_init(void)
 {
 	skb_queue_head_init(&rxq);
 
-	workq = create_singlethread_workqueue("iw_cxgb3");
+	workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
 	if (!workq)
 		return -ENOMEM;
 
drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -58,7 +58,7 @@
 #include "iwch.h"
 #include "iwch_provider.h"
 #include "iwch_cm.h"
-#include "iwch_user.h"
+#include <rdma/cxgb3-abi.h>
 #include "common.h"
 
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
@@ -1396,6 +1396,7 @@ int iwch_register_device(struct iwch_dev *dev)
 	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
 	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
 	dev->ibdev.node_type = RDMA_NODE_RNIC;
+	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
 	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
 	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
 	dev->ibdev.num_comp_vectors = 1;
drivers/infiniband/hw/cxgb4/cm.c
@@ -4235,7 +4235,7 @@ int __init c4iw_cm_init(void)
 	spin_lock_init(&timeout_lock);
 	skb_queue_head_init(&rxq);
 
-	workq = create_singlethread_workqueue("iw_cxgb4");
+	workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
 	if (!workq)
 		return -ENOMEM;
 
drivers/infiniband/hw/cxgb4/cq.c
@@ -666,6 +666,18 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	return ret;
 }
 
+static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+	struct c4iw_mr *mhp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rhp->lock, flags);
+	mhp = get_mhp(rhp, rkey >> 8);
+	if (mhp)
+		mhp->attr.state = 0;
+	spin_unlock_irqrestore(&rhp->lock, flags);
+}
+
 /*
  * Get one cq entry from c4iw and map it to openib.
  *
@@ -721,6 +733,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
 				wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
 				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+				invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
 			}
 		} else {
 			switch (CQE_OPCODE(&cqe)) {
@@ -746,6 +759,10 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 				break;
 			case FW_RI_FAST_REGISTER:
 				wc->opcode = IB_WC_REG_MR;
+
+				/* Invalidate the MR if the fastreg failed */
+				if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
+					invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe));
 				break;
 			default:
 				printk(KERN_ERR MOD "Unexpected opcode %d "
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -58,7 +58,7 @@
 #include "cxgb4.h"
 #include "cxgb4_uld.h"
 #include "l2t.h"
-#include "user.h"
+#include <rdma/cxgb4-abi.h>
 
 #define DRV_NAME "iw_cxgb4"
 #define MOD DRV_NAME ":"
drivers/infiniband/hw/cxgb4/mem.c
@@ -695,7 +695,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.type = FW_RI_STAG_NSMR;
 	mhp->attr.stag = stag;
-	mhp->attr.state = 1;
+	mhp->attr.state = 0;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
drivers/infiniband/hw/cxgb4/provider.c
@@ -563,6 +563,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
 	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
 	dev->ibdev.node_type = RDMA_NODE_RNIC;
+	BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
 	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
 	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
 	dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
drivers/infiniband/hw/cxgb4/qp.c
@@ -609,10 +609,42 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 	return 0;
 }
 
-static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
-			struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported)
+static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
+			      struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+			      u8 *len16)
+{
+	__be64 *p = (__be64 *)fr->pbl;
+
+	fr->r2 = cpu_to_be32(0);
+	fr->stag = cpu_to_be32(mhp->ibmr.rkey);
+
+	fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+		FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
+		FW_RI_TPTE_STAGSTATE_V(1) |
+		FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
+		FW_RI_TPTE_PDID_V(mhp->attr.pdid));
+	fr->tpte.locread_to_qpid = cpu_to_be32(
+		FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
+		FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
+		FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
+	fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
+		PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
+	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
+	fr->tpte.len_hi = cpu_to_be32(0);
+	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
+	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
+
+	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
+	p[1] = cpu_to_be64((u64)mhp->mpl[1]);
+
+	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
+}
+
+static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
+			struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
+			bool dsgl_supported)
 {
-	struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
 	struct fw_ri_immd *imdp;
 	__be64 *p;
 	int i;
@@ -674,9 +706,12 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
 	return 0;
 }
 
-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
-			  u8 *len16)
+static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe,
+			  struct ib_send_wr *wr, u8 *len16)
 {
+	struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
+
+	mhp->attr.state = 0;
 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
 	wqe->inv.r2 = 0;
 	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@ -816,18 +851,32 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			if (!qhp->wq.sq.oldest_read)
 				qhp->wq.sq.oldest_read = swsqe;
 			break;
-		case IB_WR_REG_MR:
-			fw_opcode = FW_RI_FR_NSMR_WR;
+		case IB_WR_REG_MR: {
+			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
+
 			swsqe->opcode = FW_RI_FAST_REGISTER;
-			err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
-					   qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+			if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
+			    !mhp->attr.state && mhp->mpl_len <= 2) {
+				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
+				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
+						  mhp, &len16);
+			} else {
+				fw_opcode = FW_RI_FR_NSMR_WR;
+				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
+						   mhp, &len16,
+						   qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+				if (err)
+					break;
+			}
+			mhp->attr.state = 1;
 			break;
+		}
 		case IB_WR_LOCAL_INV:
 			if (wr->send_flags & IB_SEND_FENCE)
 				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
 			fw_opcode = FW_RI_INV_LSTAG_WR;
 			swsqe->opcode = FW_RI_LOCAL_INV;
-			err = build_inv_stag(wqe, wr, &len16);
+			err = build_inv_stag(qhp->rhp, wqe, wr, &len16);
 			break;
 		default:
 			PDBG("%s post of type=%d TBD!\n", __func__,
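The fast-path decision logic in the hunk above is compact enough to be easy to misread, so here is a summary drawn directly from the conditions it tests:

	/*
	 * REG_MR fast path: the TPT entry is written inline in the work
	 * request (FW_RI_FR_NSMR_TPTE_WR) only when all three hold;
	 * otherwise the classic FW_RI_FR_NSMR_WR path via build_memreg()
	 * is taken:
	 *
	 *	rdev.lldi.fr_nsmr_tpte_wr_support  - adapter capability
	 *	!mhp->attr.state                   - MR not live in hardware
	 *	mhp->mpl_len <= 2                  - page list fits the WR's
	 *	                                     two inline PBL slots
	 *
	 * Either way the MR is marked live (attr.state = 1) afterwards,
	 * and invalidate paths (cq.c above) reset it to 0.
	 */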
drivers/infiniband/hw/cxgb4/t4.h
@@ -95,6 +95,7 @@ union t4_wr {
 	struct fw_ri_rdma_read_wr read;
 	struct fw_ri_bind_mw_wr bind;
 	struct fw_ri_fr_nsmr_wr fr;
+	struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
 	struct fw_ri_inv_lstag_wr inv;
 	struct t4_status_page status;
 	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
@@ -170,7 +171,7 @@ struct t4_cqe {
 			__be32 msn;
 		} rcqe;
 		struct {
-			u32 nada1;
+			u32 stag;
 			u16 nada2;
 			u16 cidx;
 		} scqe;
@@ -232,6 +233,7 @@ struct t4_cqe {
 
 /* used for SQ completion processing */
 #define CQE_WRID_SQ_IDX(x)	((x)->u.scqe.cidx)
+#define CQE_WRID_FR_STAG(x)	(be32_to_cpu((x)->u.scqe.stag))
 
 /* generic accessor macros */
 #define CQE_WRID_HI(x)		(be32_to_cpu((x)->u.gen.wrid_hi))
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -669,6 +669,18 @@ struct fw_ri_fr_nsmr_wr {
 #define FW_RI_FR_NSMR_WR_DCACPU_G(x)	\
 	(((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
 
+struct fw_ri_fr_nsmr_tpte_wr {
+	__u8   opcode;
+	__u8   flags;
+	__u16  wrid;
+	__u8   r1[3];
+	__u8   len16;
+	__u32  r2;
+	__u32  stag;
+	struct fw_ri_tpte tpte;
+	__u64  pbl[2];
+};
+
 struct fw_ri_inv_lstag_wr {
 	__u8   opcode;
 	__u8   flags;
drivers/infiniband/hw/hfi1/verbs.c
@@ -1441,7 +1441,8 @@ static int modify_device(struct ib_device *device,
 	}
 
 	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
-		memcpy(device->node_desc, device_modify->node_desc, 64);
+		memcpy(device->node_desc, device_modify->node_desc,
+		       IB_DEVICE_NODE_DESC_MAX);
 		for (i = 0; i < dd->num_pports; i++) {
 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
 
drivers/infiniband/hw/hns/Kconfig (new file)
@@ -0,0 +1,10 @@
config INFINIBAND_HNS
	tristate "HNS RoCE Driver"
	depends on NET_VENDOR_HISILICON
	depends on ARM64 && HNS && HNS_DSAF && HNS_ENET
	---help---
	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
	  is used in the Hisilicon Hi1610 and further ICT SoCs.

	  To compile this driver as a module, choose M here: the module
	  will be called hns-roce.
drivers/infiniband/hw/hns/Makefile (new file)
@@ -0,0 +1,8 @@
#
# Makefile for the Hisilicon RoCE drivers.
#

obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
	hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
drivers/infiniband/hw/hns/hns_roce_ah.c (new file)
@@ -0,0 +1,128 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "hns_roce_device.h"

#define HNS_ROCE_PORT_NUM_SHIFT		24
#define HNS_ROCE_VLAN_SL_BIT_MASK	7
#define HNS_ROCE_VLAN_SL_SHIFT		13

struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_gid_attr gid_attr;
	struct hns_roce_ah *ah;
	u16 vlan_tag = 0xffff;
	struct in6_addr in6;
	union ib_gid sgid;
	int ret;

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	/* Get mac address */
	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(ah_attr->grh.dgid.raw));
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, ah->av.mac);
	else
		memcpy(ah->av.mac, ah_attr->dmac, sizeof(ah_attr->dmac));

	/* Get source gid */
	ret = ib_get_cached_gid(ibpd->device, ah_attr->port_num,
				ah_attr->grh.sgid_index, &sgid, &gid_attr);
	if (ret) {
		dev_err(dev, "get sgid failed! ret = %d\n", ret);
		kfree(ah);
		return ERR_PTR(ret);
	}

	if (gid_attr.ndev) {
		if (is_vlan_dev(gid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
		dev_put(gid_attr.ndev);
	}

	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & HNS_ROCE_VLAN_SL_BIT_MASK) <<
			     HNS_ROCE_VLAN_SL_SHIFT;

	ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | (ah_attr->port_num <<
				     HNS_ROCE_PORT_NUM_SHIFT));
	ah->av.gid_index = ah_attr->grh.sgid_index;
	ah->av.vlan = cpu_to_le16(vlan_tag);
	dev_dbg(dev, "gid_index = 0x%x, vlan = 0x%x\n", ah->av.gid_index,
		ah->av.vlan);

	if (ah_attr->static_rate)
		ah->av.stat_rate = IB_RATE_10_GBPS;

	memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, HNS_ROCE_GID_SIZE);
	ah->av.sl_tclass_flowlabel = cpu_to_le32(ah_attr->sl <<
						 HNS_ROCE_SL_SHIFT);

	return &ah->ibah;
}

int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct hns_roce_ah *ah = to_hr_ah(ibah);

	memset(ah_attr, 0, sizeof(*ah_attr));

	ah_attr->sl = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
		      HNS_ROCE_SL_SHIFT;
	ah_attr->port_num = le32_to_cpu(ah->av.port_pd) >>
			    HNS_ROCE_PORT_NUM_SHIFT;
	ah_attr->static_rate = ah->av.stat_rate;
	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.traffic_class = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				     HNS_ROCE_TCLASS_SHIFT;
	ah_attr->grh.flow_label = le32_to_cpu(ah->av.sl_tclass_flowlabel) &
				  HNS_ROCE_FLOW_LABLE_MASK;
	ah_attr->grh.hop_limit = ah->av.hop_limit;
	ah_attr->grh.sgid_index = ah->av.gid_index;
	memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, HNS_ROCE_GID_SIZE);

	return 0;
}

int hns_roce_destroy_ah(struct ib_ah *ah)
{
	kfree(to_hr_ah(ah));

	return 0;
}
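The address-vector field packing that create/query rely on can be read off the code above; the summary below is for orientation only (HNS_ROCE_PORT_NUM_SHIFT is defined in this file; the SL/TCLASS shift and flow-label mask constants live in hns_roce_device.h, outside this diff):

	/*
	 * Bit layout implied by hns_roce_create_ah()/hns_roce_query_ah():
	 *
	 *	av.port_pd:
	 *		bits [31 : HNS_ROCE_PORT_NUM_SHIFT]	port number
	 *		bits [HNS_ROCE_PORT_NUM_SHIFT-1 : 0]	PD number (pdn)
	 *
	 *	av.sl_tclass_flowlabel:
	 *		bits [31 : HNS_ROCE_SL_SHIFT]		service level
	 *		bits [HNS_ROCE_SL_SHIFT-1 :
	 *		      HNS_ROCE_TCLASS_SHIFT]		traffic class
	 *		bits [HNS_ROCE_TCLASS_SHIFT-1 : 0]	flow label
	 *			(masked by HNS_ROCE_FLOW_LABLE_MASK)
	 */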
drivers/infiniband/hw/hns/hns_roce_alloc.c (new file)
@@ -0,0 +1,257 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
	int ret = 0;

	spin_lock(&bitmap->lock);
	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (*obj < bitmap->max) {
		set_bit(*obj, bitmap->table);
		bitmap->last = (*obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		*obj |= bitmap->top;
	} else {
		ret = -1;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
{
	hns_roce_bitmap_free_range(bitmap, obj, 1);
}

int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj)
{
	int ret = 0;
	int i;

	if (likely(cnt == 1 && align == 1))
		return hns_roce_bitmap_alloc(bitmap, obj);

	spin_lock(&bitmap->lock);

	*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					  bitmap->last, cnt, align - 1);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
						  cnt, align - 1);
	}

	if (*obj < bitmap->max) {
		for (i = 0; i < cnt; i++)
			set_bit(*obj + i, bitmap->table);

		if (*obj == bitmap->last) {
			bitmap->last = (*obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		*obj |= bitmap->top;
	} else {
		ret = -1;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt)
{
	int i;

	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);

	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
		       & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top)
{
	u32 i;

	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
				GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved_bot; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
	kfree(bitmap->table);
}

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf)
{
	int i;
	struct device *dev = &hr_dev->pdev->dev;
	u32 bits_per_long = BITS_PER_LONG;

	if (buf->nbufs == 1) {
		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
	} else {
		if (bits_per_long == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}

int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf)
{
	int i = 0;
	dma_addr_t t;
	struct page **pages;
	struct device *dev = &hr_dev->pdev->dev;
	u32 bits_per_long = BITS_PER_LONG;

	/* SQ/RQ buf less than one page, SQ + RQ = 8K */
	if (size <= max_direct) {
		buf->nbufs = 1;
		/* Npages calculated by page_size */
		buf->npages = 1 << get_order(size);
		buf->page_shift = PAGE_SHIFT;
		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
		buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 GFP_KERNEL);

		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf = dma_alloc_coherent(dev,
								   PAGE_SIZE, &t,
								   GFP_KERNEL);

			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}
		if (bits_per_long == 64) {
			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
					      GFP_KERNEL);
			if (!pages)
				goto err_free;

			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);

			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
					       PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	hns_roce_buf_free(hr_dev, size, buf);
	return -ENOMEM;
}

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	hns_roce_cleanup_mr_table(hr_dev);
	hns_roce_cleanup_pd_table(hr_dev);
	hns_roce_cleanup_uar_table(hr_dev);
}
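A hedged usage sketch of the round-robin bitmap allocator above (the caller function is hypothetical; only the hns_roce_bitmap_* entry points come from the file itself). Allocation scans forward from 'last', wraps once, and folds 'top' into the returned object as its high bits:

	/* Hypothetical caller of the bitmap allocator above. */
	static int example_alloc_obj(struct hns_roce_bitmap *bitmap)
	{
		unsigned long obj;

		/* hns_roce_bitmap_alloc() returns 0 on success, -1 when full */
		if (hns_roce_bitmap_alloc(bitmap, &obj))
			return -ENOMEM;

		/* ... use obj as a PD/CQ/QP number ... */

		hns_roce_bitmap_free(bitmap, obj);
		return 0;
	}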
@ -0,0 +1,368 @@
|
|||
/*
|
||||
* Copyright (c) 2016 Hisilicon Limited.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include "hns_roce_common.h"
|
||||
#include "hns_roce_device.h"
|
||||
#include "hns_roce_cmd.h"
|
||||
|
||||
#define CMD_POLL_TOKEN 0xffff
|
||||
#define CMD_MAX_NUM 32
|
||||
#define STATUS_MASK 0xff
|
||||
#define CMD_TOKEN_MASK 0x1f
|
||||
#define GO_BIT_TIMEOUT_MSECS 10000
|
||||
|
||||
enum {
|
||||
HCR_TOKEN_OFFSET = 0x14,
|
||||
HCR_STATUS_OFFSET = 0x18,
|
||||
HCR_GO_BIT = 15,
|
||||
};
|
||||
|
||||
static int cmd_pending(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
|
||||
|
||||
return (!!(status & (1 << HCR_GO_BIT)));
|
||||
}
|
||||
|
||||
/* this function should be serialized with "hcr_mutex" */
|
||||
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
|
||||
u64 in_param, u64 out_param,
|
||||
u32 in_modifier, u8 op_modifier, u16 op,
|
||||
u16 token, int event)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
u32 __iomem *hcr = (u32 *)cmd->hcr;
|
||||
int ret = -EAGAIN;
|
||||
unsigned long end;
|
||||
u32 val = 0;
|
||||
|
||||
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
|
||||
while (cmd_pending(hr_dev)) {
|
||||
if (time_after(jiffies, end)) {
|
||||
dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
|
||||
(int)end);
|
||||
goto out;
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
|
||||
op);
|
||||
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
|
||||
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
|
||||
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
|
||||
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
|
||||
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
|
||||
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
|
||||
|
||||
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
|
||||
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
|
||||
__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
|
||||
/* Memory barrier */
|
||||
wmb();
|
||||
|
||||
__raw_writel(cpu_to_le32(val), hcr + 5);
|
||||
|
||||
mmiowb();
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, u32 in_modifier,
|
||||
u8 op_modifier, u16 op, u16 token,
|
||||
int event)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
int ret = -EAGAIN;
|
||||
|
||||
mutex_lock(&cmd->hcr_mutex);
|
||||
ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op, token,
|
||||
event);
|
||||
mutex_unlock(&cmd->hcr_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* this should be called with "poll_sem" */
|
||||
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, unsigned long in_modifier,
|
||||
u8 op_modifier, u16 op,
|
||||
unsigned long timeout)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
u8 __iomem *hcr = hr_dev->cmd.hcr;
|
||||
unsigned long end = 0;
|
||||
u32 status = 0;
|
||||
int ret;
|
||||
|
||||
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op,
|
||||
CMD_POLL_TOKEN, 0);
|
||||
if (ret) {
|
||||
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
end = msecs_to_jiffies(timeout) + jiffies;
|
||||
while (cmd_pending(hr_dev) && time_before(jiffies, end))
|
||||
cond_resched();
|
||||
|
||||
if (cmd_pending(hr_dev)) {
|
||||
dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
status = le32_to_cpu((__force __be32)
|
||||
__raw_readl(hcr + HCR_STATUS_OFFSET));
|
||||
if ((status & STATUS_MASK) != 0x1) {
|
||||
dev_err(dev, "mailbox status 0x%x!\n", status);
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, unsigned long in_modifier,
|
||||
u8 op_modifier, u16 op, unsigned long timeout)
|
||||
{
|
||||
int ret;
|
||||
|
||||
down(&hr_dev->cmd.poll_sem);
|
||||
ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
|
||||
op_modifier, op, timeout);
|
||||
up(&hr_dev->cmd.poll_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
|
||||
u64 out_param)
|
||||
{
|
||||
struct hns_roce_cmd_context
|
||||
*context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
|
||||
|
||||
if (token != context->token)
|
||||
return;
|
||||
|
||||
context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
|
||||
context->out_param = out_param;
|
||||
complete(&context->done);
|
||||
}
|
||||
|
||||
/* this should be called with "use_events" */
|
||||
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, unsigned long in_modifier,
|
||||
u8 op_modifier, u16 op,
|
||||
unsigned long timeout)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct hns_roce_cmd_context *context;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&cmd->context_lock);
|
||||
WARN_ON(cmd->free_head < 0);
|
||||
context = &cmd->context[cmd->free_head];
|
||||
context->token += cmd->token_mask + 1;
|
||||
cmd->free_head = context->next;
|
||||
spin_unlock(&cmd->context_lock);
|
||||
|
||||
init_completion(&context->done);
|
||||
|
||||
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op,
|
||||
context->token, 1);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* It is timeout when wait_for_completion_timeout return 0
|
||||
* The return value is the time limit set in advance
|
||||
* how many seconds showing
|
||||
*/
|
||||
if (!wait_for_completion_timeout(&context->done,
|
||||
msecs_to_jiffies(timeout))) {
|
||||
dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = context->result;
|
||||
if (ret) {
|
||||
dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
spin_lock(&cmd->context_lock);
|
||||
context->next = cmd->free_head;
|
||||
cmd->free_head = context - cmd->context;
|
||||
spin_unlock(&cmd->context_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, unsigned long in_modifier,
|
||||
u8 op_modifier, u16 op, unsigned long timeout)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
down(&hr_dev->cmd.event_sem);
|
||||
ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op, timeout);
|
||||
up(&hr_dev->cmd.event_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
|
||||
unsigned long in_modifier, u8 op_modifier, u16 op,
|
||||
unsigned long timeout)
|
||||
{
|
||||
if (hr_dev->cmd.use_events)
|
||||
return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op,
|
||||
timeout);
|
||||
else
|
||||
return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op,
|
||||
timeout);
|
||||
}
|
||||
|
||||
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;

	mutex_init(&hr_dev->cmd.hcr_mutex);
	sema_init(&hr_dev->cmd.poll_sem, 1);
	hr_dev->cmd.use_events = 0;
	hr_dev->cmd.toggle = 1;
	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
	hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
					   HNS_ROCE_MAILBOX_SIZE,
					   HNS_ROCE_MAILBOX_SIZE, 0);
	if (!hr_dev->cmd.pool)
		return -ENOMEM;

	return 0;
}

void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
{
	dma_pool_destroy(hr_dev->cmd.pool);
}
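/*
 * Switch the command interface to event mode: allocate one context per
 * in-flight command, link the contexts into a free list, and take
 * poll_sem so the polling path stays blocked until
 * hns_roce_cmd_use_polling() hands it back.
 */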
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->context = kmalloc(hr_cmd->max_cmds *
				  sizeof(struct hns_roce_cmd_context),
				  GFP_KERNEL);
	if (!hr_cmd->context)
		return -ENOMEM;

	for (i = 0; i < hr_cmd->max_cmds; ++i) {
		hr_cmd->context[i].token = i;
		hr_cmd->context[i].next = i + 1;
	}

	hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
	hr_cmd->free_head = 0;

	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
	spin_lock_init(&hr_cmd->context_lock);

	hr_cmd->token_mask = CMD_TOKEN_MASK;
	hr_cmd->use_events = 1;

	down(&hr_cmd->poll_sem);

	return 0;
}

void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->use_events = 0;

	for (i = 0; i < hr_cmd->max_cmds; ++i)
		down(&hr_cmd->event_sem);

	kfree(hr_cmd->context);
	up(&hr_cmd->poll_sem);
}
struct hns_roce_cmd_mailbox
	*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}

void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

@@ -0,0 +1,80 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_CMD_H
#define _HNS_ROCE_CMD_H

#define HNS_ROCE_MAILBOX_SIZE	4096

enum {
	/* TPT commands */
	HNS_ROCE_CMD_SW2HW_MPT		= 0xd,
	HNS_ROCE_CMD_HW2SW_MPT		= 0xf,

	/* CQ commands */
	HNS_ROCE_CMD_SW2HW_CQ		= 0x16,
	HNS_ROCE_CMD_HW2SW_CQ		= 0x17,

	/* QP/EE commands */
	HNS_ROCE_CMD_RST2INIT_QP	= 0x19,
	HNS_ROCE_CMD_INIT2RTR_QP	= 0x1a,
	HNS_ROCE_CMD_RTR2RTS_QP		= 0x1b,
	HNS_ROCE_CMD_RTS2RTS_QP		= 0x1c,
	HNS_ROCE_CMD_2ERR_QP		= 0x1e,
	HNS_ROCE_CMD_RTS2SQD_QP		= 0x1f,
	HNS_ROCE_CMD_SQD2SQD_QP		= 0x38,
	HNS_ROCE_CMD_SQD2RTS_QP		= 0x20,
	HNS_ROCE_CMD_2RST_QP		= 0x21,
	HNS_ROCE_CMD_QUERY_QP		= 0x22,
};

enum {
	HNS_ROCE_CMD_TIME_CLASS_A	= 10000,
	HNS_ROCE_CMD_TIME_CLASS_B	= 10000,
	HNS_ROCE_CMD_TIME_CLASS_C	= 10000,
};
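/*
 * A mailbox is a single HNS_ROCE_MAILBOX_SIZE buffer from the command
 * DMA pool: buf is the kernel virtual address, dma the device-visible
 * bus address posted in the mailbox command.
 */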
struct hns_roce_cmd_mailbox {
	void		*buf;
	dma_addr_t	dma;
};

int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      unsigned long in_modifier, u8 op_modifier, u16 op,
		      unsigned long timeout);

struct hns_roce_cmd_mailbox
	*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmd_mailbox *mailbox);

#endif /* _HNS_ROCE_CMD_H */

@@ -0,0 +1,325 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H

#ifndef assert
#define assert(cond)
#endif

#define roce_write(dev, reg, val)	writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg)		readl((dev)->reg_base + (reg))
#define roce_raw_write(value, addr) \
	__raw_writel((__force u32)cpu_to_le32(value), (addr))

#define roce_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))

#define roce_get_bit(origin, shift) \
	roce_get_field((origin), (1ul << (shift)), (shift))

#define roce_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= (((u32)(val) << (shift)) & (mask)); \
	} while (0)

#define roce_set_bit(origin, shift, val) \
	roce_set_field((origin), (1ul << (shift)), (shift), (val))
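/*
 * Each register field below is described by a shift (_S) and, for
 * multi-bit fields, a mask (_M) already shifted into place.  A typical
 * (illustrative) read-modify-write with the accessors above, where
 * port_st is a placeholder value:
 *
 *	u32 val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
 *	roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
 *		       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, port_st);
 *	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
 */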
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4

#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5

#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6

#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
	(((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)

#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16

#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
	(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)

#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
	(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)

#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
	(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)

#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
	(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)

#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
	(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)

#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
	(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)

#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
	(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)

#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
	(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)

#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
	(((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)

#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
	(((1UL << 15) - 1) << \
	ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)

#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
	(((1UL << 4) - 1) << \
	ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)

#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20

#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21

#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
	(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)

#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
	(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)

#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
	(((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)

#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
	(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)

#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
	(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)

#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
	(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)

#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
	(((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)

#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19

#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
	(((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)

#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
	(((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)

#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31

#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
	(((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)

#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
	(((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)

#define ROCEE_MB6_ROCEE_MB_CMD_S 0
#define ROCEE_MB6_ROCEE_MB_CMD_M \
	(((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)

#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
	(((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)

#define ROCEE_MB6_ROCEE_MB_EVENT_S 14

#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15

#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
	(((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)

#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
	(((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)

#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
	(((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)

#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
	(((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)

#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31

#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
	(((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)

#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
	(((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)

#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
	(((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)

#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
	(((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)

#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17

#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
	(((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)

#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
	(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)

#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
	(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)

#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0

#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1

#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0

#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
	(((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)

#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
	(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)

#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
	(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)

/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG			0x0
#define ROCEE_VENDOR_PART_ID_REG		0x4

#define ROCEE_HW_VERSION_REG			0x8

#define ROCEE_SYS_IMAGE_GUID_L_REG		0xC
#define ROCEE_SYS_IMAGE_GUID_H_REG		0x10

#define ROCEE_PORT_GID_L_0_REG			0x50
#define ROCEE_PORT_GID_ML_0_REG			0x54
#define ROCEE_PORT_GID_MH_0_REG			0x58
#define ROCEE_PORT_GID_H_0_REG			0x5C

#define ROCEE_BT_CMD_H_REG			0x204

#define ROCEE_SMAC_L_0_REG			0x240
#define ROCEE_SMAC_H_0_REG			0x244

#define ROCEE_QP1C_CFG3_0_REG			0x27C

#define ROCEE_CAEP_AEQE_CONS_IDX_REG		0x3AC
#define ROCEE_CAEP_CEQC_CONS_IDX_0_REG		0x3BC

#define ROCEE_ECC_UCERR_ALM1_REG		0xB38
#define ROCEE_ECC_UCERR_ALM2_REG		0xB3C
#define ROCEE_ECC_CERR_ALM1_REG			0xB44
#define ROCEE_ECC_CERR_ALM2_REG			0xB48

#define ROCEE_ACK_DELAY_REG			0x14
#define ROCEE_GLB_CFG_REG			0x18

#define ROCEE_DMAE_USER_CFG1_REG		0x40
#define ROCEE_DMAE_USER_CFG2_REG		0x44

#define ROCEE_DB_SQ_WL_REG			0x154
#define ROCEE_DB_OTHERS_WL_REG			0x158
#define ROCEE_RAQ_WL_REG			0x15C
#define ROCEE_WRMS_POL_TIME_INTERVAL_REG	0x160
#define ROCEE_EXT_DB_SQ_REG			0x164
#define ROCEE_EXT_DB_SQ_H_REG			0x168
#define ROCEE_EXT_DB_OTH_REG			0x16C

#define ROCEE_EXT_DB_OTH_H_REG			0x170
#define ROCEE_EXT_DB_SQ_WL_EMPTY_REG		0x174
#define ROCEE_EXT_DB_SQ_WL_REG			0x178
#define ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG	0x17C
#define ROCEE_EXT_DB_OTHERS_WL_REG		0x180
#define ROCEE_EXT_RAQ_REG			0x184
#define ROCEE_EXT_RAQ_H_REG			0x188

#define ROCEE_CAEP_CE_INTERVAL_CFG_REG		0x190
#define ROCEE_CAEP_CE_BURST_NUM_CFG_REG		0x194
#define ROCEE_BT_CMD_L_REG			0x200

#define ROCEE_MB1_REG				0x210
#define ROCEE_DB_SQ_L_0_REG			0x230
#define ROCEE_DB_OTHERS_L_0_REG			0x238
#define ROCEE_QP1C_CFG0_0_REG			0x270

#define ROCEE_CAEP_AEQC_AEQE_SHIFT_REG		0x3A0
#define ROCEE_CAEP_CEQC_SHIFT_0_REG		0x3B0
#define ROCEE_CAEP_CE_IRQ_MASK_0_REG		0x3C0
#define ROCEE_CAEP_CEQ_ALM_OVF_0_REG		0x3C4
#define ROCEE_CAEP_AE_MASK_REG			0x6C8
#define ROCEE_CAEP_AE_ST_REG			0x6CC

#define ROCEE_SDB_ISSUE_PTR_REG			0x758
#define ROCEE_SDB_SEND_PTR_REG			0x75C
#define ROCEE_SDB_INV_CNT_REG			0x9A4
#define ROCEE_ECC_UCERR_ALM0_REG		0xB34
#define ROCEE_ECC_CERR_ALM0_REG			0xB40

#endif /* _HNS_ROCE_COMMON_H */

@@ -0,0 +1,446 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include "hns_roce_common.h"

static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
	struct ib_cq *ibcq = &hr_cq->ib_cq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
				 enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;
	struct ib_cq *ibcq;

	ibcq = &hr_cq->ib_cq;
	hr_dev = to_hr_dev(ibcq->device);

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(&hr_dev->pdev->dev,
			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
			event_type, hr_cq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
				 HNS_ROCE_CMD_SW2HW_CQ,
				 HNS_ROCE_CMD_TIME_CLASS_A);
}
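/*
 * CQ allocation sequence: find the MTT entries backing the CQ buffer,
 * take a CQN from the bitmap, pin the CQC entry in the HEM table,
 * publish the CQ in the radix tree, then hand the CQ context to
 * hardware with a SW2HW_CQ mailbox command.
 */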
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
			     struct hns_roce_mtt *hr_mtt,
			     struct hns_roce_uar *hr_uar,
			     struct hns_roce_cq *hr_cq, int vector,
			     int collapsed)
{
	struct hns_roce_cmd_mailbox *mailbox = NULL;
	struct hns_roce_cq_table *cq_table = NULL;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle;
	u64 *mtts = NULL;
	int ret = 0;

	cq_table = &hr_dev->cq_table;

	/* Get the physical address of the cq buf */
	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   hr_mtt->first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "CQ alloc: failed to find cq buf addr.\n");
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "CQ alloc: invalid vector.\n");
		return -EINVAL;
	}
	hr_cq->vector = vector;

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret == -1) {
		dev_err(dev, "CQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

	/* Get CQC memory HEM(Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to get context mem.\n");
		goto err_out;
	}

	/* Insert the cq into the radix tree, keyed by cqn */
	spin_lock_irq(&cq_table->lock);
	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
	spin_unlock_irq(&cq_table->lock);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to radix_tree_insert.\n");
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_radix;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
			      nent, vector);

	/* Send mailbox to hw */
	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev, "CQ alloc: SW2HW_CQ mailbox cmd failed.\n");
		goto err_radix;
	}

	hr_cq->cons_index = 0;
	hr_cq->uar = hr_uar;

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
	return ret;
}

static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
				 HNS_ROCE_CMD_TIME_CLASS_A);
}

static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
	if (ret)
		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	/* Wait until any in-flight interrupt handling has finished */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
}

static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
				   struct ib_ucontext *context,
				   struct hns_roce_cq_buf *buf,
				   struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int ret;

	*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
				ilog2((unsigned int)(*umem)->page_size),
				&buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	ib_umem_release(*umem);
	return ret;
}

static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, u32 nent)
{
	int ret;

	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
				 PAGE_SIZE * 2, &buf->hr_buf);
	if (ret)
		goto out;

	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
				buf->hr_buf.page_shift, &buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
out:
	return ret;
}

static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, int cqe)
{
	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
}
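/*
 * Verbs entry point for CQ creation.  A CQ created from user space
 * (context != NULL) lives in a user-provided buffer that is pinned and
 * written into the MTT; a kernel CQ gets a driver-allocated buffer and
 * a doorbell register derived from the privileged UAR.
 */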
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_cq ucmd;
	struct hns_roce_cq *hr_cq = NULL;
	struct hns_roce_uar *uar = NULL;
	int vector = attr->comp_vector;
	int cq_entries = attr->cqe;
	int ret = 0;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return ERR_PTR(-EINVAL);
	}

	hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
	if (!hr_cq)
		return ERR_PTR(-ENOMEM);

	/* In the v1 engine, enforce the minimum CQ depth */
	if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
		cq_entries = HNS_ROCE_MIN_CQE_NUM;

	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	mutex_init(&hr_cq->resize_mutex);
	spin_lock_init(&hr_cq->lock);
	hr_cq->hr_resize_buf = NULL;
	hr_cq->resize_umem = NULL;

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "Failed to copy_from_udata.\n");
			ret = -EFAULT;
			goto err_cq;
		}

		/* Get user space address, write it into the mtt table */
		ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
					      &hr_cq->umem, ucmd.buf_addr,
					      cq_entries);
		if (ret) {
			dev_err(dev, "Failed to get_cq_umem.\n");
			goto err_cq;
		}

		/* Get user space parameters */
		uar = &to_hr_ucontext(context)->uar;
	} else {
		/* Init the mtt table and write the buf address into it */
		ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
					       cq_entries);
		if (ret) {
			dev_err(dev, "Failed to alloc_cq_buf.\n");
			goto err_cq;
		}

		uar = &hr_dev->priv_uar;
		hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
				 0x1000 * uar->index;
	}

	/* Allocate cq index, fill cq_context */
	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
				uar, hr_cq, vector, 0);
	if (ret) {
		dev_err(dev, "Create CQ failed: cq_alloc.\n");
		goto err_mtt;
	}

	/* Install the completion and async event handlers */
	hr_cq->comp = hns_roce_ib_cq_comp;
	hr_cq->event = hns_roce_ib_cq_event;
	hr_cq->cq_depth = cq_entries;

	if (context) {
		if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
			ret = -EFAULT;
			goto err_mtt;
		}
	}

	return &hr_cq->ib_cq;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	if (context)
		ib_umem_release(hr_cq->umem);
	else
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
					hr_cq->ib_cq.cqe);

err_cq:
	kfree(hr_cq);
	return ERR_PTR(ret);
}

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	hns_roce_free_cq(hr_dev, hr_cq);
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ib_cq->uobject)
		ib_umem_release(hr_cq->umem);
	else
		/* Free the CQ buffer */
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);

	kfree(hr_cq);

	return 0;
}

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	cq->comp(cq);
}
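/*
 * Async (error) events take a reference on the CQ so it cannot be
 * freed while the event handler runs; the final reference drop wakes
 * up any waiter on cq->free.
 */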
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&cq_table->tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	if (!cq) {
		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}

@@ -0,0 +1,734 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

#define MAC_ADDR_OCTET_NUM		6
#define HNS_ROCE_MAX_MSG_LEN		0x80000000

/* round a up to the next multiple of b */
#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

#define HNS_ROCE_IB_MIN_SQ_STRIDE	6

#define HNS_ROCE_BA_SIZE		(32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM		0x40
#define HNS_ROCE_MIN_WQE_NUM		0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM	0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM	0x100000

#define HNS_ROCE_MAX_IRQ_NUM		34

#define HNS_ROCE_COMP_VEC_NUM		32

#define HNS_ROCE_AEQE_VEC_NUM		1
#define HNS_ROCE_AEQE_OF_VEC_NUM	1

/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT		29
#define HNS_ROCE_TCLASS_SHIFT		20
#define HNS_ROCE_FLOW_LABLE_MASK	0xfffff

#define HNS_ROCE_MAX_PORTS		6
#define HNS_ROCE_MAX_GID_NUM		16
#define HNS_ROCE_GID_SIZE		16

#define MR_TYPE_MR			0x00
#define MR_TYPE_DMA			0x03

#define PKEY_ID				0xffff
#define NODE_DESC_SIZE			64

#define SERV_TYPE_RC			0
#define SERV_TYPE_RD			1
#define SERV_TYPE_UC			2
#define SERV_TYPE_UD			3

#define PAGES_SHIFT_8			8
#define PAGES_SHIFT_16			16
#define PAGES_SHIFT_24			24
#define PAGES_SHIFT_32			32

enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG			= 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED		= 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST			= 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED			= 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR		= 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR	= 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR	= 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH		= 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH		= 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR		= 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR		= 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW			= 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID		= 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE			= 0x0f,
	/* 0x10 and 0x11 are currently unused */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW			= 0x12,
	HNS_ROCE_EVENT_TYPE_MB				= 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW		= 0x14,
};
/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR		= 1,
	HNS_ROCE_LWQCE_MTU_ERROR		= 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR	= 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR		= 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR	= 5,
	HNS_ROCE_LWQCE_SL_ERROR			= 6,
	HNS_ROCE_LWQCE_PORT_ERROR		= 7,
};

/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION		= 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR		= 2,
	HNS_ROCE_LAVWQE_VA_ERROR		= 3,
	HNS_ROCE_LAVWQE_PD_ERROR		= 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR		= 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR		= 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR	= 7,
};

/* DOORBELL overflow subtype */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF		= 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF		= 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF		= 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF		= 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP		= 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP		= 6,
};

enum {
	/* RQ&SRQ related operations */
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE	= 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};

#define HNS_ROCE_CMD_SUCCESS		1

#define HNS_ROCE_PORT_DOWN		0
#define HNS_ROCE_PORT_UP		1

#define HNS_ROCE_MTT_ENTRY_PER_SEG	8

#define PAGE_ADDR_SHIFT			12

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
};

struct hns_roce_pd {
	struct ib_pd	ibpd;
	unsigned long	pdn;
};

struct hns_roce_bitmap {
	/* Position at which the last bitmap traversal stopped */
	unsigned long	last;
	unsigned long	top;
	unsigned long	max;
	unsigned long	reserved_top;
	unsigned long	mask;
	spinlock_t	lock;
	unsigned long	*table;
};

/*
 * Buddy allocator built from one bitmap per order: order 0 is the
 * largest bitmap and order max_order has a single bit, i.e. order o
 * holds 1 << (max_order - o) bits.  Each bit tracks the free/used
 * state of one block: 1 means idle and available, 0 means not
 * available.  Initially every bitmap is zero except for the single
 * bit at max_order.
 */
struct hns_roce_buddy {
	/* One bitmap per order level */
	unsigned long **bits;
	/* Number of available bits in each order-level bitmap */
	u32            *num_free;
	int             max_order;
	spinlock_t      lock;
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* Number of elements in the HEM array */
	unsigned long	num_hem;
	/* Total number of objects recorded by the HEM entries */
	unsigned long	num_obj;
	/* Size of a single object */
	unsigned long	obj_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
};

struct hns_roce_mtt {
	unsigned long	first_seg;
	int		order;
	int		page_shift;
};

/* Only 4K page size is supported for mr register */
#define MR_SIZE_4K 0

struct hns_roce_mr {
	struct ib_mr		ibmr;
	struct ib_umem		*umem;
	u64			iova; /* MR's original virtual addr */
	u64			size; /* Address range of MR */
	u32			key; /* Key of MR */
	u32			pd;  /* PD num of MR */
	u32			access; /* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type; /* MR's register type */
	u64			*pbl_buf; /* MR's PBL space */
	dma_addr_t		pbl_dma_addr; /* MR's PBL space PA */
};

struct hns_roce_mr_table {
	struct hns_roce_bitmap		mtpt_bitmap;
	struct hns_roce_buddy		mtt_buddy;
	struct hns_roce_hem_table	mtt_table;
	struct hns_roce_hem_table	mtpt_table;
};

struct hns_roce_wq {
	u64		*wrid; /* Work request ID */
	spinlock_t	lock;
	int		wqe_cnt; /* WQE num */
	u32		max_post;
	int		max_gs;
	int		offset;
	int		wqe_shift; /* WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg_l;
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

struct hns_roce_buf {
	struct hns_roce_buf_list	direct;
	struct hns_roce_buf_list	*page_list;
	int				nbufs;
	u32				npages;
	int				page_shift;
};

struct hns_roce_cq_buf {
	struct hns_roce_buf hr_buf;
	struct hns_roce_mtt hr_mtt;
};

struct hns_roce_cq_resize {
	struct hns_roce_cq_buf	hr_buf;
	int			cqe;
};

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_cq_buf		hr_buf;
	/* Pointer to store information after resize */
	struct hns_roce_cq_resize	*hr_resize_buf;
	spinlock_t			lock;
	struct mutex			resize_mutex;
	struct ib_umem			*umem;
	struct ib_umem			*resize_umem;
	void (*comp)(struct hns_roce_cq *);
	void (*event)(struct hns_roce_cq *, enum hns_roce_event);

	struct hns_roce_uar		*uar;
	u32				cq_depth;
	u32				cons_index;
	void __iomem			*cq_db_l;
	void __iomem			*tptr_addr;
	unsigned long			cqn;
	u32				vector;
	atomic_t			refcount;
	struct completion		free;
};

struct hns_roce_srq {
	struct ib_srq	ibsrq;
	int		srqn;
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_qp_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
};

struct hns_roce_cq_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;
	struct radix_tree_root		tree;
	struct hns_roce_hem_table	table;
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list	*e_raq_buf;
};

struct hns_roce_av {
	__le32		port_pd;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__le32		sl_tclass_flowlabel;
	u8		dgid[HNS_ROCE_GID_SIZE];
	u8		mac[6];
	__le16		vlan;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	u8 __iomem		*hcr;
	struct mutex		hcr_mutex;
	struct semaphore	poll_sem;
	/*
	 * Event mode: serializes use of the command contexts so that
	 * no more than max_cmds commands are in flight at once.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Token mask: max_cmds rounded to a power of two, minus one,
	 * so tokens can be mapped back to contexts with a simple AND.
	 */
	u16			token_mask;
	/*
	 * Non-zero when the command interface runs in event mode.  It
	 * starts out in poll mode, switches to event mode once the
	 * command event queue is ready, and switches back to poll mode
	 * when the device is closed.
	 */
	u8			use_events;
	u8			toggle;
};
struct hns_roce_dev;

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_buf	hr_buf;
	struct hns_roce_wq	rq;
	__le64			doorbell_qpn;
	__le32			sq_signal_bits;
	u32			sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct hns_roce_wq	sq;

	struct ib_umem		*umem;
	struct hns_roce_mtt	mtt;
	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32			access_flags;
	u32			pkey_index;
	void (*event)(struct hns_roce_qp *, enum hns_roce_event);
	unsigned long		qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct hns_roce_sqp {
	struct hns_roce_qp	hr_qp;
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device	*netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	struct notifier_block	nb_inet;
	/* The 16 GIDs are shared by the 6 ports in the v1 engine. */
	union ib_gid		gid_table[HNS_ROCE_MAX_GID_NUM];
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_eq {
	struct hns_roce_dev	*hr_dev;
	void __iomem		*doorbell;

	int			type_flag; /* AEQ: 1, CEQ: 0 */
	int			eqn;
	u32			entries;
	int			log_entries;
	int			eqe_size;
	int			irq;
	int			log_page_size;
	int			cons_index;
	struct hns_roce_buf_list *buf_list;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
	void __iomem		**eqc_base;
};

struct hns_roce_caps {
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;	/* 2 */
	u32		max_sq_inline;	/* 32 */
	u32		max_rq_sg;	/* 2 */
	int		num_qps;	/* 256k */
	u32		max_wqes;	/* 16k */
	u32		max_sq_desc_sz;	/* 64 */
	u32		max_rq_desc_sz;	/* 64 */
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	int		sqp_start;
	int		num_cqs;
	int		max_cqes;
	int		reserved_cqs;
	int		num_aeq_vectors;	/* 1 */
	int		num_comp_vectors;	/* 32 ceq */
	int		num_other_vectors;
	int		num_mtpts;
	u32		num_mtt_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		mtt_entry_sz;
	u32		cq_entry_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_entry_sz;
	int		irrl_entry_sz;
	int		cqc_entry_sz;
	int		aeqe_depth;
	int		ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
	enum ib_mtu	max_mtu;
};
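/*
 * Per-engine hardware ops table; the v1 engine (hns_roce_hw_v1,
 * declared at the end of this header) is the implementation added by
 * this series.
 */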
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	void (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
			union ib_gid *gid);
	void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle, int nent, u32 vector);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp);
	int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	void	*priv;
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device	*pdev;
	struct hns_roce_uar	priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		cq_db_lock;
	spinlock_t		bt_cmd_lock;
	struct hns_roce_ib_iboe iboe;

	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	struct hns_roce_caps	caps;
	struct radix_tree_root	qp_table_tree;

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
	u64			sys_image_guid;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_rev;
	void __iomem		*priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap	pd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;

	int			cmd_mod;
	int			loop_idc;
	struct hns_roce_hw	*hw;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
	*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
	return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}
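/*
 * Doorbells are written as one 64-bit store so the hardware never
 * observes a half-updated doorbell record.
 */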
static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
|
||||
{
|
||||
__raw_writeq(*(u64 *) val, dest);
|
||||
}
|
||||
|
||||
static inline struct hns_roce_qp
|
||||
*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
|
||||
{
|
||||
return radix_tree_lookup(&hr_dev->qp_table_tree,
|
||||
qpn & (hr_dev->caps.num_qps - 1));
|
||||
}
|
||||
|
||||
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
|
||||
{
|
||||
u32 bits_per_long_val = BITS_PER_LONG;
|
||||
|
||||
if (bits_per_long_val == 64 || buf->nbufs == 1)
|
||||
return (char *)(buf->direct.buf) + offset;
|
||||
else
|
||||
return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
|
||||
(offset & (PAGE_SIZE - 1));
|
||||
}
|
||||
|
||||
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
|
||||
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
|
||||
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
|
||||
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
|
||||
|
||||
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
|
||||
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
|
||||
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
|
||||
u64 out_param);
|
||||
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
|
||||
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
|
||||
|
||||
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
|
||||
struct hns_roce_mtt *mtt);
|
||||
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
|
||||
			  struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt);

struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);

struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt);
__be32 send_ieth(struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);

extern struct hns_roce_hw hns_roce_hw_v1;

#endif /* _HNS_ROCE_DEVICE_H */
@@ -0,0 +1,762 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"

static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
		       (req_not << eq->log_entries), eq->doorbell);
	/* Memory barrier */
	mb();
}

static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			     HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

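/*
 * An AEQE belongs to software when its owner bit differs from the current
 * pass parity: eq->cons_index & eq->entries flips each time the consumer
 * wraps the ring (entries is a power of two), so XORing that parity with
 * the owner bit written by hardware tells us whether the entry is new.
 */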
static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					 struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
			  roce_get_field(aeqe->asyn,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}

static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						struct hns_roce_aeqe *aeqe,
						int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
			  roce_get_field(aeqe->asyn,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}

static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
					struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}

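/*
 * Drain the asynchronous event queue: for each software-owned AEQE, decode
 * the event type and dispatch it to the QP/CQ/mailbox handlers, advance
 * cons_index (resetting it once it exceeds twice the queue depth), and
 * ring the consumer-index doorbell once at the end.
 */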
static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int qpn = 0;

	while ((aeqe = next_aeqe_sw(eq))) {
		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		/* Memory barrier */
		rmb();

		switch (roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			dev_warn(dev, "qpn = 0x%lx\n",
				 roce_get_field(aeqe->event.qp_event.qp,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
			hns_roce_qp_event(hr_dev,
				roce_get_field(aeqe->event.qp_event.qp,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
				roce_get_field(aeqe->asyn,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
			dev_warn(dev, "CQ 0x%lx access err.\n",
				 roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
			hns_roce_cq_event(hr_dev,
			    le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
			    roce_get_field(aeqe->asyn,
					   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			dev_warn(dev, "CQ 0x%lx overflow\n",
				 roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
			hns_roce_cq_event(hr_dev,
			    le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
			    roce_get_field(aeqe->asyn,
					   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			dev_warn(dev, "CQ ID invalid.\n");
			hns_roce_cq_event(hr_dev,
			    le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
			    roce_get_field(aeqe->asyn,
					   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					   HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
			    roce_get_field(aeqe->event.ce_event.ceqe,
				       HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
				       HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
				 roce_get_field(aeqe->asyn,
						HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
						HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
				 eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return aeqes_found;
}

static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			     HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
			(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
			off % HNS_ROCE_BA_SIZE);
}

static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->ceqe.comp,
				HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
	       (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw(eq))) {
		/* Memory barrier */
		rmb();
		cqn = roce_get_field(ceqe->ceqe.comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return ceqes_found;
}

static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	struct device *dev = &eq->hr_dev->pdev->dev;
	int eqovf_found = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	int i = 0;

	/*
	 * For the AEQ overflow, ECC multi-bit error and CEQ almost-overflow
	 * alarms, the interrupt must be cleared explicitly: mask the irq,
	 * clear the irq state (write 1 to clear), then cancel the mask.
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			eqovf_found++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return eqovf_found;
}

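/*
 * Top-level per-vector dispatch: CEQs and AEQs are pulse interrupts served
 * by their ring consumers; any other vector is treated as the overflow/ECC
 * alarm interrupt.
 */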
static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	int eqes_found = 0;

	if (likely(eq->type_flag == HNS_ROCE_CEQ))
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		eqes_found = hns_roce_ceq_int(hr_dev, eq);
	else if (likely(eq->type_flag == HNS_ROCE_AEQ))
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		eqes_found = hns_roce_aeq_int(hr_dev, eq);
	else
		/* AEQ queue overflow irq */
		eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);

	return eqes_found;
}

static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
{
	int int_work = 0;
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;

	int_work = hns_roce_eq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
			       int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	u32 val;

	val = readl(eqc);

	if (enable_flag)
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);
	writel(val, eqc);
}

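/*
 * Allocate the EQ ring in HNS_ROCE_BA_SIZE chunks of DMA-coherent memory
 * (the ring must fit in a single chunk, otherwise -EINVAL is returned),
 * then program the EQ context words: state and entry shift, the base
 * address split across the 12~44 and 45~49 bit fields, and the initial
 * producer/consumer indexes.
 */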
static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas = 0;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44 bit */
	writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12: the address handed to hardware is shifted right by
	 * 12 because 4K pages are used, and by a further 32 to obtain the
	 * high 32-bit part of that value.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, (u8 *)eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, (u8 *)eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i = i - 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}

static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_eq *eq)
{
	int i = 0;
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}

static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	u32 aemask_val;
	int masken = 0;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}

static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
{
	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
}

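/*
 * EQs 0..num_comp_vectors-1 are completion EQs (each with its own context
 * registers and doorbell at a CEQC_REG_OFFSET stride); the remaining
 * vectors are asynchronous EQs. Interrupts stay masked while the EQs are
 * created and are only enabled after every EQ has its IRQ registered.
 */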
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq = NULL;
	int eq_num = 0;
	int ret = 0;
	int i = 0;
	int j = 0;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth[i];
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_ceqe);
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_aeqe);
		}
	}

	/* Disable irq */
	hns_roce_int_mask_en(hr_dev);

	/* Configure CE irq interval and burst num */
	hns_roce_ce_int_default_cfg(hr_dev);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < eq_num; j++) {
		ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
				  0, hr_dev->irq_names[j], eq_table->eq + j);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j = j - 1; j >= 0; j--)
		free_irq(eq_table->eq[j].irq, eq_table->eq + j);

err_create_eq_fail:
	for (i = i - 1; i >= 0; i--)
		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	int i;
	int eq_num;
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(eq_table->eq[i].irq, eq_table->eq + i);

		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}
@@ -0,0 +1,130 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_EQ_H
#define _HNS_ROCE_EQ_H

#define HNS_ROCE_CEQ			1
#define HNS_ROCE_AEQ			2

#define HNS_ROCE_CEQ_ENTRY_SIZE		0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE		0x10
#define HNS_ROCE_CEQC_REG_OFFSET	0x18

#define HNS_ROCE_CEQ_DEFAULT_INTERVAL	0x10
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM	0x10

#define HNS_ROCE_INT_MASK_DISABLE	0
#define HNS_ROCE_INT_MASK_ENABLE	1

#define EQ_ENABLE			1
#define EQ_DISABLE			0
#define CONS_INDEX_MASK			0xffff

#define CEQ_REG_OFFSET			0x18

enum {
	HNS_ROCE_EQ_STAT_INVALID  = 0,
	HNS_ROCE_EQ_STAT_VALID    = 2,
};

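/*
 * Asynchronous EQ entry: the 'asyn' word carries the owner bit plus the
 * event type/sub-type fields, and the union holds the event-specific
 * payload (QP number, CQ number, port, CEQ number, or a mailbox command
 * completion).
 */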
struct hns_roce_aeqe {
	u32 asyn;
	union {
		struct {
			u32 qp;
			u32 rsv0;
			u32 rsv1;
		} qp_event;

		struct {
			u32 cq;
			u32 rsv0;
			u32 rsv1;
		} cq_event;

		struct {
			u32 port;
			u32 rsv0;
			u32 rsv1;
		} port_event;

		struct {
			u32 ceqe;
			u32 rsv0;
			u32 rsv1;
		} ce_event;

		struct {
			__le64 out_param;
			__le16 token;
			u8 status;
			u8 rsv0;
		} __packed cmd;
	} event;
};

#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M   \
	(((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)

#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M   \
	(((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)

#define HNS_ROCE_AEQE_U32_4_OWNER_S 31

#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M   \
	(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)

#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M   \
	(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)

#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M   \
	(((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)

struct hns_roce_ceqe {
	union {
		int comp;
	} ceqe;
};

#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0

#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M   \
	(((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)

#endif /* _HNS_ROCE_EQ_H */
@@ -0,0 +1,476 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

#define HW_SYNC_TIMEOUT_MSECS		500
#define HW_SYNC_SLEEP_TIME_INTERVAL	20

#define HNS_ROCE_HEM_ALLOC_SIZE		(1 << 17)
#define HNS_ROCE_TABLE_CHUNK_SIZE	(1 << 17)

#define DMA_ADDR_T_SHIFT		12
#define BT_CMD_SYNC_SHIFT		31
#define BT_BA_SHIFT			32

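/*
 * A HEM (Hardware Entry Memory) object is a list of chunks, each holding
 * up to HNS_ROCE_HEM_CHUNK_LEN scatterlist entries. Pages are allocated
 * DMA-coherent in the largest power-of-two order that still fits the
 * remaining page count, so a table chunk typically needs few allocations.
 */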
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
					gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	hem->refcount = 0;
	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

		while (1 << order > npages)
			--order;

		/*
		 * Allocate the memory in one shot; if that fails, do not
		 * fall back to smaller blocks, just return failure.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
					 &sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		sg_set_buf(mem, buf, PAGE_SIZE << order);
		WARN_ON(mem->offset);
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(&hr_dev->pdev->dev,
				   chunk->mem[i].length,
				   lowmem_page_address(sg_page(&chunk->mem[i])),
				   sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}

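/*
 * Tell the hardware where a HEM chunk lives: build a BT (Base address
 * Table) command for the table type, poll ROCEE_BT_CMD_H until the
 * previous hardware sync completes (up to HW_SYNC_TIMEOUT_MSECS), then
 * write the base address plus command via hns_roce_write64_k under
 * bt_cmd_lock.
 */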
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = &hr_dev->pdev->dev;
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	unsigned long end = 0;
	unsigned long flags;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	u32 bt_cmd_h_val = 0;
	u32 bt_cmd_val[2];
	u32 bt_cmd_l = 0;
	u64 bt_ba = 0;
	int ret = 0;

	/* Find the HEM(Hardware Entry Memory) entry */
	unsigned long i = (obj & (table->num_obj - 1)) /
			  (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_MTPT);
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		break;
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_SRQC);
		break;
	default:
		return ret;
	}
	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently iterate over only one chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
		while (1) {
			if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
				if (!(time_before(jiffies, end))) {
					dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
					spin_unlock_irqrestore(lock, flags);
					return -EBUSY;
				}
			} else {
				break;
			}
			msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
		}

		bt_cmd_l = (u32)bt_ba;
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			       bt_ba >> BT_BA_SHIFT);

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h_val;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}

static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long obj)
{
	struct device *dev = &hr_dev->pdev->dev;
	unsigned long end = 0;
	unsigned long flags;
	void __iomem *bt_cmd;
	uint32_t bt_cmd_val[2];
	u32 bt_cmd_h_val = 0;
	int ret = 0;

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_MTPT);
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		break;
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_SRQC);
		break;
	default:
		return ret;
	}
	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);

	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);

	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
	while (1) {
		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			if (!(time_before(jiffies, end))) {
				dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
						       flags);
				return -EBUSY;
			}
		} else {
			break;
		}
		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
	}

	bt_cmd_val[0] = 0;
	bt_cmd_val[1] = bt_cmd_h_val;
	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);

	return ret;
}

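/*
 * Pin one table chunk: allocate its HEM on first use, point the hardware
 * at it via hns_roce_set_hem(), and refcount subsequent users so that
 * hns_roce_table_put() can free the chunk when the last object in it
 * goes away.
 */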
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = &hr_dev->pdev->dev;
	int ret = 0;
	unsigned long i;

	i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
	     table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		++table->hem[i]->refcount;
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL :
					GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set the HEM base address (128KB chunk, physical address) in HW */
	if (hns_roce_set_hem(hr_dev, table, obj)) {
		ret = -ENODEV;
		dev_err(dev, "set HEM base address to HW failed.\n");
		goto out;
	}

	++table->hem[i]->refcount;
out:
	mutex_unlock(&table->mutex);
	return ret;
}

void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = &hr_dev->pdev->dev;
	unsigned long i;

	i = (obj & (table->num_obj - 1)) /
	    (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->hem[i]->refcount == 0) {
		/* Clear HEM base address */
		if (hns_roce_clear_hem(hr_dev, table, obj))
			dev_warn(dev, "Clear HEM base address failed.\n");

		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

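/*
 * Translate a table object index to its kernel virtual address (and,
 * optionally, its DMA address) by walking the owning chunk's scatterlist
 * until the byte offset falls inside an entry. Only lowmem tables can be
 * resolved this way.
 */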
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
			  dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	unsigned long idx;
	int i;
	int offset, dma_offset;
	struct hns_roce_hem *hem;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);
	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;

	if (!hem)
		goto out;

	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) >
				    (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}

			if (chunk->mem[i].length > (u32)offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end)
{
	unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
	unsigned long i = 0;
	int ret = 0;

	/* Allocate MTT entry memory chunk by chunk (128KB each) */
	for (i = start; i <= end; i += inc) {
		ret = hns_roce_table_get(hr_dev, table, i);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		hns_roce_table_put(hr_dev, table, i);
	}
	return ret;
}

void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end)
{
	unsigned long i;

	for (i = start; i <= end;
	     i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
		hns_roce_table_put(hr_dev, table, i);
}

int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
{
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
	num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
	if (!table->hem)
		return -ENOMEM;

	table->type = type;
	table->num_hem = num_hem;
	table->num_obj = nobj;
	table->obj_size = obj_size;
	table->lowmem = use_lowmem;
	mutex_init(&table->mutex);

	return 0;
}

void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = &hr_dev->pdev->dev;
	unsigned long i;

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			if (hns_roce_clear_hem(hr_dev, table,
			    i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
				dev_err(dev, "Clear HEM base address failed.\n");

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}

void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}
@@ -0,0 +1,131 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H

enum {
	/* MAP HEM(Hardware Entry Memory) */
	HEM_TYPE_QPC = 0,
	HEM_TYPE_MTPT,
	HEM_TYPE_CQC,
	HEM_TYPE_SRQC,

	/* UNMAP HEM */
	HEM_TYPE_MTT,
	HEM_TYPE_IRRL,
};

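/*
 * Size the per-chunk scatterlist so that struct hns_roce_hem_chunk stays
 * within 256 bytes: subtract the list_head and the two int counters, then
 * divide by the size of one scatterlist entry. (As a rough illustration
 * only, assuming a 16-byte list_head and a 40-byte scatterlist, this
 * yields (256 - 16 - 8) / 40 = 5 entries; the exact value depends on the
 * kernel configuration.)
 */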
#define HNS_ROCE_HEM_CHUNK_LEN	\
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	\
	 (sizeof(struct scatterlist)))

enum {
	HNS_ROCE_HEM_PAGE_SHIFT = 12,
	HNS_ROCE_HEM_PAGE_SIZE  = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
};

struct hns_roce_hem_chunk {
	struct list_head	list;
	int			npages;
	int			nsg;
	struct scatterlist	mem[HNS_ROCE_HEM_CHUNK_LEN];
};

struct hns_roce_hem {
	struct list_head	chunk_list;
	int			refcount;
};

struct hns_roce_hem_iter {
	struct hns_roce_hem		*hem;
	struct hns_roce_hem_chunk	*chunk;
	int				page_idx;
};

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
			  dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end);
void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);

static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
				      struct hns_roce_hem_iter *iter)
{
	iter->hem = hem;
	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
				 list_entry(hem->chunk_list.next,
					    struct hns_roce_hem_chunk, list);
	iter->page_idx = 0;
}

static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
{
	return !iter->chunk;
}

static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
{
	if (++iter->page_idx >= iter->chunk->nsg) {
		if (iter->chunk->list.next == &iter->hem->chunk_list) {
			iter->chunk = NULL;
			return;
		}

		iter->chunk = list_entry(iter->chunk->list.next,
					 struct hns_roce_hem_chunk, list);
		iter->page_idx = 0;
	}
}

static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
{
	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}

#endif /* _HNS_ROCE_HEM_H */
File diff suppressed because it is too large
@@ -0,0 +1,981 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_HW_V1_H
#define _HNS_ROCE_HW_V1_H

#define CQ_STATE_VALID					2

#define HNS_ROCE_V1_MAX_PD_NUM				0x8000
#define HNS_ROCE_V1_MAX_CQ_NUM				0x10000
#define HNS_ROCE_V1_MAX_CQE_NUM				0x8000

#define HNS_ROCE_V1_MAX_QP_NUM				0x40000
#define HNS_ROCE_V1_MAX_WQE_NUM				0x4000

#define HNS_ROCE_V1_MAX_MTPT_NUM			0x80000

#define HNS_ROCE_V1_MAX_MTT_SEGS			0x100000

#define HNS_ROCE_V1_MAX_QP_INIT_RDMA			128
#define HNS_ROCE_V1_MAX_QP_DEST_RDMA			128

#define HNS_ROCE_V1_MAX_SQ_DESC_SZ			64
#define HNS_ROCE_V1_MAX_RQ_DESC_SZ			64
#define HNS_ROCE_V1_SG_NUM				2
#define HNS_ROCE_V1_INLINE_SIZE				32

#define HNS_ROCE_V1_UAR_NUM				256
#define HNS_ROCE_V1_PHY_UAR_NUM				8

#define HNS_ROCE_V1_GID_NUM				16

#define HNS_ROCE_V1_NUM_COMP_EQE			0x8000
#define HNS_ROCE_V1_NUM_ASYNC_EQE			0x400

#define HNS_ROCE_V1_QPC_ENTRY_SIZE			256
#define HNS_ROCE_V1_IRRL_ENTRY_SIZE			8
#define HNS_ROCE_V1_CQC_ENTRY_SIZE			64
#define HNS_ROCE_V1_MTPT_ENTRY_SIZE			64
#define HNS_ROCE_V1_MTT_ENTRY_SIZE			64

#define HNS_ROCE_V1_CQE_ENTRY_SIZE			32
#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT			0xFFFFF000

#define HNS_ROCE_V1_EXT_RAQ_WF				8
#define HNS_ROCE_V1_RAQ_ENTRY				64
#define HNS_ROCE_V1_RAQ_DEPTH				32768
#define HNS_ROCE_V1_RAQ_SIZE	(HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH)

#define HNS_ROCE_V1_SDB_DEPTH				0x400
#define HNS_ROCE_V1_ODB_DEPTH				0x400

#define HNS_ROCE_V1_DB_RSVD				0x80

#define HNS_ROCE_V1_SDB_ALEPT				HNS_ROCE_V1_DB_RSVD
#define HNS_ROCE_V1_SDB_ALFUL	(HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
#define HNS_ROCE_V1_ODB_ALEPT				HNS_ROCE_V1_DB_RSVD
#define HNS_ROCE_V1_ODB_ALFUL	(HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)

#define HNS_ROCE_V1_EXT_SDB_DEPTH			0x4000
#define HNS_ROCE_V1_EXT_ODB_DEPTH			0x4000
#define HNS_ROCE_V1_EXT_SDB_ENTRY			16
#define HNS_ROCE_V1_EXT_ODB_ENTRY			16
#define HNS_ROCE_V1_EXT_SDB_SIZE  \
	(HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY)
#define HNS_ROCE_V1_EXT_ODB_SIZE  \
	(HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY)

#define HNS_ROCE_V1_EXT_SDB_ALEPT			HNS_ROCE_V1_DB_RSVD
#define HNS_ROCE_V1_EXT_SDB_ALFUL	\
	(HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
#define HNS_ROCE_V1_EXT_ODB_ALEPT			HNS_ROCE_V1_DB_RSVD
#define HNS_ROCE_V1_EXT_ODB_ALFUL	\
	(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)

#define HNS_ROCE_ODB_POLL_MODE				0

#define HNS_ROCE_SDB_NORMAL_MODE			0
#define HNS_ROCE_SDB_EXTEND_MODE			1

#define HNS_ROCE_ODB_EXTEND_MODE			1

#define KEY_VALID					0x02

#define HNS_ROCE_CQE_QPN_MASK				0x3ffff
#define HNS_ROCE_CQE_STATUS_MASK			0x1f
#define HNS_ROCE_CQE_OPCODE_MASK			0xf

#define HNS_ROCE_CQE_SUCCESS				0x00
#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR		0x01
#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR		0x02
#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR		0x03
#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR		0x04
#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR	0x05
#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR		0x06
#define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR		0x07
#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR	0x08
#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR		0x09
#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR		0x0a
#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	0x0b
#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR		0x0c

#define QP1C_CFGN_OFFSET				0x28
#define PHY_PORT_OFFSET					0x8
#define MTPT_IDX_SHIFT					16
#define ALL_PORT_VAL_OPEN				0x3f
#define POL_TIME_INTERVAL_VAL				0x80
#define SLEEP_TIME_INTERVAL				20
#define SQ_PSN_SHIFT					8
#define QKEY_VAL					0x80010000
#define SDB_INV_CNT_OFFSET				8

struct hns_roce_cq_context {
|
||||
u32 cqc_byte_4;
|
||||
u32 cq_bt_l;
|
||||
u32 cqc_byte_12;
|
||||
u32 cur_cqe_ba0_l;
|
||||
u32 cqc_byte_20;
|
||||
u32 cqe_tptr_addr_l;
|
||||
u32 cur_cqe_ba1_l;
|
||||
u32 cqc_byte_32;
|
||||
};
|
||||
|
||||
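/*
 * Convention for the *_S and *_M pairs throughout this header: _S is the
 * bit offset of a field within its 32-bit context word, and _M is the
 * field mask already shifted into place, built as
 * (((1UL << width) - 1) << _S). A field is read as (word & _M) >> _S and
 * written as (word & ~_M) | (val << _S), which is how the roce_get_field
 * and roce_set_field helpers in hns_roce_common.h consume these macros.
 */
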
#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \
	(((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S)

#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16
#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \
	(((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S)

#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0
#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \
	(((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S)

#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20
#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \
	(((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S)

#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24
#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \
	(((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S)

#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0
#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \
	(((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S)

#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16
#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \
	(((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S)

#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8
#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \
	(((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S)

#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0
#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \
	(((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S)

#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9

#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8
#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14
#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15

#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16
#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \
	(((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)

struct hns_roce_cqe {
	u32 cqe_byte_4;
	union {
		u32 r_key;
		u32 immediate_data;
	};
	u32 byte_cnt;
	u32 cqe_byte_16;
	u32 cqe_byte_20;
	u32 s_mac_l;
	u32 cqe_byte_28;
	u32 reserved;
};

#define CQE_BYTE_4_OWNER_S 7
#define CQE_BYTE_4_SQ_RQ_FLAG_S 14

#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8
#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \
	(((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S)

#define CQE_BYTE_4_WQE_INDEX_S 16
#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S)

#define CQE_BYTE_4_OPERATION_TYPE_S 0
#define CQE_BYTE_4_OPERATION_TYPE_M \
	(((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S)

#define CQE_BYTE_4_IMM_INDICATOR_S 15

#define CQE_BYTE_16_LOCAL_QPN_S 0
#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S)

#define CQE_BYTE_20_PORT_NUM_S 26
#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S)

#define CQE_BYTE_20_SL_S 24
#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S)

#define CQE_BYTE_20_REMOTE_QPN_S 0
#define CQE_BYTE_20_REMOTE_QPN_M \
	(((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S)

#define CQE_BYTE_20_GRH_PRESENT_S 29

#define CQE_BYTE_28_P_KEY_IDX_S 16
#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S)

#define CQ_DB_REQ_NOT_SOL 0
#define CQ_DB_REQ_NOT (1 << 16)

struct hns_roce_v1_mpt_entry {
	u32 mpt_byte_4;
	u32 pbl_addr_l;
	u32 mpt_byte_12;
	u32 virt_addr_l;
	u32 virt_addr_h;
	u32 length;
	u32 mpt_byte_28;
	u32 pa0_l;
	u32 mpt_byte_36;
	u32 mpt_byte_40;
	u32 mpt_byte_44;
	u32 mpt_byte_48;
	u32 pa4_l;
	u32 mpt_byte_56;
	u32 mpt_byte_60;
	u32 mpt_byte_64;
};

#define MPT_BYTE_4_KEY_STATE_S 0
#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S)

#define MPT_BYTE_4_KEY_S 8
#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S)

#define MPT_BYTE_4_PAGE_SIZE_S 16
#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S)

#define MPT_BYTE_4_MW_TYPE_S 20

#define MPT_BYTE_4_MW_BIND_ENABLE_S 21

#define MPT_BYTE_4_OWN_S 22

#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24
#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \
	(((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S)

#define MPT_BYTE_4_REMOTE_ATOMIC_S 26
#define MPT_BYTE_4_LOCAL_WRITE_S 27
#define MPT_BYTE_4_REMOTE_WRITE_S 28
#define MPT_BYTE_4_REMOTE_READ_S 29
#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30
#define MPT_BYTE_4_ADDRESS_TYPE_S 31

#define MPT_BYTE_12_PBL_ADDR_H_S 0
#define MPT_BYTE_12_PBL_ADDR_H_M \
	(((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S)

#define MPT_BYTE_12_MW_BIND_COUNTER_S 17
#define MPT_BYTE_12_MW_BIND_COUNTER_M \
	(((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S)

#define MPT_BYTE_28_PD_S 0
#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S)

#define MPT_BYTE_28_L_KEY_IDX_L_S 16
#define MPT_BYTE_28_L_KEY_IDX_L_M \
	(((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S)

#define MPT_BYTE_36_PA0_H_S 0
#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S)

#define MPT_BYTE_36_PA1_L_S 8
#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S)

#define MPT_BYTE_40_PA1_H_S 0
#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S)

#define MPT_BYTE_40_PA2_L_S 16
#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S)

#define MPT_BYTE_44_PA2_H_S 0
#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S)

#define MPT_BYTE_44_PA3_L_S 24
#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S)

#define MPT_BYTE_48_PA3_H_S 0
#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S)

#define MPT_BYTE_56_PA4_H_S 0
#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S)

#define MPT_BYTE_56_PA5_L_S 8
#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S)

#define MPT_BYTE_60_PA5_H_S 0
#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S)

#define MPT_BYTE_60_PA6_L_S 16
#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S)

#define MPT_BYTE_64_PA6_H_S 0
#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S)

#define MPT_BYTE_64_L_KEY_IDX_H_S 24
#define MPT_BYTE_64_L_KEY_IDX_H_M \
	(((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)

struct hns_roce_wqe_ctrl_seg {
	__be32 sgl_pa_h;
	__be32 flag;
	__be32 imm_data;
	__be32 msg_length;
};

struct hns_roce_wqe_data_seg {
	__be64 addr;
	__be32 lkey;
	__be32 len;
};

struct hns_roce_wqe_raddr_seg {
	__be32 rkey;
	__be32 len; /* reserved */
	__be64 raddr;
};

struct hns_roce_rq_wqe_ctrl {
	u32 rwqe_byte_4;
	u32 rocee_sgl_ba_l;
	u32 rwqe_byte_12;
	u32 reserved[5];
};

#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \
	(((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S)

#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000

#define GID_LEN 16

struct hns_roce_ud_send_wqe {
	u32 dmac_h;
	u32 u32_8;
	u32 immediate_data;

	u32 u32_16;
	union {
		unsigned char dgid[GID_LEN];
		struct {
			u32 u32_20;
			u32 u32_24;
			u32 u32_28;
			u32 u32_32;
		};
	};

	u32 u32_36;
	u32 u32_40;

	u32 va0_l;
	u32 va0_h;
	u32 l_key0;

	u32 va1_l;
	u32 va1_h;
	u32 l_key1;
};

#define UD_SEND_WQE_U32_4_DMAC_0_S 0
#define UD_SEND_WQE_U32_4_DMAC_0_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S)

#define UD_SEND_WQE_U32_4_DMAC_1_S 8
#define UD_SEND_WQE_U32_4_DMAC_1_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S)

#define UD_SEND_WQE_U32_4_DMAC_2_S 16
#define UD_SEND_WQE_U32_4_DMAC_2_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S)

#define UD_SEND_WQE_U32_4_DMAC_3_S 24
#define UD_SEND_WQE_U32_4_DMAC_3_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S)

#define UD_SEND_WQE_U32_8_DMAC_4_S 0
#define UD_SEND_WQE_U32_8_DMAC_4_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S)

#define UD_SEND_WQE_U32_8_DMAC_5_S 8
#define UD_SEND_WQE_U32_8_DMAC_5_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)

#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
	(((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)

#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24
#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \
	(((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S)

#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31

#define UD_SEND_WQE_U32_16_DEST_QP_S 0
#define UD_SEND_WQE_U32_16_DEST_QP_M \
	(((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S)

#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24
#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S)

#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0
#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \
	(((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S)

#define UD_SEND_WQE_U32_36_PRIORITY_S 20
#define UD_SEND_WQE_U32_36_PRIORITY_M \
	(((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S)

#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24
#define UD_SEND_WQE_U32_36_SGID_INDEX_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S)

#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0
#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S)

#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8
#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \
	(((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)

struct hns_roce_sqp_context {
	u32 qp1c_bytes_4;
	u32 sq_rq_bt_l;
	u32 qp1c_bytes_12;
	u32 qp1c_bytes_16;
	u32 qp1c_bytes_20;
	u32 qp1c_bytes_28;
	u32 cur_rq_wqe_ba_l;
	u32 qp1c_bytes_32;
	u32 cur_sq_wqe_ba_l;
	u32 qp1c_bytes_40;
};

#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
	(((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)

#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12
#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \
	(((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S)

#define QP1C_BYTES_4_PD_S 16
#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S)

#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0
#define QP1C_BYTES_12_SQ_RQ_BT_H_M \
	(((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S)

#define QP1C_BYTES_16_RQ_HEAD_S 0
#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S)

#define QP1C_BYTES_16_PORT_NUM_S 16
#define QP1C_BYTES_16_PORT_NUM_M \
	(((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S)

#define QP1C_BYTES_16_SIGNALING_TYPE_S 27
#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28
#define QP1C_BYTES_16_RQ_BA_FLG_S 29
#define QP1C_BYTES_16_SQ_BA_FLG_S 30
#define QP1C_BYTES_16_QP1_ERR_S 31

#define QP1C_BYTES_20_SQ_HEAD_S 0
#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S)

#define QP1C_BYTES_20_PKEY_IDX_S 16
#define QP1C_BYTES_20_PKEY_IDX_M \
	(((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S)

#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0
#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \
	(((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S)

#define QP1C_BYTES_28_RQ_CUR_IDX_S 16
#define QP1C_BYTES_28_RQ_CUR_IDX_M \
	(((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S)

#define QP1C_BYTES_32_TX_CQ_NUM_S 0
#define QP1C_BYTES_32_TX_CQ_NUM_M \
	(((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S)

#define QP1C_BYTES_32_RX_CQ_NUM_S 16
#define QP1C_BYTES_32_RX_CQ_NUM_M \
	(((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S)

#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0
#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \
	(((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S)

#define QP1C_BYTES_40_SQ_CUR_IDX_S 16
#define QP1C_BYTES_40_SQ_CUR_IDX_M \
	(((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S)

#define HNS_ROCE_WQE_INLINE (1UL<<31)
#define HNS_ROCE_WQE_SE (1UL<<30)

#define HNS_ROCE_WQE_SGE_NUM_BIT 24
#define HNS_ROCE_WQE_IMM (1UL<<23)
#define HNS_ROCE_WQE_FENCE (1UL<<21)
#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20)

#define HNS_ROCE_WQE_OPCODE_SEND (0<<16)
#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16)
#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16)
#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16)
#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16)
#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)

struct hns_roce_qp_context {
	u32 qpc_bytes_4;
	u32 qpc_bytes_8;
	u32 qpc_bytes_12;
	u32 qpc_bytes_16;
	u32 sq_rq_bt_l;
	u32 qpc_bytes_24;
	u32 irrl_ba_l;
	u32 qpc_bytes_32;
	u32 qpc_bytes_36;
	u32 dmac_l;
	u32 qpc_bytes_44;
	u32 qpc_bytes_48;
	u8 dgid[16];
	u32 qpc_bytes_68;
	u32 cur_rq_wqe_ba_l;
	u32 qpc_bytes_76;
	u32 rx_rnr_time;
	u32 qpc_bytes_84;
	u32 qpc_bytes_88;
	union {
		u32 rx_sge_len;
		u32 dma_length;
	};
	union {
		u32 rx_sge_num;
		u32 rx_send_pktn;
		u32 r_key;
	};
	u32 va_l;
	u32 va_h;
	u32 qpc_bytes_108;
	u32 qpc_bytes_112;
	u32 rx_cur_sq_wqe_ba_l;
	u32 qpc_bytes_120;
	u32 qpc_bytes_124;
	u32 qpc_bytes_128;
	u32 qpc_bytes_132;
	u32 qpc_bytes_136;
	u32 qpc_bytes_140;
	u32 qpc_bytes_144;
	u32 qpc_bytes_148;
	union {
		u32 rnr_retry;
		u32 ack_time;
	};
	u32 qpc_bytes_156;
	u32 pkt_use_len;
	u32 qpc_bytes_164;
	u32 qpc_bytes_168;
	union {
		u32 sge_use_len;
		u32 pa_use_len;
	};
	u32 qpc_bytes_176;
	u32 qpc_bytes_180;
	u32 tx_cur_sq_wqe_ba_l;
	u32 qpc_bytes_188;
	u32 rvd21;
};

#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S)

#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3
#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4
#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5
#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6
#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7

#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8
#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \
	(((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S)

#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12
#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \
	(((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S)

#define QP_CONTEXT_QPC_BYTES_4_PD_S 16
#define QP_CONTEXT_QPC_BYTES_4_PD_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S)

#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0
#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S)

#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16
#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S)

#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0
#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S)

#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16
#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S)

#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0
#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S)

#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0
#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \
	(((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S)

#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18
#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \
	(((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)

#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23

#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0
#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \
	(((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S)

#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18
#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S)

#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20
#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21
#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22
#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23

#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24
#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S)

#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0
#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S)

#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24
#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S)

#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0
#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)

#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16
#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S)

#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24
#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S)

#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0
#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \
	(((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S)

#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20
#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S)

#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28
#define QP_CONTEXT_QPC_BYTES_48_MTU_M \
	(((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S)

#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0
#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S)

#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16
#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S)

#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0
#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \
	(((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S)

#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8
#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S)

#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S)

#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24
#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S)

#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0
#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S)

#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24
#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25

#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26
#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \
	(((1UL << 2) - 1) << \
	QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S)

#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29
#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S)

#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S)

#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24
#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25

#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S)

#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24
#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S)

#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0
#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
	(((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S)

#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0
#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S)

#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16
#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S)

#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0
#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S)

#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24

#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25
#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S)

#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27

#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S)

#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24
#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S)

#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S)

#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24
#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S)

#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0
#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S)

#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16
#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S)

#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31

#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0
#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S)

#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0
#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S)

#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2
#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S)

#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5
#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S)

#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8
#define QP_CONTEXT_QPC_BYTES_148_LSN_M \
	(((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S)

#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0
#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S)

#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3
#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \
	(((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)

#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S)

#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11
#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \
	(((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S)

#define QP_CONTEXT_QPC_BYTES_156_SL_S 14
#define QP_CONTEXT_QPC_BYTES_156_SL_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S)

#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16
#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S)

#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24
#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S)

#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S)

#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24
#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \
	(((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S)

#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0
#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \
	(((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S)

#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24
#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S)

#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26
#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \
	(((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S)

#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28
#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29
#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30

#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0
#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S)

#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16
#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S)

#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0
#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S)

#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16
#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S)

#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0
#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \
	(((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S)

#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8

#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16
#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)

struct hns_roce_rq_db {
	u32 u32_4;
	u32 u32_8;
};

#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
#define RQ_DOORBELL_U32_4_RQ_HEAD_M \
	(((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S)

#define RQ_DOORBELL_U32_8_QPN_S 0
#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S)

#define RQ_DOORBELL_U32_8_CMD_S 28
#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S)

#define RQ_DOORBELL_U32_8_HW_SYNC_S 31

struct hns_roce_sq_db {
	u32 u32_4;
	u32 u32_8;
};

#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
	(((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)

#define SQ_DOORBELL_U32_4_PORT_S 18
#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)

#define SQ_DOORBELL_U32_8_QPN_S 0
#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S)

#define SQ_DOORBELL_HW_SYNC_S 31

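/*
 * The RQ and SQ doorbells are two 32-bit words: per the RQ_DOORBELL_ and
 * SQ_DOORBELL_ masks above, u32_4 carries the queue head (plus the port
 * number for the SQ) and u32_8 carries the QPN together with the command
 * and hardware-sync bits.
 */
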
struct hns_roce_ext_db {
	int esdb_dep;
	int eodb_dep;
	struct hns_roce_buf_list *sdb_buf_list;
	struct hns_roce_buf_list *odb_buf_list;
};

struct hns_roce_db_table {
	int sdb_ext_mod;
	int odb_ext_mod;
	struct hns_roce_ext_db *ext_db;
};

struct hns_roce_v1_priv {
	struct hns_roce_db_table db_table;
	struct hns_roce_raq_table raq_table;
};

int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);

#endif
File diff suppressed because it is too large
@@ -0,0 +1,614 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

static unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

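/*
 * These two helpers are inverse 32-bit rotations by 8 bits:
 * hw_index_to_key() rotates the MPT table index left to form the
 * lkey/rkey, and key_to_hw_index() rotates the key right to recover the
 * index again.
 */
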
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIME_CLASS_B);
}

static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIME_CLASS_B);
}

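/*
 * SW2HW_MPT passes an MPT context prepared in the mailbox DMA buffer to
 * the hardware; HW2SW_MPT takes the entry back out of hardware ownership
 * and may read the context back, with a NULL mailbox signalled through
 * the op modifier (!mailbox).
 */
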
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

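/*
 * Classic buddy-system allocation: scan the free bitmaps upwards from the
 * requested order, grab the first free block found, then split it back
 * down to the requested order, marking each split-off buddy (seg ^ 1)
 * free one order below. The returned segment number is in units of
 * order-0 blocks.
 */
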
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof(int *),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;

		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
		return -1;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret = 0;
	int i;

	/* Page num is zero, correspond to DMA memory register */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: if page_shift is zero, FAST memory register */
	mtt->page_shift = page_shift;

	/* Compute how many MTT entries are necessary */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate MTT entry */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
				 mtt->first_seg + (1 << mtt->order) - 1);
}

static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	unsigned long index = 0;
	int ret = 0;
	struct device *dev = &hr_dev->pdev->dev;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD num */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;
	}

	return 0;
}

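/*
 * For MR_TYPE_MR the physical buffer list (PBL) is a DMA-coherent array
 * holding one 64-bit page address per page, hence the npages * 8
 * allocation size; a DMA MR (size == ~0ull) needs no PBL at all.
 */
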
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key));
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	u32 i = 0;
	__le64 *mtts = NULL;
	dma_addr_t dma_handle;
	u32 s = start_index * sizeof(u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0 */
	for (i = 0; i < npages; ++i)
		mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;

	if (mtt->order < 0)
		return -EINVAL;

	while (npages > 0) {
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u32 i = 0;
	int ret = 0;
	u64 *page_list = NULL;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}
	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	return 0;

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	int ret = 0;
	struct hns_roce_mr *mr = NULL;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

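/*
 * A DMA MR is allocated with iova 0 and the ~0ULL size sentinel, so
 * hns_roce_mr_alloc() marks it MR_TYPE_DMA with no PBL; the resulting
 * lkey/rkey therefore maps the device's entire DMA address space.
 */
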
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i, k, entry;
	int ret = 0;
	u64 *pages;
	u32 n;
	int len;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) + umem->page_size * k;
			if (i == PAGE_SIZE / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return ret;
}

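/*
 * The umem scatterlist is flattened into page-sized batches above: one
 * scratch page holds up to PAGE_SIZE / sizeof(u64) page addresses, which
 * are flushed into the MTT each time the batch fills and once more for
 * the final partial batch.
 */
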
static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	int i = 0;
	int entry;
	struct scatterlist *sg;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
		i++;
	}

	/* Memory barrier */
	mb();

	return 0;
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr *mr = NULL;
	int ret = 0;
	int n = 0;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
		dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
			mr->umem->page_size);
	}

	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
			length);
		goto err_umem;
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}
@@ -0,0 +1,144 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"

static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
	struct device *dev = &hr_dev->pdev->dev;
	unsigned long pd_number;
	int ret = 0;

	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
	if (ret == -1) {
		dev_err(dev, "alloc pdn from pdbitmap failed\n");
		return -ENOMEM;
	}

	*pdn = pd_number;

	return 0;
}

static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
{
	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
}

int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
	return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
				    hr_dev->caps.num_pds - 1,
				    hr_dev->caps.reserved_pds, 0);
}

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
}

struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_pd *pd;
	int ret;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
	if (ret) {
		kfree(pd);
		dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
		return ERR_PTR(ret);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
			hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
			dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

int hns_roce_dealloc_pd(struct ib_pd *pd)
{
	hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
	kfree(to_hr_pd(pd));

	return 0;
}

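/*
 * For a userspace PD (context != NULL) the allocated pdn is copied back
 * through udata so the userspace provider library can refer to the same
 * protection domain number.
 */
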
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
	struct resource *res;
	int ret = 0;

	/* Using bitmap to manage UAR index */
	ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->index);
	if (ret == -1)
		return -ENOMEM;

	uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;

	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;

	return 0;
}

void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
	hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
}

int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
{
	return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
				    hr_dev->caps.num_uars,
				    hr_dev->caps.num_uars - 1,
				    hr_dev->caps.reserved_uars, 0);
}

void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
}
@@ -0,0 +1,855 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"

#define DB_REG_OFFSET 0x1000
#define SQP_NUM 12

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
|
||||
enum hns_roce_event type)
|
||||
{
|
||||
struct ib_event event;
|
||||
struct ib_qp *ibqp = &hr_qp->ibqp;
|
||||
|
||||
if (ibqp->event_handler) {
|
||||
event.device = ibqp->device;
|
||||
event.element.qp = ibqp;
|
||||
switch (type) {
|
||||
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
|
||||
event.event = IB_EVENT_PATH_MIG;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_COMM_EST:
|
||||
event.event = IB_EVENT_COMM_EST;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
|
||||
event.event = IB_EVENT_SQ_DRAINED;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
|
||||
event.event = IB_EVENT_QP_LAST_WQE_REACHED;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
|
||||
event.event = IB_EVENT_QP_FATAL;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
|
||||
event.event = IB_EVENT_PATH_MIG_ERR;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
|
||||
event.event = IB_EVENT_QP_REQ_ERR;
|
||||
break;
|
||||
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
|
||||
event.event = IB_EVENT_QP_ACCESS_ERR;
|
||||
break;
|
||||
default:
|
||||
dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
|
||||
type, hr_qp->qpn);
|
||||
return;
|
||||
}
|
||||
ibqp->event_handler(&event, ibqp->qp_context);
|
||||
}
|
||||
}
|
||||
|
||||
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
|
||||
int align, unsigned long *base)
|
||||
{
|
||||
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
|
||||
int ret = 0;
|
||||
unsigned long qpn;
|
||||
|
||||
ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
|
||||
if (ret == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
*base = qpn;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
|
||||
{
|
||||
switch (state) {
|
||||
case IB_QPS_RESET:
|
||||
return HNS_ROCE_QP_STATE_RST;
|
||||
case IB_QPS_INIT:
|
||||
return HNS_ROCE_QP_STATE_INIT;
|
||||
case IB_QPS_RTR:
|
||||
return HNS_ROCE_QP_STATE_RTR;
|
||||
case IB_QPS_RTS:
|
||||
return HNS_ROCE_QP_STATE_RTS;
|
||||
case IB_QPS_SQD:
|
||||
return HNS_ROCE_QP_STATE_SQD;
|
||||
case IB_QPS_ERR:
|
||||
return HNS_ROCE_QP_STATE_ERR;
|
||||
default:
|
||||
return HNS_ROCE_QP_NUM_STATE;
|
||||
}
|
||||
}
|
||||
|
||||
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
|
||||
struct hns_roce_qp *hr_qp)
|
||||
{
|
||||
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
|
||||
int ret;
|
||||
|
||||
if (!qpn)
|
||||
return -EINVAL;
|
||||
|
||||
hr_qp->qpn = qpn;
|
||||
|
||||
spin_lock_irq(&qp_table->lock);
|
||||
ret = radix_tree_insert(&hr_dev->qp_table_tree,
|
||||
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
|
||||
spin_unlock_irq(&qp_table->lock);
|
||||
if (ret) {
|
||||
dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
|
||||
goto err_put_irrl;
|
||||
}
|
||||
|
||||
atomic_set(&hr_qp->refcount, 1);
|
||||
init_completion(&hr_qp->free);
|
||||
|
||||
return 0;
|
||||
|
||||
err_put_irrl:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
|
||||
struct hns_roce_qp *hr_qp)
|
||||
{
|
||||
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
int ret;
|
||||
|
||||
if (!qpn)
|
||||
return -EINVAL;
|
||||
|
||||
hr_qp->qpn = qpn;
|
||||
|
||||
/* Alloc memory for QPC */
|
||||
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
|
||||
if (ret) {
|
||||
dev_err(dev, "QPC table get failed\n");
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Alloc memory for IRRL */
|
||||
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
|
||||
if (ret) {
|
||||
dev_err(dev, "IRRL table get failed\n");
|
||||
goto err_put_qp;
|
||||
}
|
||||
|
||||
spin_lock_irq(&qp_table->lock);
|
||||
ret = radix_tree_insert(&hr_dev->qp_table_tree,
|
||||
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
|
||||
spin_unlock_irq(&qp_table->lock);
|
||||
if (ret) {
|
||||
dev_err(dev, "QPC radix_tree_insert failed\n");
|
||||
goto err_put_irrl;
|
||||
}
|
||||
|
||||
atomic_set(&hr_qp->refcount, 1);
|
||||
init_completion(&hr_qp->free);
|
||||
|
||||
return 0;
|
||||
|
||||
err_put_irrl:
|
||||
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
|
||||
|
||||
err_put_qp:
|
||||
hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
|
||||
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
|
||||
{
|
||||
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp_table->lock, flags);
|
||||
radix_tree_delete(&hr_dev->qp_table_tree,
|
||||
hr_qp->qpn & (hr_dev->caps.num_qps - 1));
|
||||
spin_unlock_irqrestore(&qp_table->lock, flags);
|
||||
}
|
||||
|
||||
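/*
 * Drop the initial reference and wait for event handlers that still hold
 * the QP to finish before the QPC/IRRL table entries are released below
 * (GSI QPs never took those entries).
 */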
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}

static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	u32 max_cnt;
	struct device *dev = &hr_dev->pdev->dev;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is attached, the RQ parameters must stay zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "srq no need config max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		/* In the v1 engine, parameter verification is done here */
		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		/* The WQE size is fixed at 64 bytes */
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	/* Get buf size; the SQ and RQ are each aligned to page_size */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

	return 0;
}

static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       enum ib_qp_type type,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 max_cnt;
	(void)type;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	/* In the v1 engine, parameter verification is done here */
	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get the number of data segments */
	max_cnt = max(1U, cap->max_send_sge);
	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

	/* Get buf size; the SQ and RQ are each aligned to page_size */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);
	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

	/* Report back the WR and SGE limits granted for the send queue */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_out;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_out;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_out;
		}

		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
					ilog2((unsigned int)hr_qp->umem->page_size),
					&hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  init_attr->qp_type, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_out;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base +
				     ROCEE_DB_OTHERS_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
				       &hr_qp->hr_buf)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_out;
		}

		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if ((init_attr->qp_type) == IB_QPT_GSI) {
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
	return ret;
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_dev->caps.sqp_start +
						hr_dev->caps.num_ports +
						init_attr->port_num - 1, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		hr_qp->port = (init_attr->port_num - 1);
		hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
				     hr_dev->caps.num_ports +
				     init_attr->port_num - 1;
		break;
	}
	default: {
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = -EINVAL;
	int p;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = -EPERM;
		dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
			new_state);
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

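/*
 * Take the two CQ locks in a fixed order (lower CQN first) so that
 * concurrent lockers cannot deadlock; __acquire() keeps sparse happy
 * when the send and receive queues share a single CQ.
 */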
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

__be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(wr->ex.imm_data);
	case IB_WR_SEND_WITH_INV:
		return cpu_to_le32(wr->ex.invalidate_rkey);
	default:
		return 0;
	}
}

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

	if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
		dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
			n, hr_qp->rq.wqe_cnt);
		return NULL;
	}

	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

	if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
		dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
			n, hr_qp->sq.wqe_cnt);
		return NULL;
	}

	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

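/*
 * Lockless head/tail check first; on apparent overflow, re-read under
 * the CQ lock to get a stable view before declaring the queue full.
 */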
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return 0;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port includes two SQPs, so six ports reserve 12 in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1,
				   hr_dev->caps.sqp_start + SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}

@@ -0,0 +1,53 @@
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_USER_H
#define _HNS_ROCE_USER_H

struct hns_roce_ib_create_cq {
	__u64 buf_addr;
};

struct hns_roce_ib_create_qp {
	__u64 buf_addr;
	__u64 db_addr;
	__u8  log_sq_bb_count;
	__u8  log_sq_stride;
	__u8  sq_no_prefetch;
	__u8  reserved[5];
};

struct hns_roce_ib_alloc_ucontext_resp {
	__u32 qp_tab_size;
};

#endif /* _HNS_ROCE_USER_H */

@@ -3166,8 +3166,11 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
	spin_lock_init(&cm_core->ht_lock);
	spin_lock_init(&cm_core->listen_list_lock);

	cm_core->event_wq = create_singlethread_workqueue("iwewq");
	cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
	cm_core->event_wq = alloc_ordered_workqueue("iwewq",
						    WQ_MEM_RECLAIM);

	cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
						      WQ_MEM_RECLAIM);
}

/**
@@ -1615,7 +1615,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
		i40iw_register_notifiers();
		iwdev->init_state = INET_NOTIFIER;
		status = i40iw_add_mac_ip(iwdev);
@@ -881,7 +881,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
			alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
@@ -37,7 +37,7 @@
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"
#include <rdma/mlx4-abi.h>

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
@@ -230,6 +230,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

@@ -245,6 +247,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);

@@ -281,6 +285,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
			break;

		case IB_SMP_ATTR_GUID_INFO:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,

@@ -296,6 +302,26 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
			}
			break;

		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			/* cache sl to vl mapping changes for use in
			 * filling QP1 LRH VL field when sending packets
			 */
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
			    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
				return;
			if (!mlx4_is_slave(dev->dev)) {
				union sl2vl_tbl_to_u64 sl2vl64;
				int jj;

				for (jj = 0; jj < 8; jj++) {
					sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
					pr_debug("port %u, sl2vl[%d] = %02x\n",
						 port_num, jj, sl2vl64.sl8[jj]);
				}
				atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
			}
			break;

		default:
			break;
		}
@@ -345,7 +371,8 @@ static void node_desc_override(struct ib_device *dev,
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

@@ -805,8 +832,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
@@ -1037,6 +1063,23 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
					    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}

	/* Update the sl to vl table from inside client rereg
	 * only if in secure-host mode (snooping is not possible)
	 * and the sl-to-vl change event is not generated by FW.
	 */
	if (!mlx4_is_slave(dev->dev) &&
	    dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
		if (mlx4_is_master(dev->dev))
			/* already in work queue from mlx4_ib_event queueing
			 * mlx4_handle_port_mgmt_change_event, which calls
			 * this procedure. Therefore, call sl2vl_update directly.
			 */
			mlx4_ib_sl2vl_update(dev, port_num);
		else
			mlx4_sched_ib_sl2vl_update_work(dev, port_num);
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

@@ -1176,6 +1219,24 @@ void handle_port_mgmt_change_event(struct work_struct *work)
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;

	case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
		/* cache sl to vl mapping changes for use in
		 * filling QP1 LRH VL field when sending packets
		 */
		if (!mlx4_is_slave(dev->dev)) {
			union sl2vl_tbl_to_u64 sl2vl64;
			int jj;

			for (jj = 0; jj < 8; jj++) {
				sl2vl64.sl8[jj] =
					eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
				pr_debug("port %u, sl2vl[%d] = %02x\n",
					 port, jj, sl2vl64.sl8[jj]);
			}
			atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
@@ -1918,7 +1979,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev);
	ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);

@@ -2091,7 +2152,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
	}

	snprintf(name, sizeof name, "mlx4_ibt%d", port);
	ctx->wq = create_singlethread_workqueue(name);
	ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;

@@ -2099,7 +2160,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
	}

	snprintf(name, sizeof name, "mlx4_ibud%d", port);
	ctx->ud_wq = create_singlethread_workqueue(name);
	ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
@@ -55,7 +55,7 @@
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
@@ -832,6 +832,66 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
	return ret;
}

static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
@@ -886,7 +946,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*

@@ -897,7 +957,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

@@ -1259,7 +1319,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
@@ -1361,6 +1421,19 @@ struct mlx4_ib_steering {
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

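/* Reject a spec whose mask sets anything beyond the last field the
 * driver understands: the tail of the filter after 'field' must be zero.
 */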
/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))

static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,

@@ -1370,6 +1443,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);

@@ -1379,6 +1455,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);

@@ -1388,6 +1467,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,


	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;

@@ -1397,6 +1479,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
@@ -2000,7 +2085,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

@@ -2653,6 +2738,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)

	if (init_node_data(ibdev))
		goto err_map;
	mlx4_init_sl2vl_tbl(ibdev);

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
@@ -3101,6 +3187,47 @@ static void handle_bonded_port_state_event(struct work_struct *work)
	ib_dispatch_event(&ibev);
}

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}

static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);

	kfree(ew);
}

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	} else {
		pr_err("failed to allocate memory for sl2vl update work\n");
	}
}

static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
@@ -3131,10 +3258,14 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;
@@ -3222,7 +3353,7 @@ static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

@@ -1045,7 +1045,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->mcg_wq)
		return -ENOMEM;

@@ -1246,7 +1246,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)

int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
	if (!clean_wq)
		return -ENOMEM;

@@ -570,6 +570,7 @@ struct mlx4_ib_dev {
	struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah *sm_ah[MLX4_MAX_PORTS];
	spinlock_t sm_lock;
	atomic64_t sl2vl[MLX4_MAX_PORTS];
	struct mlx4_ib_sriov sriov;

	struct mutex cap_mask_mutex;

@@ -600,6 +601,7 @@ struct ib_event_work {
	struct work_struct work;
	struct mlx4_ib_dev *ib_dev;
	struct mlx4_eqe ib_eqe;
	int port;
};

struct mlx4_ib_qp_tunnel_init_attr {

@@ -883,4 +885,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index);

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port);

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);

#endif /* MLX4_IB_H */

@@ -47,7 +47,7 @@
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
#include <rdma/mlx4-abi.h>

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);

@@ -2405,6 +2405,22 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
	return 0;
}

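/*
 * The cached sl2vl table packs two 4-bit VL values per byte: even SLs
 * sit in the high nibble, odd SLs in the low nibble.
 */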
static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
{
	union sl2vl_tbl_to_u64 tmp_vltab;
	u8 vl;

	if (sl > 15)
		return 0xf;
	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
	vl = tmp_vltab.sl8[sl >> 1];
	if (sl & 1)
		vl &= 0x0f;
	else
		vl >>= 4;
	return vl;
}

#define MLX4_ROCEV2_QP1_SPORT 0xC000
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)

@@ -2590,7 +2606,12 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
			sl_to_vl(to_mdev(ib_dev),
				 sqp->ud_header.lrh.service_level,
				 sqp->qp.port);
		if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
			return -EINVAL;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}

@@ -37,7 +37,7 @@
#include <linux/vmalloc.h>

#include "mlx4_ib.h"
#include "user.h"
#include <rdma/mlx4-abi.h>

static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
@@ -35,7 +35,6 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "user.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
@@ -394,7 +394,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, 64);
	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
@@ -53,7 +53,6 @@
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include "user.h"
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"

@@ -106,13 +105,42 @@ static int mlx5_netdev_event(struct notifier_block *this,
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
		return NOTIFY_DONE;
	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	write_lock(&ibdev->roce.netdev_lock);
	if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
		ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
	write_unlock(&ibdev->roce.netdev_lock);
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = {0};

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}
@@ -123,6 +151,10 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
	if (ndev)
		return ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
@@ -138,7 +170,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;

@@ -162,6 +194,17 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
	if (!ndev)
		return 0;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
@ -429,7 +472,7 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
|
|||
}
|
||||
|
||||
struct mlx5_reg_node_desc {
|
||||
u8 desc[64];
|
||||
u8 desc[IB_DEVICE_NODE_DESC_MAX];
|
||||
};
|
||||
|
||||
static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
|
||||
|
@ -532,6 +575,26 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
|
|||
resp.response_length += sizeof(resp.tso_caps);
|
||||
}
|
||||
}
|
||||
|
||||
if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
|
||||
resp.rss_caps.rx_hash_function =
|
||||
MLX5_RX_HASH_FUNC_TOEPLITZ;
|
||||
resp.rss_caps.rx_hash_fields_mask =
|
||||
MLX5_RX_HASH_SRC_IPV4 |
|
||||
MLX5_RX_HASH_DST_IPV4 |
|
||||
MLX5_RX_HASH_SRC_IPV6 |
|
||||
MLX5_RX_HASH_DST_IPV6 |
|
||||
MLX5_RX_HASH_SRC_PORT_TCP |
|
||||
MLX5_RX_HASH_DST_PORT_TCP |
|
||||
MLX5_RX_HASH_SRC_PORT_UDP |
|
||||
MLX5_RX_HASH_DST_PORT_UDP;
|
||||
resp.response_length += sizeof(resp.rss_caps);
|
||||
}
|
||||
} else {
|
||||
if (field_avail(typeof(resp), tso_caps, uhw->outlen))
|
||||
resp.response_length += sizeof(resp.tso_caps);
|
||||
if (field_avail(typeof(resp), rss_caps, uhw->outlen))
|
||||
resp.response_length += sizeof(resp.rss_caps);
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
|
||||
|
@ -595,6 +658,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
|
|||
if (!mlx5_core_is_pf(mdev))
|
||||
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
|
||||
|
||||
if (mlx5_ib_port_link_layer(ibdev, 1) ==
|
||||
IB_LINK_LAYER_ETHERNET) {
|
||||
props->rss_caps.max_rwq_indirection_tables =
|
||||
1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
|
||||
props->rss_caps.max_rwq_indirection_table_size =
|
||||
1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
|
||||
props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
|
||||
props->max_wq_type_rq =
|
||||
1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
|
||||
}
|
||||
|
||||
if (uhw->outlen) {
|
||||
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
|
||||
|
||||
|
@ -846,13 +920,13 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
|
|||
* If possible, pass node desc to FW, so it can generate
|
||||
* a 144 trap. If cmd fails, just ignore.
|
||||
*/
|
||||
memcpy(&in, props->node_desc, 64);
|
||||
memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
|
||||
err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
|
||||
sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
memcpy(ibdev->node_desc, props->node_desc, 64);
|
||||
memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1395,28 +1469,77 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool outer_header_zero(u32 *match_criteria)
|
||||
{
|
||||
int size = MLX5_ST_SZ_BYTES(fte_match_param);
|
||||
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
|
||||
outer_headers);
|
||||
enum {
|
||||
MATCH_CRITERIA_ENABLE_OUTER_BIT,
|
||||
MATCH_CRITERIA_ENABLE_MISC_BIT,
|
||||
MATCH_CRITERIA_ENABLE_INNER_BIT
|
||||
};
|
||||
|
||||
return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
|
||||
outer_headers_c + 1,
|
||||
size - 1);
|
||||
#define HEADER_IS_ZERO(match_criteria, headers) \
|
||||
!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
|
||||
0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
|
||||
|
||||
static u8 get_match_criteria_enable(u32 *match_criteria)
|
||||
{
|
||||
u8 match_criteria_enable;
|
||||
|
||||
match_criteria_enable =
|
||||
(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
|
||||
MATCH_CRITERIA_ENABLE_OUTER_BIT;
|
||||
match_criteria_enable |=
|
||||
(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
|
||||
MATCH_CRITERIA_ENABLE_MISC_BIT;
|
||||
match_criteria_enable |=
|
||||
(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
|
||||
MATCH_CRITERIA_ENABLE_INNER_BIT;
|
||||
|
||||
return match_criteria_enable;
|
||||
}
|
||||
|
||||
static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
|
||||
{
|
||||
MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
|
||||
}
|
||||
|
||||
static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
|
||||
{
|
||||
MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
|
||||
}
|
||||
|
||||
#define LAST_ETH_FIELD vlan_tag
|
||||
#define LAST_IB_FIELD sl
|
||||
#define LAST_IPV4_FIELD tos
|
||||
#define LAST_IPV6_FIELD traffic_class
|
||||
#define LAST_TCP_UDP_FIELD src_port
|
||||
|
||||
/* Field is the last supported field */
|
||||
#define FIELDS_NOT_SUPPORTED(filter, field)\
|
||||
memchr_inv((void *)&filter.field +\
|
||||
sizeof(filter.field), 0,\
|
||||
sizeof(filter) -\
|
||||
offsetof(typeof(filter), field) -\
|
||||
sizeof(filter.field))
|
||||
 static int parse_flow_attr(u32 *match_c, u32 *match_v,
-			   union ib_flow_spec *ib_spec)
+			   const union ib_flow_spec *ib_spec)
 {
 	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					     outer_headers);
 	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
 					     outer_headers);
+	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+					   misc_parameters);
+	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
+					   misc_parameters);
 
 	switch (ib_spec->type) {
 	case IB_FLOW_SPEC_ETH:
-		if (ib_spec->size != sizeof(ib_spec->eth))
-			return -EINVAL;
+		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+			return -ENOTSUPP;
 
 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
 					     dmac_47_16),

@@ -1463,8 +1586,8 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
 		break;
 	case IB_FLOW_SPEC_IPV4:
-		if (ib_spec->size != sizeof(ib_spec->ipv4))
-			return -EINVAL;
+		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+			return -ENOTSUPP;
 
 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
 			 ethertype, 0xffff);
@@ -1487,10 +1610,16 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 		       dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.val.dst_ip,
 		       sizeof(ib_spec->ipv4.val.dst_ip));
+
+		set_tos(outer_headers_c, outer_headers_v,
+			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
+
+		set_proto(outer_headers_c, outer_headers_v,
+			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
 		break;
 	case IB_FLOW_SPEC_IPV6:
-		if (ib_spec->size != sizeof(ib_spec->ipv6))
-			return -EINVAL;
+		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
+			return -ENOTSUPP;
 
 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
 			 ethertype, 0xffff);
@@ -1513,10 +1642,26 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 		       dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.val.dst_ip,
 		       sizeof(ib_spec->ipv6.val.dst_ip));
+
+		set_tos(outer_headers_c, outer_headers_v,
+			ib_spec->ipv6.mask.traffic_class,
+			ib_spec->ipv6.val.traffic_class);
+
+		set_proto(outer_headers_c, outer_headers_v,
+			  ib_spec->ipv6.mask.next_hdr,
+			  ib_spec->ipv6.val.next_hdr);
+
+		MLX5_SET(fte_match_set_misc, misc_params_c,
+			 outer_ipv6_flow_label,
+			 ntohl(ib_spec->ipv6.mask.flow_label));
+		MLX5_SET(fte_match_set_misc, misc_params_v,
+			 outer_ipv6_flow_label,
+			 ntohl(ib_spec->ipv6.val.flow_label));
 		break;
 	case IB_FLOW_SPEC_TCP:
-		if (ib_spec->size != sizeof(ib_spec->tcp_udp))
-			return -EINVAL;
+		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+					 LAST_TCP_UDP_FIELD))
+			return -ENOTSUPP;
 
 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
 			 0xff);
@@ -1534,8 +1679,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 			 ntohs(ib_spec->tcp_udp.val.dst_port));
 		break;
 	case IB_FLOW_SPEC_UDP:
-		if (ib_spec->size != sizeof(ib_spec->tcp_udp))
-			return -EINVAL;
+		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+					 LAST_TCP_UDP_FIELD))
+			return -ENOTSUPP;
 
 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
 			 0xff);
@@ -1582,7 +1728,7 @@ static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
 	       is_multicast_ether_addr(eth_spec->val.dst_mac);
 }
 
-static bool is_valid_attr(struct ib_flow_attr *flow_attr)
+static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
 {
 	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
 	bool has_ipv4_spec = false;
@@ -1626,12 +1772,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
 
 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
 		mlx5_del_flow_rule(iter->rule);
+		put_flow_table(dev, iter->prio, true);
 		list_del(&iter->list);
 		kfree(iter);
 	}
 
 	mlx5_del_flow_rule(handler->rule);
-	put_flow_table(dev, &dev->flow_db.prios[handler->prio], true);
+	put_flow_table(dev, handler->prio, true);
 	mutex_unlock(&dev->flow_db.lock);
 
 	kfree(handler);
@@ -1647,10 +1794,16 @@ static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
 	return priority;
 }
 
+enum flow_table_type {
+	MLX5_IB_FT_RX,
+	MLX5_IB_FT_TX
+};
+
 #define MLX5_FS_MAX_TYPES	 10
 #define MLX5_FS_MAX_ENTRIES	 32000UL
 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
-						struct ib_flow_attr *flow_attr)
+						struct ib_flow_attr *flow_attr,
+						enum flow_table_type ft_type)
 {
 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
 	struct mlx5_flow_namespace *ns = NULL;
@@ -1681,6 +1834,19 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 							 &num_entries,
 							 &num_groups);
 		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+					allow_sniffer_and_nic_rx_shared_tir))
+			return ERR_PTR(-ENOTSUPP);
+
+		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
+					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+
+		prio = &dev->flow_db.sniffer[ft_type];
+		priority = 0;
+		num_entries = 1;
+		num_groups = 1;
 	}
 
 	if (!ns)
@@ -1706,13 +1872,13 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 
 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 						     struct mlx5_ib_flow_prio *ft_prio,
-						     struct ib_flow_attr *flow_attr,
+						     const struct ib_flow_attr *flow_attr,
 						     struct mlx5_flow_destination *dst)
 {
 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
 	struct mlx5_flow_spec *spec;
-	void *ib_flow = flow_attr + 1;
+	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
 	unsigned int spec_index;
 	u32 action;
 	int err = 0;
@@ -1738,9 +1904,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
 	}
 
-	/* Outer header support only */
-	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
-		<< 0;
+	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
 	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
 		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
 	handler->rule = mlx5_add_flow_rule(ft, spec,
@@ -1753,7 +1917,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 		goto free;
 	}
 
-	handler->prio = ft_prio - dev->flow_db.prios;
+	ft_prio->refcount++;
+	handler->prio = ft_prio;
 
 	ft_prio->flow_table = ft;
 free:
@@ -1777,6 +1942,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
 						 flow_attr, dst);
 		if (IS_ERR(handler_dst)) {
 			mlx5_del_flow_rule(handler->rule);
+			ft_prio->refcount--;
 			kfree(handler);
 			handler = handler_dst;
 		} else {
@@ -1838,6 +2004,8 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
 					       &leftovers_specs[LEFTOVERS_UC].flow_attr,
 					       dst);
 		if (IS_ERR(handler_ucast)) {
+			mlx5_del_flow_rule(handler->rule);
+			ft_prio->refcount--;
 			kfree(handler);
 			handler = handler_ucast;
 		} else {
@@ -1848,6 +2016,43 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
 	return handler;
 }
 
+static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
+							struct mlx5_ib_flow_prio *ft_rx,
+							struct mlx5_ib_flow_prio *ft_tx,
+							struct mlx5_flow_destination *dst)
+{
+	struct mlx5_ib_flow_handler *handler_rx;
+	struct mlx5_ib_flow_handler *handler_tx;
+	int err;
+	static const struct ib_flow_attr flow_attr  = {
+		.num_of_specs = 0,
+		.size = sizeof(flow_attr)
+	};
+
+	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
+	if (IS_ERR(handler_rx)) {
+		err = PTR_ERR(handler_rx);
+		goto err;
+	}
+
+	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
+	if (IS_ERR(handler_tx)) {
+		err = PTR_ERR(handler_tx);
+		goto err_tx;
+	}
+
+	list_add(&handler_tx->list, &handler_rx->list);
+
+	return handler_rx;
+
+err_tx:
+	mlx5_del_flow_rule(handler_rx->rule);
+	ft_rx->refcount--;
+	kfree(handler_rx);
+err:
+	return ERR_PTR(err);
+}
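
Note: create_sniffer_rule() installs one catch-all rule (num_of_specs = 0) in the RX table and one in the TX table, then chains the TX handler onto the RX handler's list so that destroying the returned handler tears both down; on a TX failure the RX half is unwound by hand. The same allocate-pair-or-unwind shape, as a standalone C sketch (names are illustrative, not the driver's):

    #include <stdlib.h>

    struct handle { struct handle *next; };

    static struct handle *mk(int dir)
    {
        (void)dir;                       /* direction unused in the sketch */
        return calloc(1, sizeof(struct handle));
    }

    static struct handle *make_pair(void)
    {
        struct handle *rx = mk(0), *tx;

        if (!rx)
            return NULL;
        tx = mk(1);
        if (!tx) {
            free(rx);                    /* undo the first half on failure */
            return NULL;
        }
        rx->next = tx;                   /* destroying rx can now find tx */
        return rx;
    }
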
 
 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
@@ -1856,6 +2061,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	struct mlx5_ib_qp *mqp = to_mqp(qp);
 	struct mlx5_ib_flow_handler *handler = NULL;
 	struct mlx5_flow_destination *dst = NULL;
+	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
 	struct mlx5_ib_flow_prio *ft_prio;
 	int err;
 
@@ -1873,11 +2079,19 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 
 	mutex_lock(&dev->flow_db.lock);
 
-	ft_prio = get_flow_table(dev, flow_attr);
+	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
 	if (IS_ERR(ft_prio)) {
 		err = PTR_ERR(ft_prio);
 		goto unlock;
 	}
+	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
+		if (IS_ERR(ft_prio_tx)) {
+			err = PTR_ERR(ft_prio_tx);
+			ft_prio_tx = NULL;
+			goto destroy_ft;
+		}
+	}
 
 	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	if (mqp->flags & MLX5_IB_QP_RSS)
@@ -1897,6 +2111,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
 						dst);
+	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
 	} else {
 		err = -EINVAL;
 		goto destroy_ft;
@@ -1908,7 +2124,6 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		goto destroy_ft;
 	}
 
-	ft_prio->refcount++;
 	mutex_unlock(&dev->flow_db.lock);
 	kfree(dst);
 
@@ -1916,6 +2131,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 
 destroy_ft:
 	put_flow_table(dev, ft_prio, false);
+	if (ft_prio_tx)
+		put_flow_table(dev, ft_prio_tx, false);
 unlock:
 	mutex_unlock(&dev->flow_db.lock);
 	kfree(dst);
@@ -2105,14 +2322,19 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 		break;
 
 	case MLX5_DEV_EVENT_PORT_UP:
-		ibev.event = IB_EVENT_PORT_ACTIVE;
-		port = (u8)param;
-		break;
-
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		ibev.event = IB_EVENT_PORT_ERR;
 		port = (u8)param;
+
+		/* In RoCE, port up/down events are handled in
+		 * mlx5_netdev_event().
+		 */
+		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+			IB_LINK_LAYER_ETHERNET)
+			return;
+
+		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
 		break;
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
@@ -2235,7 +2457,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 		goto error_0;
 	}
 
-	pd = ib_alloc_pd(&dev->ib_dev);
+	pd = ib_alloc_pd(&dev->ib_dev, 0);
 	if (IS_ERR(pd)) {
 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
 		ret = PTR_ERR(pd);
@@ -2517,30 +2739,88 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
 		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
+static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
+								 MLX5_FLOW_NAMESPACE_LAG);
+	struct mlx5_flow_table *ft;
+	int err;
+
+	if (!ns || !mlx5_lag_is_active(mdev))
+		return 0;
+
+	err = mlx5_cmd_create_vport_lag(mdev);
+	if (err)
+		return err;
+
+	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
+	if (IS_ERR(ft)) {
+		err = PTR_ERR(ft);
+		goto err_destroy_vport_lag;
+	}
+
+	dev->flow_db.lag_demux_ft = ft;
+	return 0;
+
+err_destroy_vport_lag:
+	mlx5_cmd_destroy_vport_lag(mdev);
+	return err;
+}
+
+static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+
+	if (dev->flow_db.lag_demux_ft) {
+		mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
+		dev->flow_db.lag_demux_ft = NULL;
+
+		mlx5_cmd_destroy_vport_lag(mdev);
+	}
+}
+
+static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+{
+	if (dev->roce.nb.notifier_call) {
+		unregister_netdevice_notifier(&dev->roce.nb);
+		dev->roce.nb.notifier_call = NULL;
+	}
+}
+
 static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 {
 	int err;
 
 	dev->roce.nb.notifier_call = mlx5_netdev_event;
 	err = register_netdevice_notifier(&dev->roce.nb);
-	if (err)
+	if (err) {
+		dev->roce.nb.notifier_call = NULL;
 		return err;
+	}
 
 	err = mlx5_nic_vport_enable_roce(dev->mdev);
 	if (err)
 		goto err_unregister_netdevice_notifier;
 
+	err = mlx5_roce_lag_init(dev);
+	if (err)
+		goto err_disable_roce;
+
 	return 0;
 
+err_disable_roce:
+	mlx5_nic_vport_disable_roce(dev->mdev);
+
 err_unregister_netdevice_notifier:
-	unregister_netdevice_notifier(&dev->roce.nb);
+	mlx5_remove_roce_notifier(dev);
 	return err;
 }
 
 static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
 {
+	mlx5_roce_lag_cleanup(dev);
 	mlx5_nic_vport_disable_roce(dev->mdev);
-	unregister_netdevice_notifier(&dev->roce.nb);
 }
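
Note: the notifier is now torn down via mlx5_remove_roce_notifier(), which clears notifier_call after unregistering so the helper can be called from more than one error/removal path without double-unregistering. The guard pattern, boiled down to a C sketch (do_unregister() is a stand-in for the real unregister call):

    struct notifier { void (*cb)(void); };

    static void do_unregister(struct notifier *nb) { (void)nb; /* platform hook */ }

    static void remove_notifier(struct notifier *nb)
    {
        if (nb->cb) {            /* non-NULL means "still registered" */
            do_unregister(nb);
            nb->cb = NULL;       /* makes a second call a harmless no-op */
        }
    }
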
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2655,6 +2935,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	struct mlx5_ib_dev *dev;
 	enum rdma_link_layer ll;
 	int port_type_cap;
+	const char *name;
 	int err;
 	int i;
 
@@ -2687,7 +2968,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
 	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
-	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
+	if (!mlx5_lag_is_active(mdev))
+		name = "mlx5_%d";
+	else
+		name = "mlx5_bond_%d";
+
+	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner		= THIS_MODULE;
 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
 	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
@@ -2889,8 +3175,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	destroy_dev_resources(&dev->devr);
 
 err_disable_roce:
-	if (ll == IB_LINK_LAYER_ETHERNET)
+	if (ll == IB_LINK_LAYER_ETHERNET) {
 		mlx5_disable_roce(dev);
+		mlx5_remove_roce_notifier(dev);
+	}
 
 err_free_port:
 	kfree(dev->port);
@@ -2906,6 +3194,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	struct mlx5_ib_dev *dev = context;
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
+	mlx5_remove_roce_notifier(dev);
 	ib_unregister_device(&dev->ib_dev);
 	mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);

@@ -44,6 +44,7 @@
 #include <linux/types.h>
 #include <linux/mlx5/transobj.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/mlx5-abi.h>
 
 #define mlx5_ib_dbg(dev, format, arg...)				\
 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
@@ -142,6 +143,7 @@ struct mlx5_ib_pd {
 #define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)
 
 #define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS		2
 struct mlx5_ib_flow_prio {
 	struct mlx5_flow_table		*flow_table;
 	unsigned int			refcount;
@@ -150,12 +152,14 @@ struct mlx5_ib_flow_prio {
 struct mlx5_ib_flow_handler {
 	struct list_head		list;
 	struct ib_flow			ibflow;
-	unsigned int			prio;
+	struct mlx5_ib_flow_prio	*prio;
 	struct mlx5_flow_rule	*rule;
 };
 
 struct mlx5_ib_flow_db {
 	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
+	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+	struct mlx5_flow_table		*lag_demux_ft;
 	/* Protect flow steering bypass flow tables
 	 * when add/del flow rules.
 	 * only single add/removal of flow steering rule could be done
@@ -225,7 +229,7 @@ struct mlx5_ib_wq {
 
 struct mlx5_ib_rwq {
 	struct ib_wq		ibwq;
-	u32			rqn;
+	struct mlx5_core_qp	core_qp;
 	u32			rq_num_pas;
 	u32			log_rq_stride;
 	u32			log_rq_size;
@@ -603,6 +607,7 @@ struct mlx5_roce {
 	rwlock_t		netdev_lock;
 	struct net_device	*netdev;
 	struct notifier_block	nb;
+	atomic_t		next_port;
 };
 
 struct mlx5_ib_dev {
@@ -663,6 +668,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
 	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
 }
 
+static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
+{
+	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
+}
+
 static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
 {
 	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
@@ -947,4 +957,40 @@ static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
 
 	return 0;
 }
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+				    struct mlx5_ib_create_qp *ucmd,
+				    int inlen,
+				    u32 *user_index)
+{
+	u8 cqe_version = ucontext->cqe_version;
+
+	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
+	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+		return 0;
+
+	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
+	       !!cqe_version))
+		return -EINVAL;
+
+	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+				     struct mlx5_ib_create_srq *ucmd,
+				     int inlen,
+				     u32 *user_index)
+{
+	u8 cqe_version = ucontext->cqe_version;
+
+	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
+	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+		return 0;
+
+	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
+	       !!cqe_version))
+		return -EINVAL;
+
+	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
 #endif /* MLX5_IB_H */
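
Note: get_qp_user_index()/get_srq_user_index() accept a command only when the presence of the uidx field in the user buffer agrees with the negotiated cqe_version; the double negation normalizes both sides to booleans before comparing. The agreement check, as a minimal C sketch:

    #include <errno.h>

    static int check_uidx(int field_present, int cqe_version,
                          unsigned int uidx, unsigned int default_uidx)
    {
        if (field_present && !cqe_version && uidx == default_uidx)
            return 0;               /* old-style request, nothing to verify */

        if (!!field_present != !!cqe_version)
            return -EINVAL;         /* one side negotiated, the other didn't */

        return 0;                   /* caller goes on to verify/assign uidx */
    }
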
 
@@ -40,7 +40,6 @@
 #include <rdma/ib_umem_odp.h>
 #include <rdma/ib_verbs.h>
 #include "mlx5_ib.h"
-#include "user.h"
 
 enum {
 	MAX_PENDING_REG_MR = 8,
@@ -611,7 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	int err;
 	int i;
 
-	cache->wq = create_singlethread_workqueue("mkey_cache");
+	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
 	if (!cache->wq) {
 		mlx5_ib_warn(dev, "failed to create work queue\n");
 		return -ENOMEM;

@@ -782,8 +782,8 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
 
 int __init mlx5_ib_odp_init(void)
 {
-	mlx5_ib_page_fault_wq =
-		create_singlethread_workqueue("mlx5_ib_page_faults");
+	mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults",
+							WQ_MEM_RECLAIM);
 	if (!mlx5_ib_page_fault_wq)
 		return -ENOMEM;
 
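
Note: these two hunks, and the similar ones in mthca, nes, qib and ipoib below, are the tree-wide conversion away from the deprecated create_singlethread_workqueue(). alloc_ordered_workqueue() preserves the one-work-item-at-a-time ordering the callers rely on, and WQ_MEM_RECLAIM is kept only where the queue can sit on a memory-reclaim path (it is dropped for the nes queues, for instance). The shape of the conversion, as a kernel-style sketch:

    #include <linux/workqueue.h>

    static struct workqueue_struct *wq;

    static int init_wq(bool on_reclaim_path)
    {
        /* old: wq = create_singlethread_workqueue("mkey_cache"); */
        wq = alloc_ordered_workqueue("mkey_cache",
                                     on_reclaim_path ? WQ_MEM_RECLAIM : 0);
        return wq ? 0 : -ENOMEM;
    }
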
 
@@ -35,7 +35,6 @@
 #include <rdma/ib_cache.h>
 #include <rdma/ib_user_verbs.h>
 #include "mlx5_ib.h"
-#include "user.h"
 
 /* not supported currently */
 static int wq_signature;
@@ -77,6 +76,17 @@ struct mlx5_wqe_eth_pad {
 	u8 rsvd0[16];
 };
 
+enum raw_qp_set_mask_map {
+	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
+};
+
+struct mlx5_modify_raw_qp_param {
+	u16 operation;
+
+	u32 set_mask; /* raw_qp_set_mask_map */
+	u8 rq_q_ctr_id;
+};
+
 static void get_cqs(enum ib_qp_type qp_type,
 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
@@ -1863,7 +1873,8 @@ static void get_cqs(enum ib_qp_type qp_type,
 }
 
 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u16 operation);
+				const struct mlx5_modify_raw_qp_param *raw_qp_param,
+				u8 lag_tx_affinity);
 
 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
@@ -1888,8 +1899,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 						  MLX5_CMD_OP_2RST_QP, 0,
 						  NULL, &base->mqp);
 		} else {
-			err = modify_raw_packet_qp(dev, qp,
-						   MLX5_CMD_OP_2RST_QP);
+			struct mlx5_modify_raw_qp_param raw_qp_param = {
+				.operation = MLX5_CMD_OP_2RST_QP
+			};
+
+			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
 		}
 		if (err)
 			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
@@ -2153,6 +2167,31 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 	return err;
 }
 
+static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
+					 struct mlx5_ib_sq *sq, u8 tx_affinity)
+{
+	void *in;
+	void *tisc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+
+	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
+
+	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			 const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
@@ -2363,8 +2402,9 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
 	return result;
 }
 
-static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
-				   struct mlx5_ib_rq *rq, int new_state)
+static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+				   struct mlx5_ib_rq *rq, int new_state,
+				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
 {
 	void *in;
 	void *rqc;
@@ -2381,7 +2421,17 @@ static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 	MLX5_SET(rqc, rqc, state, new_state);
 
-	err = mlx5_core_modify_rq(dev, rq->base.mqp.qpn, in, inlen);
+	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
+		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+			MLX5_SET64(modify_rq_in, in, modify_bitmask,
+				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID);
+			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
+		} else
+			pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
+				     dev->ib_dev.name);
+	}
+
+	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
 	if (err)
 		goto out;
 
@@ -2422,7 +2472,8 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 }
 
 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u16 operation)
+				const struct mlx5_modify_raw_qp_param *raw_qp_param,
+				u8 tx_affinity)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -2431,7 +2482,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	int sq_state;
 	int err;
 
-	switch (operation) {
+	switch (raw_qp_param->operation) {
 	case MLX5_CMD_OP_RST2INIT_QP:
 		rq_state = MLX5_RQC_STATE_RDY;
 		sq_state = MLX5_SQC_STATE_RDY;
@@ -2448,21 +2499,31 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	case MLX5_CMD_OP_INIT2RTR_QP:
 	case MLX5_CMD_OP_RTR2RTS_QP:
 	case MLX5_CMD_OP_RTS2RTS_QP:
-		/* Nothing to do here... */
-		return 0;
+		if (raw_qp_param->set_mask)
+			return -EINVAL;
+		else
+			return 0;
 	default:
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
 	if (qp->rq.wqe_cnt) {
-		err = modify_raw_packet_qp_rq(dev->mdev, rq, rq_state);
+		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
 		if (err)
 			return err;
 	}
 
-	if (qp->sq.wqe_cnt)
+	if (qp->sq.wqe_cnt) {
+		if (tx_affinity) {
+			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
+							    tx_affinity);
+			if (err)
+				return err;
+		}
+
 		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+	}
 
 	return 0;
 }
@@ -2514,12 +2575,14 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
 	struct mlx5_ib_pd *pd;
+	struct mlx5_ib_port *mibport = NULL;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
 	enum mlx5_qp_optpar optpar;
 	int sqd_event;
 	int mlx5_st;
 	int err;
 	u16 op;
+	u8 tx_affinity = 0;
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -2549,6 +2612,23 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
+	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+		if ((ibqp->qp_type == IB_QPT_RC) ||
+		    (ibqp->qp_type == IB_QPT_UD &&
+		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
+		    (ibqp->qp_type == IB_QPT_UC) ||
+		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
+		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
+			if (mlx5_lag_is_active(dev->mdev)) {
+				tx_affinity = (unsigned int)atomic_add_return(1,
+						&dev->roce.next_port) %
+						MLX5_MAX_PORTS + 1;
+				context->flags |= cpu_to_be32(tx_affinity << 24);
+			}
+		}
+	}
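
Note: when LAG is active, each newly initialized QP is pinned to one of the two physical ports by a global round-robin counter; because C's % binds tighter than +, the expression yields port numbers 1..MLX5_MAX_PORTS, never 0. The selection logic as a standalone C sketch (the kernel's atomic_add_return() returns the incremented value, while C11's atomic_fetch_add() returns the old one; the cycle is the same):

    #include <stdatomic.h>

    #define MAX_PORTS 2

    static atomic_uint next_port;

    static unsigned int pick_tx_port(void)
    {
        /* %, then +1: result cycles 1, 2, 1, 2, ... never 0 */
        return atomic_fetch_add(&next_port, 1) % MAX_PORTS + 1;
    }
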
 
 	if (is_sqp(ibqp->qp_type)) {
 		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
 	} else if (ibqp->qp_type == IB_QPT_UD ||
@@ -2654,8 +2734,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
 			       qp->port) - 1;
-		struct mlx5_ib_port *mibport = &dev->port[port_num];
-
+		mibport = &dev->port[port_num];
 		context->qp_counter_set_usr_page |=
 			cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
 	}
@@ -2690,11 +2769,20 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
-		err = modify_raw_packet_qp(dev, qp, op);
-	else
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+		struct mlx5_modify_raw_qp_param raw_qp_param = {};
+
+		raw_qp_param.operation = op;
+		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+			raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
+			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
+		}
+		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
+	} else {
 		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
 					  &base->mqp);
+	}
 
 	if (err)
 		goto out;
 
@@ -4497,6 +4585,28 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 	return 0;
 }
 
+static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+{
+	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
+	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
+	struct ib_event event;
+
+	if (rwq->ibwq.event_handler) {
+		event.device     = rwq->ibwq.device;
+		event.element.wq = &rwq->ibwq;
+		switch (type) {
+		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+			event.event = IB_EVENT_WQ_FATAL;
+			break;
+		default:
+			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
+			return;
+		}
+
+		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
+	}
+}
+
 static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 		      struct ib_wq_init_attr *init_attr)
 {
@@ -4534,7 +4644,7 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
 	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
-	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
 	kvfree(in);
 	return err;
 }
@@ -4650,7 +4760,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 	}
 
-	rwq->ibwq.wq_num = rwq->rqn;
+	rwq->ibwq.wq_num = rwq->core_qp.qpn;
 	rwq->ibwq.state = IB_WQS_RESET;
 	if (udata->outlen) {
 		resp.response_length = offsetof(typeof(resp), response_length) +
@@ -4660,10 +4770,12 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 			goto err_copy;
 	}
 
+	rwq->core_qp.event = mlx5_ib_wq_event;
+	rwq->ibwq.event_handler = init_attr->event_handler;
 	return &rwq->ibwq;
 
 err_copy:
-	mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
 err_user_rq:
 	destroy_user_rq(pd, rwq);
 err:
@@ -4676,7 +4788,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
 
-	mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
 	destroy_user_rq(wq->pd, rwq);
 	kfree(rwq);
 
@@ -4808,7 +4920,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
 	MLX5_SET(rqc, rqc, state, wq_state);
 
-	err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
 	kvfree(in);
 	if (!err)
 		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;

@@ -38,7 +38,6 @@
 #include <rdma/ib_user_verbs.h>
 
 #include "mlx5_ib.h"
-#include "user.h"
 
 /* not supported currently */
 static int srq_signature;

@@ -187,7 +187,7 @@ int __init mthca_catas_init(void)
 {
 	INIT_WORK(&catas_work, catas_reset);
 
-	catas_wq = create_singlethread_workqueue("mthca_catas");
+	catas_wq = alloc_ordered_workqueue("mthca_catas", WQ_MEM_RECLAIM);
 	if (!catas_wq)
 		return -ENOMEM;
 

@@ -153,7 +153,8 @@ static void node_desc_override(struct ib_device *dev,
 	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
 	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
 		mutex_lock(&to_mdev(dev)->cap_mask_mutex);
-		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+		memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+		       IB_DEVICE_NODE_DESC_MAX);
 		mutex_unlock(&to_mdev(dev)->cap_mask_mutex);
 	}
 }

@@ -46,7 +46,7 @@
 
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
-#include "mthca_user.h"
+#include <rdma/mthca-abi.h>
 #include "mthca_memfree.h"
 
 static void init_query_mad(struct ib_smp *mad)
@@ -193,7 +193,8 @@ static int mthca_modify_device(struct ib_device *ibdev,
 	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
 		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
 			return -ERESTARTSYS;
-		memcpy(ibdev->node_desc, props->node_desc, 64);
+		memcpy(ibdev->node_desc, props->node_desc,
+		       IB_DEVICE_NODE_DESC_MAX);
 		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
 	}
 
@@ -1138,7 +1139,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
 	if (err)
 		goto out;
 
-	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
 
 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 

@@ -165,7 +165,7 @@ do { \
 #include "nes_hw.h"
 #include "nes_verbs.h"
 #include "nes_context.h"
-#include "nes_user.h"
+#include <rdma/nes-abi.h>
 #include "nes_cm.h"
 #include "nes_mgt.h"
 

@@ -2692,12 +2692,12 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
 	nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
 
 	nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
-	cm_core->event_wq = create_singlethread_workqueue("nesewq");
+	cm_core->event_wq = alloc_ordered_workqueue("nesewq", 0);
 	if (!cm_core->event_wq)
 		goto out_free_cmcore;
 	cm_core->post_event = nes_cm_post_event;
 	nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
-	cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+	cm_core->disconn_wq = alloc_ordered_workqueue("nesdwq", 0);
 	if (!cm_core->disconn_wq)
 		goto out_free_wq;
 

@@ -56,7 +56,7 @@
 #include "be_roce.h"
 #include "ocrdma_hw.h"
 #include "ocrdma_stats.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
 
 MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
 MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
@@ -119,6 +119,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
 {
 	strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
 	ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+	BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
 	memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
 	       sizeof(OCRDMA_NODE_DESC));
 	dev->ibdev.owner = THIS_MODULE;

@@ -51,7 +51,7 @@
 #include "ocrdma.h"
 #include "ocrdma_hw.h"
 #include "ocrdma_verbs.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
 
 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {

@@ -614,8 +614,8 @@ static int qib_create_workqueues(struct qib_devdata *dd)
 
 			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
 				dd->unit, pidx);
-			ppd->qib_wq =
-				create_singlethread_workqueue(wq_name);
+			ppd->qib_wq = alloc_ordered_workqueue(wq_name,
+							      WQ_MEM_RECLAIM);
 			if (!ppd->qib_wq)
 				goto wq_error;
 		}

@@ -1370,7 +1370,8 @@ static int qib_modify_device(struct ib_device *device,
 	}
 
 	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
-		memcpy(device->node_desc, device_modify->node_desc, 64);
+		memcpy(device->node_desc, device_modify->node_desc,
+		       IB_DEVICE_NODE_DESC_MAX);
 		for (i = 0; i < dd->num_pports; i++) {
 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
 

@@ -772,7 +772,13 @@ static inline void ipoib_unregister_debugfs(void) { }
 #define ipoib_printk(level, priv, format, arg...)	\
 	printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
 #define ipoib_warn(priv, format, arg...)		\
-	ipoib_printk(KERN_WARNING, priv, format , ## arg)
+do {							\
+	static DEFINE_RATELIMIT_STATE(_rs,		\
+		10 * HZ /*10 seconds */,		\
+		100);					\
+	if (__ratelimit(&_rs))				\
+		ipoib_printk(KERN_WARNING, priv, format , ## arg);\
+} while (0)
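
Note: ipoib_warn() is now rate-limited to 100 messages per 10 seconds, with one static ratelimit state per macro expansion (i.e. per call site). A userspace C sketch of the same per-callsite limiting idea (coarser than the kernel's jiffies-based window):

    #include <stdio.h>
    #include <time.h>

    #define RL_INTERVAL 10   /* seconds */
    #define RL_BURST    100  /* messages allowed per interval */

    #define warn_ratelimited(...) do {                        \
        static time_t rl_start; static int rl_count;          \
        time_t now = time(NULL);                              \
        if (now - rl_start >= RL_INTERVAL) {                  \
            rl_start = now;                                   \
            rl_count = 0;                                     \
        }                                                     \
        if (rl_count++ < RL_BURST)                            \
            fprintf(stderr, __VA_ARGS__);                     \
    } while (0)
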
 
 extern int ipoib_sendq_size;
 extern int ipoib_recvq_size;

@@ -2196,7 +2196,8 @@ static int __init ipoib_init_module(void)
 	 * its private workqueue, and we only queue up flush events
 	 * on our global flush workqueue.  This avoids the deadlocks.
 	 */
-	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
+						  WQ_MEM_RECLAIM);
 	if (!ipoib_workqueue) {
 		ret = -ENOMEM;
 		goto err_fs;

@@ -147,7 +147,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	int ret, size;
 	int i;
 
-	priv->pd = ib_alloc_pd(priv->ca);
+	priv->pd = ib_alloc_pd(priv->ca, 0);
 	if (IS_ERR(priv->pd)) {
 		printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
 		return -ENODEV;
@@ -157,7 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	 * the various IPoIB tasks assume they will never race against
 	 * themselves, so always use a single thread workqueue
 	 */
-	priv->wq = create_singlethread_workqueue("ipoib_wq");
+	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
 	if (!priv->wq) {
 		printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
 		goto out_free_pd;

@@ -374,7 +374,6 @@ struct iser_reg_ops {
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd	             *pd;
-	struct ib_mr	             *mr;
 	struct ib_event_handler      event_handler;
 	struct list_head             ig_list;
 	int                          refcount;

@@ -199,7 +199,11 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
 	 * FIXME: rework the registration code path to differentiate
 	 * rkey/lkey use cases
 	 */
-	reg->rkey = device->mr ? device->mr->rkey : 0;
+
+	if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+		reg->rkey = device->pd->unsafe_global_rkey;
+	else
+		reg->rkey = 0;
 	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
 	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
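
Note: this is the consumer side of the ib_get_dma_mr() removal announced in the merge summary. A ULP that still wants an all-of-memory rkey now has to request it at PD allocation with IB_PD_UNSAFE_GLOBAL_RKEY and read pd->unsafe_global_rkey; iser only opts in when iser_always_reg is off. The usage shape, as a hedged kernel-style sketch (error handling trimmed):

    #include <rdma/ib_verbs.h>

    static u32 pick_rkey(struct ib_device *dev, bool always_reg)
    {
        /* ask for the unsafe global rkey only when not always registering */
        struct ib_pd *pd = ib_alloc_pd(dev,
                                       always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);

        if (IS_ERR(pd))
            return 0;
        return (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ?
               pd->unsafe_global_rkey : 0;
    }
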
 
@@ -88,7 +88,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
 		  device->comps_used, ib_dev->name,
 		  ib_dev->num_comp_vectors, max_cqe);
 
-	device->pd = ib_alloc_pd(ib_dev);
+	device->pd = ib_alloc_pd(ib_dev,
+		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
@@ -103,26 +104,13 @@ static int iser_create_device_ib_res(struct iser_device *device)
 		}
 	}
 
-	if (!iser_always_reg) {
-		int access = IB_ACCESS_LOCAL_WRITE |
-			     IB_ACCESS_REMOTE_WRITE |
-			     IB_ACCESS_REMOTE_READ;
-
-		device->mr = ib_get_dma_mr(device->pd, access);
-		if (IS_ERR(device->mr))
-			goto cq_err;
-	}
-
 	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
 			      iser_event_handler);
 	if (ib_register_event_handler(&device->event_handler))
-		goto handler_err;
+		goto cq_err;
 
 	return 0;
 
-handler_err:
-	if (device->mr)
-		ib_dereg_mr(device->mr);
 cq_err:
 	for (i = 0; i < device->comps_used; i++) {
 		struct iser_comp *comp = &device->comps[i];
@@ -154,14 +142,10 @@ static void iser_free_device_ib_res(struct iser_device *device)
 	}
 
 	(void)ib_unregister_event_handler(&device->event_handler);
-	if (device->mr)
-		(void)ib_dereg_mr(device->mr);
 	ib_dealloc_pd(device->pd);
 
 	kfree(device->comps);
 	device->comps = NULL;
-
-	device->mr = NULL;
 	device->pd = NULL;
 }
 

@@ -309,7 +309,7 @@ isert_create_device_ib_res(struct isert_device *device)
 	if (ret)
 		goto out;
 
-	device->pd = ib_alloc_pd(ib_dev);
+	device->pd = ib_alloc_pd(ib_dev, 0);
 	if (IS_ERR(device->pd)) {
 		ret = PTR_ERR(device->pd);
 		isert_err("failed to allocate pd, device %p, ret=%d\n",

@@ -1262,6 +1262,7 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
+	struct ib_pd *pd = target->pd;
 	struct ib_pool_fmr *fmr;
 	u64 io_addr = 0;
 
@@ -1273,9 +1274,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 	if (state->npages == 0)
 		return 0;
 
-	if (state->npages == 1 && target->global_mr) {
+	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
-			     target->global_mr->rkey);
+			     pd->unsafe_global_rkey);
 		goto reset_state;
 	}
 
@@ -1315,6 +1316,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
+	struct ib_pd *pd = target->pd;
 	struct ib_send_wr *bad_wr;
 	struct ib_reg_wr wr;
 	struct srp_fr_desc *desc;
@@ -1326,12 +1328,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 
 	WARN_ON_ONCE(!dev->use_fast_reg);
 
-	if (sg_nents == 1 && target->global_mr) {
+	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
 		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
 
 		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
 			     sg_dma_len(state->sg) - sg_offset,
-			     target->global_mr->rkey);
+			     pd->unsafe_global_rkey);
 		if (sg_offset_p)
 			*sg_offset_p = 0;
 		return 1;
@@ -1386,7 +1388,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 
 static int srp_map_sg_entry(struct srp_map_state *state,
 			    struct srp_rdma_ch *ch,
-			    struct scatterlist *sg, int sg_index)
+			    struct scatterlist *sg)
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
@@ -1400,7 +1402,9 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 
 	while (dma_len) {
 		unsigned offset = dma_addr & ~dev->mr_page_mask;
-		if (state->npages == dev->max_pages_per_mr || offset != 0) {
+
+		if (state->npages == dev->max_pages_per_mr ||
+		    (state->npages > 0 && offset != 0)) {
 			ret = srp_map_finish_fmr(state, ch);
 			if (ret)
 				return ret;
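
Note: this hunk is the "Fix infinite loop when FMR sg[0].offset != 0" change from the merge summary. The old test closed the in-progress MR whenever the next chunk started at a non-zero page offset, even when no pages had been collected yet, so with an unaligned first scatterlist entry the mapper flushed an empty MR forever and made no progress. Requiring npages > 0 lets the first, unaligned chunk open an MR. Boiled down to a C sketch:

    /* decide whether the current memory region must be closed out */
    static int need_to_close(int npages, int max_pages, unsigned int offset)
    {
        /* old: npages == max_pages || offset != 0  -- spins when npages == 0 */
        return npages == max_pages || (npages > 0 && offset != 0);
    }
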
 
@@ -1417,12 +1421,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	}
 
 	/*
-	 * If the last entry of the MR wasn't a full page, then we need to
+	 * If the end of the MR is not on a page boundary then we need to
 	 * close it out and start a new one -- we can only merge at page
 	 * boundaries.
 	 */
 	ret = 0;
-	if (len != dev->mr_page_size)
+	if ((dma_addr & ~dev->mr_page_mask) != 0)
 		ret = srp_map_finish_fmr(state, ch);
 	return ret;
 }
@@ -1439,7 +1443,7 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
 
 	for_each_sg(scat, sg, count, i) {
-		ret = srp_map_sg_entry(state, ch, sg, i);
+		ret = srp_map_sg_entry(state, ch, sg);
 		if (ret)
 			return ret;
 	}
@@ -1491,7 +1495,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
 	for_each_sg(scat, sg, count, i) {
 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
 			     ib_sg_dma_len(dev->dev, sg),
-			     target->global_mr->rkey);
+			     target->pd->unsafe_global_rkey);
 	}
 
 	return 0;
@@ -1591,6 +1595,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 			struct srp_request *req)
 {
 	struct srp_target_port *target = ch->target;
+	struct ib_pd *pd = target->pd;
 	struct scatterlist *scat;
 	struct srp_cmd *cmd = req->cmd->buf;
 	int len, nents, count, ret;
@@ -1626,7 +1631,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 	fmt = SRP_DATA_DESC_DIRECT;
 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
 
-	if (count == 1 && target->global_mr) {
+	if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
 		/*
 		 * The midlayer only generated a single gather/scatter
 		 * entry, or DMA mapping coalesced everything to a
@@ -1636,7 +1641,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(target->global_mr->rkey);
+		buf->key = cpu_to_be32(pd->unsafe_global_rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 
 		req->nmdesc = 0;
@@ -1709,14 +1714,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
 	       count * sizeof (struct srp_direct_buf));
 
-	if (!target->global_mr) {
+	if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
 				  idb_len, &idb_rkey);
 		if (ret < 0)
 			goto unmap;
 		req->nmdesc++;
 	} else {
-		idb_rkey = cpu_to_be32(target->global_mr->rkey);
+		idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
 	}
 
 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -3268,8 +3273,8 @@ static ssize_t srp_create_target(struct device *dev,
 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host	= target_host;
 	target->srp_host	= host;
+	target->pd		= host->srp_dev->pd;
 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
-	target->global_mr	= host->srp_dev->global_mr;
 	target->cmd_sg_cnt	= cmd_sg_entries;
 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
 	target->allow_ext_sg	= allow_ext_sg;
@@ -3524,6 +3529,7 @@ static void srp_add_one(struct ib_device *device)
 	struct srp_host *host;
 	int mr_page_shift, p;
 	u64 max_pages_per_mr;
+	unsigned int flags = 0;
 
 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
 	if (!srp_dev)
@@ -3558,6 +3564,10 @@ static void srp_add_one(struct ib_device *device)
 		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
 	}
 
+	if (never_register || !register_always ||
+	    (!srp_dev->has_fmr && !srp_dev->has_fr))
+		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
+
 	if (srp_dev->use_fast_reg) {
 		srp_dev->max_pages_per_mr =
 			min_t(u32, srp_dev->max_pages_per_mr,
@@ -3573,19 +3583,10 @@ static void srp_add_one(struct ib_device *device)
 	INIT_LIST_HEAD(&srp_dev->dev_list);
 
 	srp_dev->dev = device;
-	srp_dev->pd  = ib_alloc_pd(device);
+	srp_dev->pd  = ib_alloc_pd(device, flags);
 	if (IS_ERR(srp_dev->pd))
 		goto free_dev;
 
-	if (never_register || !register_always ||
-	    (!srp_dev->has_fmr && !srp_dev->has_fr)) {
-		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
-						   IB_ACCESS_LOCAL_WRITE |
-						   IB_ACCESS_REMOTE_READ |
-						   IB_ACCESS_REMOTE_WRITE);
-		if (IS_ERR(srp_dev->global_mr))
-			goto err_pd;
-	}
-
 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
 		host = srp_add_port(srp_dev, p);
@@ -3596,9 +3597,6 @@ static void srp_add_one(struct ib_device *device)
 	ib_set_client_data(device, &srp_client, srp_dev);
 	return;
 
-err_pd:
-	ib_dealloc_pd(srp_dev->pd);
-
 free_dev:
 	kfree(srp_dev);
 }
@@ -3638,8 +3636,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
 		kfree(host);
 	}
 
-	if (srp_dev->global_mr)
-		ib_dereg_mr(srp_dev->global_mr);
 	ib_dealloc_pd(srp_dev->pd);
 
 	kfree(srp_dev);

@@ -90,7 +90,6 @@ struct srp_device {
 	struct list_head	dev_list;
 	struct ib_device       *dev;
 	struct ib_pd	       *pd;
-	struct ib_mr	       *global_mr;
 	u64			mr_page_mask;
 	int			mr_page_size;
 	int			mr_max_size;
@@ -179,7 +178,7 @@ struct srp_target_port {
 	spinlock_t		lock;
 
 	/* read only in the hot path */
-	struct ib_mr		*global_mr;
+	struct ib_pd		*pd;
 	struct srp_rdma_ch	*ch;
 	u32			ch_count;
 	u32			lkey;

@@ -2480,7 +2480,7 @@ static void srpt_add_one(struct ib_device *device)
 	init_waitqueue_head(&sdev->ch_releaseQ);
 	mutex_init(&sdev->mutex);
 
-	sdev->pd = ib_alloc_pd(device);
+	sdev->pd = ib_alloc_pd(device, 0);
 	if (IS_ERR(sdev->pd))
 		goto free_dev;
 

@@ -350,6 +350,7 @@ struct adapter_params {
 	unsigned int nsched_cls;          /* number of traffic classes */
 	unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
 	unsigned int max_ird_adapter;     /* Max read depth per adapter */
+	bool fr_nsmr_tpte_wr_support;	  /* FW support for FR_NSMR_TPTE_WR */
 };
 
 /* State needed to monitor the forward progress of SGE Ingress DMA activities

@@ -3668,6 +3668,12 @@ static int adap_init0(struct adapter *adap)
 		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
 	}
 
+	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
+	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+			      1, params, val);
+	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
+
 	/*
 	 * Get device capabilities so we can determine what resources we need
 	 * to manage.
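
Note: the cxgb4 hunks advertise the fast-path FR_NSMR_TPTE_WR work request used by the new small-REG_MR path in iw_cxgb4. The driver probes the firmware for the capability and treats any query failure as "not supported", so older firmware keeps working unchanged. The probe-and-default-to-off pattern, as a hedged sketch of the call used above (t4_query_params() is cxgb4's own helper; the signature follows its use in this hunk):

    /* kernel fragment, assumes cxgb4's struct adapter and t4_query_params() */
    static bool probe_fw_cap(struct adapter *adap, u32 param)
    {
        u32 val = 0;
        int ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
                                  1, &param, &val);

        return ret == 0 && val != 0;   /* any error means: don't use it */
    }
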
@ -550,6 +550,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
|
|||
lld->max_ird_adapter = adap->params.max_ird_adapter;
|
||||
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
|
||||
lld->nodeid = dev_to_node(adap->pdev_dev);
|
||||
lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
|
||||
}
|
||||
|
||||
static void uld_attach(struct adapter *adap, unsigned int uld)
|
||||
|
|
|
@ -308,6 +308,7 @@ struct cxgb4_lld_info {
|
|||
unsigned int iscsi_llimit; /* chip's iscsi region llimit */
|
||||
void **iscsi_ppm; /* iscsi page pod manager */
|
||||
int nodeid; /* device numa node id */
|
||||
bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */
|
||||
};
|
||||
|
||||
struct cxgb4_uld_info {
|
||||
|
|
|
@ -100,6 +100,7 @@ enum fw_wr_opcodes {
|
|||
FW_RI_RECV_WR = 0x17,
|
||||
FW_RI_BIND_MW_WR = 0x18,
|
||||
FW_RI_FR_NSMR_WR = 0x19,
|
||||
FW_RI_FR_NSMR_TPTE_WR = 0x20,
|
||||
FW_RI_INV_LSTAG_WR = 0x1a,
|
||||
FW_ISCSI_TX_DATA_WR = 0x45,
|
||||
FW_CRYPTO_LOOKASIDE_WR = 0X6d,
|
||||
|
@ -1121,6 +1122,7 @@ enum fw_params_param_dev {
|
|||
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
|
||||
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
|
||||
FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
|
||||
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -644,21 +644,6 @@ hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
|
|||
return addr;
|
||||
}
|
||||
|
||||
static int hns_mac_phydev_match(struct device *dev, void *fwnode)
|
||||
{
|
||||
return dev->fwnode == fwnode;
|
||||
}
|
||||
|
||||
static struct
|
||||
platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct device *dev;
|
||||
|
||||
dev = bus_find_device(&platform_bus_type, NULL,
|
||||
fwnode, hns_mac_phydev_match);
|
||||
return dev ? to_platform_device(dev) : NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
|
||||
u32 addr)
|
||||
|
@ -724,7 +709,7 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
|
|||
return;
|
||||
|
||||
/* dev address in adev */
|
||||
pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
|
||||
pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
|
||||
mii_bus = platform_get_drvdata(pdev);
|
||||
rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
|
||||
if (!rc)
|
||||
|
|
|
@ -2780,7 +2780,7 @@ module_platform_driver(g_dsaf_driver);
|
|||
* @enable: false - request reset , true - drop reset
|
||||
* retuen 0 - success , negative -fail
|
||||
*/
|
||||
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
|
||||
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
|
||||
{
|
||||
struct dsaf_device *dsaf_dev;
|
||||
struct platform_device *pdev;
|
||||
|
@ -2809,24 +2809,44 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
|
|||
{DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
|
||||
};
|
||||
|
||||
if (!is_of_node(dsaf_fwnode)) {
|
||||
pr_err("hisi_dsaf: Only support DT node!\n");
|
||||
/* find the platform device corresponding to fwnode */
|
||||
if (is_of_node(dsaf_fwnode)) {
|
||||
pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
|
||||
} else if (is_acpi_device_node(dsaf_fwnode)) {
|
||||
pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
|
||||
} else {
|
||||
pr_err("fwnode is neither OF or ACPI type\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
|
||||
|
||||
/* check if we were a success in fetching pdev */
|
||||
if (!pdev) {
|
||||
pr_err("couldn't find platform device for node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* retrieve the dsaf_device from the driver data */
|
||||
dsaf_dev = dev_get_drvdata(&pdev->dev);
|
||||
if (!dsaf_dev) {
|
||||
dev_err(&pdev->dev, "dsaf_dev is NULL\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* now, make sure we are running on compatible SoC */
|
||||
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
|
||||
dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
|
||||
dsaf_dev->ae_dev.name);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!enable) {
|
||||
/* Reset rocee-channels in dsaf and rocee */
|
||||
hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);
|
||||
hns_dsaf_roce_srst(dsaf_dev, false);
|
||||
/* do reset or de-reset according to the flag */
|
||||
if (!dereset) {
|
||||
/* reset rocee-channels in dsaf and rocee */
|
||||
dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
|
||||
false);
|
||||
dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
|
||||
} else {
|
||||
/* Configure dsaf tx roce correspond to port map and sl map */
|
||||
/* configure dsaf tx roce correspond to port map and sl map */
|
||||
mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
|
||||
for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
|
||||
dsaf_set_field(mp, 7 << i * 3, i * 3,
|
||||
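
Note: the reset entry point now resolves the platform device behind either an OF node or an ACPI companion before touching hardware, so one function serves both firmware-description types; the old code rejected everything but DT. The dispatch shape, as a kernel-style sketch (hns_dsaf_find_platform_device() is the driver helper this patch set introduces):

    #include <linux/of_platform.h>
    #include <linux/acpi.h>

    static struct platform_device *fwnode_to_pdev(struct fwnode_handle *fwnode)
    {
        if (is_of_node(fwnode))                 /* device tree case */
            return of_find_device_by_node(to_of_node(fwnode));
        if (is_acpi_device_node(fwnode))        /* ACPI case */
            return hns_dsaf_find_platform_device(fwnode);
        return NULL;                            /* unknown firmware type */
    }
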
|
@ -2840,12 +2860,13 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
|
|||
sl_map[i][DSAF_ROCE_6PORT_MODE]);
|
||||
dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
|
||||
|
||||
/* De-reset rocee-channels in dsaf and rocee */
|
||||
hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);
|
||||
/* de-reset rocee-channels in dsaf and rocee */
|
||||
dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
|
||||
true);
|
||||
msleep(SRST_TIME_INTERVAL);
|
||||
hns_dsaf_roce_srst(dsaf_dev, true);
|
||||
dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
|
||||
|
||||
/* Eanble dsaf channel rocee credit */
|
||||
/* enable dsaf channel rocee credit */
|
||||
credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
|
||||
dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
|
||||
dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
|
||||
|
|
|
@@ -305,7 +305,7 @@ struct dsaf_misc_op {
 	void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
 	int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
 			       enum hnae_led_state status);
-	/* reset seris function, it will be reset if the dereseet is 0 */
+	/* reset series function, it will be reset if the dereset is 0 */
 	void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
 	void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
 	void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
@@ -313,6 +313,9 @@ struct dsaf_misc_op {
 	void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
 	void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
 	void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+	void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
+				   bool dereset);
+	void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);

 	phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
 	int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -445,10 +448,6 @@ int hns_dsaf_get_mac_entry_by_index(

 void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);

-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable);
-
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable);
-
 int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
 void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
@@ -26,6 +26,8 @@ enum _dsm_rst_type {
 	HNS_XGE_CORE_RESET_FUNC = 0x3,
 	HNS_XGE_RESET_FUNC = 0x4,
 	HNS_GE_RESET_FUNC = 0x5,
+	HNS_DSAF_CHN_RESET_FUNC = 0x6,
+	HNS_ROCE_RESET_FUNC = 0x7,
 };

 const u8 hns_dsaf_acpi_dsm_uuid[] = {
@@ -241,11 +243,11 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
  * bit18-19 for com/dfx
  * @enable: false - request reset , true - drop reset
  */
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable)
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
 {
 	u32 reg_addr;

-	if (!enable)
+	if (!dereset)
 		reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
 	else
 		reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
@@ -253,9 +255,27 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable)
 	dsaf_write_sub(dsaf_dev, reg_addr, msk);
 }

-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable)
+/**
+ * hns_dsaf_srst_chns - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ * @enable: false - request reset , true - drop reset
+ */
+void
+hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
 {
-	if (!enable) {
+	hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+				   HNS_DSAF_CHN_RESET_FUNC,
+				   msk, dereset);
+}
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+	if (!dereset) {
 		dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
 	} else {
 		dsaf_write_sub(dsaf_dev,
@@ -267,6 +287,12 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable)
 	}
 }

+void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+	hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+				   HNS_ROCE_RESET_FUNC, 0, dereset);
+}
+
 static void
 hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
 				    u32 port, bool dereset)
@@ -575,6 +601,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
 		misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
 		misc_op->ppe_srst = hns_ppe_srst_by_port;
 		misc_op->ppe_comm_srst = hns_ppe_com_srst;
+		misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
+		misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;

 		misc_op->get_phy_if = hns_mac_get_phy_if;
 		misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -591,6 +619,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
 		misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
 		misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
 		misc_op->ppe_comm_srst = hns_ppe_com_srst;
+		misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
+		misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;

 		misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
 		misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -603,3 +633,18 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)

 	return (void *)misc_op;
 }
+
+static int hns_dsaf_dev_match(struct device *dev, void *fwnode)
+{
+	return dev->fwnode == fwnode;
+}
+
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&platform_bus_type, NULL,
+			      fwnode, hns_dsaf_dev_match);
+	return dev ? to_platform_device(dev) : NULL;
+}
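Worth noting before the header changes that follow: both reset backends end up behind the dsaf_misc_op table that hns_misc_op_get() populates, so consumers never branch on firmware type themselves. A sketch of a transport-agnostic caller, assuming a fully probed dsaf_dev (the helper name is illustrative):

    /* works unchanged whether the DT/syscon or ACPI _DSM ops were installed */
    static void example_srst_cycle(struct dsaf_device *dsaf_dev)
    {
            /* request reset on the xbar channels and the rocee block */
            dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);
            dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);

            /* then drop (de-assert) the resets */
            dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);
            dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
    }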
@@ -34,5 +34,6 @@
 #define DSAF_LED_ANCHOR_B 5

 struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
-
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode);
 #endif
@@ -78,10 +78,10 @@
 #define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG	0xA88
 #define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG	0xA8C
 #define DSAF_SUB_SC_DSAF_RESET_REQ_REG		0xAA8
-#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG		0xA50
 #define DSAF_SUB_SC_DSAF_RESET_DREQ_REG		0xAAC
-#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG		0x32C
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG		0xA50
 #define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG	0xA54
+#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG		0x32C
 #define DSAF_SUB_SC_ROCEE_CLK_EN_REG		0x328
 #define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG	0x2060
 #define DSAF_SUB_SC_TCAM_MBIST_EN_REG		0x2300
@@ -160,6 +160,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[33] = "RoCEv2 support",
 		[34] = "DMFS Sniffer support (UC & MC)",
 		[35] = "QinQ VST mode support",
+		[36] = "sl to vl mapping table change event support"
 	};
 	int i;

@@ -789,6 +790,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
+#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET	0x78
 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
 #define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
@@ -904,6 +906,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
 	dev_cap->fs_max_num_qp_per_entry = field;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
+	if (field & (1 << 5))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
 	if (field & 0x1)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
@@ -2783,7 +2788,6 @@ static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
 int mlx4_config_mad_demux(struct mlx4_dev *dev)
 {
 	struct mlx4_cmd_mailbox *mailbox;
-	int secure_host_active;
 	int err;

 	/* Check if mad_demux is supported */
@@ -2806,7 +2810,8 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
 		goto out;
 	}

-	secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
+	if (mlx4_check_smp_firewall_active(dev, mailbox))
+		dev->flags |= MLX4_FLAG_SECURE_HOST;

 	/* Config mad_demux to handle all MADs returned by the query above */
 	err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
@@ -2817,7 +2822,7 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
 		goto out;
 	}

-	if (secure_host_active)
+	if (dev->flags & MLX4_FLAG_SECURE_HOST)
 		mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
 out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
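The mad_demux hunks above swap a function-local flag for device-global state: secure-host detection is latched into dev->flags once, so any later mlx4 path can test it without re-querying firmware. A sketch of such a test (hypothetical helper name):

    /* true once mlx4_config_mad_demux() saw an active SMP firewall */
    static bool example_secure_host_enabled(struct mlx4_dev *dev)
    {
            return !!(dev->flags & MLX4_FLAG_SECURE_HOST);
    }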
@@ -54,7 +54,6 @@
 struct nvme_rdma_device {
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
-	struct ib_mr		*mr;
 	struct kref		ref;
 	struct list_head	entry;
 };
@@ -408,10 +407,7 @@ static void nvme_rdma_free_dev(struct kref *ref)
 	list_del(&ndev->entry);
 	mutex_unlock(&device_list_mutex);

-	if (!register_always)
-		ib_dereg_mr(ndev->mr);
 	ib_dealloc_pd(ndev->pd);
-
 	kfree(ndev);
 }

@@ -444,24 +440,16 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	ndev->dev = cm_id->device;
 	kref_init(&ndev->ref);

-	ndev->pd = ib_alloc_pd(ndev->dev);
+	ndev->pd = ib_alloc_pd(ndev->dev,
+		register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
 	if (IS_ERR(ndev->pd))
 		goto out_free_dev;

-	if (!register_always) {
-		ndev->mr = ib_get_dma_mr(ndev->pd,
-					 IB_ACCESS_LOCAL_WRITE |
-					 IB_ACCESS_REMOTE_READ |
-					 IB_ACCESS_REMOTE_WRITE);
-		if (IS_ERR(ndev->mr))
-			goto out_free_pd;
-	}
-
 	if (!(ndev->dev->attrs.device_cap_flags &
 	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
 		dev_err(&ndev->dev->dev,
 			"Memory registrations not supported.\n");
-		goto out_free_mr;
+		goto out_free_pd;
 	}

 	list_add(&ndev->entry, &device_list);
@@ -469,9 +457,6 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	mutex_unlock(&device_list_mutex);
 	return ndev;

-out_free_mr:
-	if (!register_always)
-		ib_dereg_mr(ndev->mr);
 out_free_pd:
 	ib_dealloc_pd(ndev->pd);
 out_free_dev:
@@ -915,7 +900,7 @@ static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,

 	sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
 	put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
-	put_unaligned_le32(queue->device->mr->rkey, sg->key);
+	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
 	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
 	return 0;
 }
@@ -1000,7 +985,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 		    nvme_rdma_queue_idx(queue))
 			return nvme_rdma_map_sg_inline(queue, req, c);

-		if (!register_always)
+		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
 			return nvme_rdma_map_sg_single(queue, req, c);
 	}

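These nvme-rdma hunks are the template for the tree-wide ib_get_dma_mr() removal: rather than allocating a global DMA MR and carrying an ib_mr pointer, a ULP requests the all-physical rkey at PD allocation time and reads it back from pd->unsafe_global_rkey. A generic sketch of the new pattern (want_global_rkey stands in for a driver policy knob such as nvme-rdma's register_always):

    #include <rdma/ib_verbs.h>

    static struct ib_pd *example_alloc_pd(struct ib_device *ibdev,
                                          bool want_global_rkey)
    {
            /* the flag makes the core expose pd->unsafe_global_rkey */
            return ib_alloc_pd(ibdev, want_global_rkey ?
                               IB_PD_UNSAFE_GLOBAL_RKEY : 0);
    }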
@@ -848,7 +848,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	ndev->device = cm_id->device;
 	kref_init(&ndev->ref);

-	ndev->pd = ib_alloc_pd(ndev->device);
+	ndev->pd = ib_alloc_pd(ndev->device, 0);
 	if (IS_ERR(ndev->pd))
 		goto out_free_dev;

@@ -35,6 +35,7 @@ config LNET_SELFTEST
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
 	depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on BROKEN
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
@@ -2467,7 +2467,7 @@ int kiblnd_dev_failover(struct kib_dev *dev)
 	hdev->ibh_cmid = cmid;
 	hdev->ibh_ibdev = cmid->device;

-	pd = ib_alloc_pd(cmid->device);
+	pd = ib_alloc_pd(cmid->device, 0);
 	if (IS_ERR(pd)) {
 		rc = PTR_ERR(pd);
 		CERROR("Can't allocate PD: %d\n", rc);
@@ -71,7 +71,8 @@ enum {
 	MLX4_FLAG_SLAVE		= 1 << 3,
 	MLX4_FLAG_SRIOV		= 1 << 4,
 	MLX4_FLAG_OLD_REG_MAC	= 1 << 6,
-	MLX4_FLAG_BONDED	= 1 << 7
+	MLX4_FLAG_BONDED	= 1 << 7,
+	MLX4_FLAG_SECURE_HOST	= 1 << 8,
 };

 enum {
@@ -222,6 +223,7 @@ enum {
 	MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER	= 1ULL << 34,
 	MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT	= 1ULL << 35,
 	MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP		= 1ULL << 36,
+	MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
 };

 enum {
@@ -449,6 +451,7 @@ enum {
 	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
 	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
 	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
+	MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP = 0x17,
 };

 /* Port mgmt change event handling */
@@ -460,6 +463,11 @@ enum {
 	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK  = 1 << 4,
 };

+union sl2vl_tbl_to_u64 {
+	u8	sl8[8];
+	u64	sl64;
+};
+
 enum {
 	MLX4_DEVICE_STATE_UP			= 1 << 0,
 	MLX4_DEVICE_STATE_INTERNAL_ERROR	= 1 << 1,
@@ -946,6 +954,9 @@ struct mlx4_eqe {
 				__be32 block_ptr;
 				__be32 tbl_entries_mask;
 			} __packed tbl_change_info;
+			struct {
+				u8 sl2vl_table[8];
+			} __packed sl2vl_tbl_change_info;
 		} params;
 	} __packed port_mgmt_change;
 	struct {
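The sl2vl_tbl_to_u64 union added above lets the eight one-byte sl-to-vl entries carried in the event be handled as a single 64-bit value. A sketch of the intended use (hypothetical helper; assumes an 8-entry table as in the eqe):

    #include <linux/string.h>

    static u64 example_pack_sl2vl(const u8 sl2vl_table[8])
    {
            union sl2vl_tbl_to_u64 tmp;

            /* copy byte-wise, then read back as one u64 */
            memcpy(tmp.sl8, sl2vl_table, sizeof(tmp.sl8));
            return tmp.sl64;
    }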