/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
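
/*
 * iWARP provides no UD address handles, multicast, or MAD processing, so
 * the verbs below are stubs that simply return -ENOSYS.
 */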
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
                            u8 port_num, const struct ib_wc *in_wc,
                            const struct ib_grh *in_grh,
                            const struct ib_mad_hdr *in_mad,
                            size_t in_mad_size,
                            struct ib_mad_hdr *out_mad,
                            size_t *out_mad_size,
                            u16 *out_mad_pkey_index)
{
        return -ENOSYS;
}
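
/*
 * Tear down a user context: free any mmap entries still queued on the
 * context, release the per-context uctx resources back to the rdev, and
 * free the context itself.
 */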
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_dev *rhp = to_c4iw_dev(context->device);
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
        struct c4iw_mm_entry *mm, *tmp;

        PDBG("%s context %p\n", __func__, context);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
        return 0;
}
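
/*
 * Allocate a user context.  When the user library supplies a response
 * buffer large enough for the full c4iw_alloc_ucontext_resp, it is handed
 * an mmap key for the device-global doorbell status page; a downlevel
 * libcxgb4 is tolerated by simply disabling the status page for the
 * device (non-fatal, warned once).
 */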
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        static int warned;
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = -ENOMEM;
                goto err;
        }

        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
                        pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err_free;
                }

                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = virt_to_phys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return &context->ibucontext;
err_mm:
        kfree(mm);
err_free:
        kfree(context);
err:
        return ERR_PTR(ret);
}
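
/*
 * mmap() handler for user verbs.  vm_pgoff encodes a key previously
 * handed out to userspace; depending on the backing address this maps
 * either the MA_SYNC register window (PCI resource 0), the user
 * doorbell/OCQP region (PCI resource 2), or contiguous DMA memory
 * backing a WQ or CQ.
 */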
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr;

        PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
             key, len);

        if (vma->vm_start & (PAGE_SIZE-1))
                return -EINVAL;

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return -EINVAL;
        addr = mm->addr;
        kfree(mm);

        if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
            (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
                    pci_resource_len(rdev->lldi.pdev, 0)))) {

                /*
                 * MA_SYNC register...
                 */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
                   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
                    pci_resource_len(rdev->lldi.pdev, 2)))) {

                /*
                 * Map user DB or OCQP memory...
                 */
                if (addr >= rdev->oc_mw_pa)
                        vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
                else {
                        if (is_t5(rdev->lldi.adapter_type))
                                vma->vm_page_prot =
                                        t4_pgprot_wc(vma->vm_page_prot);
                        else
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                }
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ or CQ contig dma memory...
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
}
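
/* Return the PD id to the resource table, update the PD stats, and free it. */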
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
        kfree(php);
        return 0;
}
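
/*
 * Allocate a protection domain: reserve a PD id from the adapter's pdid
 * table, copy the id out to userspace for user PDs, and track current and
 * maximum PD counts under the stats mutex.
 */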
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct c4iw_pd *php;
        u32 pdid;
        struct c4iw_dev *rhp;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        rhp = (struct c4iw_dev *) ibdev;
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
                        c4iw_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);
        PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
        return &php->ibpd;
}
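
/*
 * iWARP has no P_Key table, so a single P_Key of 0 is reported.  The GID
 * for a port is derived from the corresponding netdev's MAC address.
 */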
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                           u16 *pkey)
{
        PDBG("%s ibdev %p\n", __func__, ibdev);
        *pkey = 0;
        return 0;
}

static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
             __func__, ibdev, port, index, gid);
        dev = to_c4iw_dev(ibdev);
        BUG_ON(port == 0);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
        return 0;
}
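
/*
 * Report device capabilities.  Most limits come straight from the LLD's
 * resource ranges and the T4 hardware constants.
 */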
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                             struct ib_udata *uhw)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        dev = to_c4iw_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
        props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
        props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
                                    c4iw_max_read_depth);
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->max_cq = dev->rdev.lldi.vr->qp.size;
        props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);

        return 0;
}
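
/*
 * Report port attributes.  The port state is inferred from the netdev:
 * DOWN without carrier, ACTIVE when an IPv4 address is configured, and
 * INIT otherwise.  The active MTU is rounded down to the nearest IB MTU
 * enumeration value.
 */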
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
                           struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct net_device *netdev;
        struct in_device *inetdev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_c4iw_dev(ibdev);
        netdev = dev->rdev.lldi.ports[port-1];

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (netdev->mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (netdev->mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (netdev->mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (netdev->mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;

        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
        else {
                inetdev = in_dev_get(netdev);
                if (inetdev) {
                        if (inetdev->ifa_list)
                                props->state = IB_PORT_ACTIVE;
                        else
                                props->state = IB_PORT_INIT;
                        in_dev_put(inetdev);
                } else
                        props->state = IB_PORT_INIT;
        }

        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = IB_SPEED_DDR;
        props->max_msg_sz = -1;

        return 0;
}
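
/*
 * sysfs attributes exported under the ib_device: hardware revision,
 * firmware version, HCA (driver) type, and board id.
 */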
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%d\n",
                       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);

        return sprintf(buf, "%u.%u.%u.%u\n",
                       FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

        PDBG("%s dev 0x%p\n", __func__, dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
                       c4iw_dev->rdev.lldi.pdev->device);
}
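
/*
 * Protocol statistics: sum the IPv4 and IPv6 TCP counters obtained from
 * the LLD into the iWARP members of the rdma_protocol_stats union.
 */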
static int c4iw_get_mib(struct ib_device *ibdev,
                        union rdma_protocol_stats *stats)
{
        struct tp_tcp_stats v4, v6;
        struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

        cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
        memset(stats, 0, sizeof *stats);
        stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
        stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
        stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
        stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;

        return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c4iw_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
};
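
/*
 * Immutable per-port data cached by the IB core: table lengths from
 * c4iw_query_port plus the iWARP core capability flag.
 */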
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = c4iw_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

        return 0;
}
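
/*
 * Register the device with the IB core: fill in the ib_device fields and
 * verb entry points, hook up the iw_cm_verbs callbacks, and create the
 * sysfs attribute files.
 */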
int c4iw_register_device(struct c4iw_dev *dev)
{
        int ret;
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        BUG_ON(!dev->rdev.lldi.ports[0]);
        strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
        dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
        dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
        dev->ibdev.query_device = c4iw_query_device;
        dev->ibdev.query_port = c4iw_query_port;
        dev->ibdev.query_pkey = c4iw_query_pkey;
        dev->ibdev.query_gid = c4iw_query_gid;
        dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
        dev->ibdev.mmap = c4iw_mmap;
        dev->ibdev.alloc_pd = c4iw_allocate_pd;
        dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
        dev->ibdev.create_ah = c4iw_ah_create;
        dev->ibdev.destroy_ah = c4iw_ah_destroy;
        dev->ibdev.create_qp = c4iw_create_qp;
        dev->ibdev.modify_qp = c4iw_ib_modify_qp;
        dev->ibdev.query_qp = c4iw_ib_query_qp;
        dev->ibdev.destroy_qp = c4iw_destroy_qp;
        dev->ibdev.create_cq = c4iw_create_cq;
        dev->ibdev.destroy_cq = c4iw_destroy_cq;
        dev->ibdev.resize_cq = c4iw_resize_cq;
        dev->ibdev.poll_cq = c4iw_poll_cq;
        dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
        dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
        dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
        dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
        dev->ibdev.dereg_mr = c4iw_dereg_mr;
        dev->ibdev.alloc_mw = c4iw_alloc_mw;
        dev->ibdev.bind_mw = c4iw_bind_mw;
        dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
        dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
        dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
        dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
        dev->ibdev.attach_mcast = c4iw_multicast_attach;
        dev->ibdev.detach_mcast = c4iw_multicast_detach;
        dev->ibdev.process_mad = c4iw_process_mad;
        dev->ibdev.req_notify_cq = c4iw_arm_cq;
        dev->ibdev.post_send = c4iw_post_send;
        dev->ibdev.post_recv = c4iw_post_receive;
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;

        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
                return -ENOMEM;

        dev->ibdev.iwcm->connect = c4iw_connect;
        dev->ibdev.iwcm->accept = c4iw_accept_cr;
        dev->ibdev.iwcm->reject = c4iw_reject_cr;
        dev->ibdev.iwcm->create_listen = c4iw_create_listen;
        dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
        dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = c4iw_get_qp;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                goto bail1;

        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                         c4iw_class_attributes[i]);
                if (ret)
                        goto bail2;
        }
        return 0;
bail2:
        ib_unregister_device(&dev->ibdev);
bail1:
        kfree(dev->ibdev.iwcm);
        return ret;
}
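
/*
 * Undo c4iw_register_device(): remove the sysfs files, unregister from
 * the IB core, and free the iwcm ops.
 */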
void c4iw_unregister_device(struct c4iw_dev *dev)
{
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
                device_remove_file(&dev->ibdev.dev,
                                   c4iw_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}