mirror of https://gitee.com/openkylin/linux.git
Merge branches 'cxgb4' and 'mlx5' into k.o/for-4.8
commit 3e5e8e8a9a
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -183,15 +183,14 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
 /*
  * Release a reference on cm_id. If the last reference is being
- * released, enable the waiting thread (in iw_destroy_cm_id) to
- * get woken up, and return 1 if a thread is already waiting.
+ * released, free the cm_id and return 1.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
         BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
         if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                 BUG_ON(!list_empty(&cm_id_priv->work_list));
-                complete(&cm_id_priv->destroy_comp);
+                free_cm_id(cm_id_priv);
                 return 1;
         }
         return 0;
 }
@@ -208,19 +207,10 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
         struct iwcm_id_private *cm_id_priv;
-        int cb_destroy;
 
         cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
-        /*
-         * Test bit before deref in case the cm_id gets freed on another
-         * thread.
-         */
-        cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-        if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
-                BUG_ON(!list_empty(&cm_id_priv->work_list));
-                free_cm_id(cm_id_priv);
-        }
+        (void)iwcm_deref_id(cm_id_priv);
 }
 
 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
@@ -370,6 +360,12 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
         wait_event(cm_id_priv->connect_wait,
                    !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
 
+        /*
+         * Since we're deleting the cm_id, drop any events that
+         * might arrive before the last dereference.
+         */
+        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
         spin_lock_irqsave(&cm_id_priv->lock, flags);
         switch (cm_id_priv->state) {
         case IW_CM_STATE_LISTEN:
@@ -433,13 +429,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
         struct iwcm_id_private *cm_id_priv;
 
         cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
-
         destroy_cm_id(cm_id);
-
-        wait_for_completion(&cm_id_priv->destroy_comp);
-
-        free_cm_id(cm_id_priv);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -809,10 +799,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
         ret = cm_id->cm_handler(cm_id, iw_event);
         if (ret) {
                 iw_cm_reject(cm_id, NULL, 0);
-                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-                destroy_cm_id(cm_id);
-                if (atomic_read(&cm_id_priv->refcount)==0)
-                        free_cm_id(cm_id_priv);
+                iw_destroy_cm_id(cm_id);
         }
 
 out:
@@ -1000,7 +987,6 @@ static void cm_work_handler(struct work_struct *_work)
         unsigned long flags;
         int empty;
         int ret = 0;
-        int destroy_id;
 
         spin_lock_irqsave(&cm_id_priv->lock, flags);
         empty = list_empty(&cm_id_priv->work_list);
@@ -1013,20 +999,14 @@ static void cm_work_handler(struct work_struct *_work)
                 put_work(work);
                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-                ret = process_event(cm_id_priv, &levent);
-                if (ret) {
-                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-                        destroy_cm_id(&cm_id_priv->id);
-                }
-                BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
-                destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-                if (iwcm_deref_id(cm_id_priv)) {
-                        if (destroy_id) {
-                                BUG_ON(!list_empty(&cm_id_priv->work_list));
-                                free_cm_id(cm_id_priv);
-                        }
+                if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+                        ret = process_event(cm_id_priv, &levent);
+                        if (ret)
+                                destroy_cm_id(&cm_id_priv->id);
+                } else
+                        pr_debug("dropping event %d\n", levent.event);
+                if (iwcm_deref_id(cm_id_priv))
                         return;
-                }
                 if (empty)
                         return;
                 spin_lock_irqsave(&cm_id_priv->lock, flags);
--- a/drivers/infiniband/core/iwcm.h
+++ b/drivers/infiniband/core/iwcm.h
@@ -56,7 +56,7 @@ struct iwcm_id_private {
         struct list_head work_free_list;
 };
 
-#define IWCM_F_CALLBACK_DESTROY   1
+#define IWCM_F_DROP_EVENTS        1
 #define IWCM_F_CONNECT_WAIT       2
 
 #endif /* IWCM_H */
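Note: taken together, the iwcm.c and iwcm.h hunks above drop the IWCM_F_CALLBACK_DESTROY handshake (set a flag, signal destroy_comp, sleep in iw_destroy_cm_id) in favor of a simpler rule: the last iwcm_deref_id() frees the cm_id, and the new IWCM_F_DROP_EVENTS flag tells cm_work_handler() to discard events that race with destruction. A minimal sketch of that pattern, with hypothetical obj/flag names rather than the iwcm ones:

    #include <linux/atomic.h>
    #include <linux/bitops.h>
    #include <linux/slab.h>

    struct obj {
            atomic_t refcount;
            unsigned long flags;
    };

    #define OBJ_F_DROP_EVENTS 1

    static int obj_deref(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refcount)) {
                    kfree(o);               /* last reference frees the object */
                    return 1;
            }
            return 0;
    }

    static void obj_destroy(struct obj *o)
    {
            /* Mark first, so racing event handlers discard their events... */
            set_bit(OBJ_F_DROP_EVENTS, &o->flags);
            /* ...then drop our reference; the object may vanish right here. */
            obj_deref(o);
    }

No thread has to sleep until the refcount reaches zero, which is what lets iw_destroy_cm_id() shrink to a plain destroy_cm_id() call.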
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -229,7 +229,10 @@ static void ibnl_rcv(struct sk_buff *skb)
 int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
                         __u32 pid)
 {
-        return nlmsg_unicast(nls, skb, pid);
+        int err;
+
+        err = netlink_unicast(nls, skb, pid, 0);
+        return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(ibnl_unicast);
@@ -252,6 +255,7 @@ int __init ibnl_init(void)
                 return -ENOMEM;
         }
 
+        nls->sk_sndtimeo = 10 * HZ;
         return 0;
 }
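Note: nlmsg_unicast() is a thin wrapper that passes MSG_DONTWAIT to netlink_unicast(), so a full receiver socket buffer failed the send immediately with -EAGAIN. The two netlink.c hunks switch to a blocking send bounded by a ten-second socket timeout, while keeping the wrapper's return convention (netlink_unicast() returns the number of bytes delivered on success, a negative errno on failure). Roughly, assuming those semantics:

    int err;

    /* Old behavior: fail fast if the receiver's buffer is full. */
    err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);

    /* New behavior: block until there is room, but give up after the
     * send timeout configured in ibnl_init(). */
    nls->sk_sndtimeo = 10 * HZ;
    err = netlink_unicast(nls, skb, pid, 0);
    if (err > 0)            /* bytes delivered, not an error */
            err = 0;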
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1396,10 +1396,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         state_set(&child_ep->com, CONNECTING);
         child_ep->com.tdev = tdev;
         child_ep->com.cm_id = NULL;
-        child_ep->com.local_addr.sin_family = PF_INET;
+        child_ep->com.local_addr.sin_family = AF_INET;
         child_ep->com.local_addr.sin_port = req->local_port;
         child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
-        child_ep->com.remote_addr.sin_family = PF_INET;
+        child_ep->com.remote_addr.sin_family = AF_INET;
         child_ep->com.remote_addr.sin_port = req->peer_port;
         child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
         get_ep(&parent_ep->com);
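Note: the cxgb3 change is a correctness-of-intent cleanup rather than a behavior change. sin_family holds an address family (AF_*); the PF_* constants are protocol families meant for socket(). On Linux the two sets of values are equal, so the old code worked only by that alias:

    #include <sys/socket.h>
    #include <netinet/in.h>

    static int example(void)
    {
            struct sockaddr_in sa = {
                    .sin_family = AF_INET,          /* address family: sockaddr */
            };

            return socket(PF_INET, SOCK_STREAM, 0); /* protocol family: socket() */
    }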
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3068,9 +3068,9 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
                 PDBG("%s last streaming msg ack ep %p tid %u state %u "
                      "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
                      state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+                mutex_lock(&ep->com.mutex);
                 kfree_skb(ep->mpa_skb);
                 ep->mpa_skb = NULL;
-                mutex_lock(&ep->com.mutex);
                 if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
                         stop_ep_timer(ep);
                 mutex_unlock(&ep->com.mutex);
@@ -3647,6 +3647,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
                         ep->com.state = ABORTING;
                 else {
                         ep->com.state = CLOSING;
+
+                        /*
+                         * if we close before we see the fw4_ack() then we fix
+                         * up the timer state since we're reusing it.
+                         */
+                        if (ep->mpa_skb &&
+                            test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
+                                clear_bit(STOP_MPA_TIMER, &ep->com.flags);
+                                stop_ep_timer(ep);
+                        }
                         start_ep_timer(ep);
                 }
                 set_bit(CLOSE_SENT, &ep->com.flags);
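Note: these two cm.c hunks look like two halves of one fix. fw4_ack() now consumes ep->mpa_skb and the STOP_MPA_TIMER request while holding ep->com.mutex, and c4iw_ep_disconnect() (which, as far as I can tell, serializes on the same mutex) uses "mpa_skb still set" to detect that it is closing before the ack arrived, reclaiming the pending timer stop so the timer can be reused for the CLOSING state. A schematic of the two sides, condensed from the hunks above under that locking assumption:

    /* fw4_ack() side: decide skb ownership and timer ownership atomically. */
    mutex_lock(&ep->com.mutex);
    kfree_skb(ep->mpa_skb);
    ep->mpa_skb = NULL;
    if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
            stop_ep_timer(ep);
    mutex_unlock(&ep->com.mutex);

    /* disconnect side: closing before the ack? reclaim the timer first. */
    if (ep->mpa_skb && test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
            clear_bit(STOP_MPA_TIMER, &ep->com.flags);
            stop_ep_timer(ep);
    }
    start_ep_timer(ep);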
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -475,7 +475,7 @@ struct c4iw_qp {
         struct t4_wq wq;
         spinlock_t lock;
         struct mutex mutex;
-        atomic_t refcnt;
+        struct kref kref;
         wait_queue_head_t wait;
         struct timer_list timer;
         int sq_sig_all;
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -603,16 +603,13 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 
         mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
         if (!mhp->dereg_skb) {
-                kfree(mhp);
-                return ERR_PTR(-ENOMEM);
+                ret = -ENOMEM;
+                goto free_mhp;
         }
 
         ret = allocate_window(&rhp->rdev, &stag, php->pdid);
-        if (ret) {
-                kfree(mhp->dereg_skb);
-                kfree(mhp);
-                return ERR_PTR(ret);
-        }
+        if (ret)
+                goto free_skb;
         mhp->rhp = rhp;
         mhp->attr.pdid = php->pdid;
         mhp->attr.type = FW_RI_STAG_MW;
@@ -620,13 +617,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
         mmid = (stag) >> 8;
         mhp->ibmw.rkey = stag;
         if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
-                deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
-                kfree(mhp->dereg_skb);
-                kfree(mhp);
-                return ERR_PTR(-ENOMEM);
+                ret = -ENOMEM;
+                goto dealloc_win;
         }
         PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
         return &(mhp->ibmw);
+
+dealloc_win:
+        deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+free_skb:
+        kfree_skb(mhp->dereg_skb);
+free_mhp:
+        kfree(mhp);
+        return ERR_PTR(ret);
 }
 
 int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -640,6 +643,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
         mmid = (mw->rkey) >> 8;
         remove_handle(rhp, &rhp->mmidr, mmid);
         deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+        kfree_skb(mhp->dereg_skb);
         kfree(mhp);
         PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
         return 0;
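Note: besides converting c4iw_alloc_mw() to the standard goto-unwind error ladder (each label undoes one acquisition, in reverse order, so every failure site just sets ret and jumps to the right rung), the mem.c hunks switch dereg_skb cleanup to kfree_skb(), the correct release for memory from alloc_skb(), and add that release to c4iw_dealloc_mw(). The ladder shape, as a minimal hypothetical sketch:

    static int setup(struct thing *t)
    {
            int ret;

            t->a = alloc_a();
            if (!t->a)
                    return -ENOMEM;

            t->b = alloc_b();
            if (!t->b) {
                    ret = -ENOMEM;
                    goto free_a;
            }

            ret = register_thing(t);
            if (ret)
                    goto free_b;

            return 0;

    free_b:
            free_b(t->b);   /* undo in reverse order of acquisition */
    free_a:
            free_a(t->a);
            return ret;
    }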
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,17 +683,25 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
         return 0;
 }
 
+void _free_qp(struct kref *kref)
+{
+        struct c4iw_qp *qhp;
+
+        qhp = container_of(kref, struct c4iw_qp, kref);
+        PDBG("%s qhp %p\n", __func__, qhp);
+        kfree(qhp);
+}
+
 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
         PDBG("%s ib_qp %p\n", __func__, qp);
-        atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+        kref_get(&to_c4iw_qp(qp)->kref);
 }
 
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
         PDBG("%s ib_qp %p\n", __func__, qp);
-        if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
-                wake_up(&(to_c4iw_qp(qp)->wait));
+        kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
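Note: the iw_cxgb4.h and qp.c hunks are a textbook atomic_t-to-kref conversion. Instead of a destroyer sleeping until the count reaches zero (the wait_event removed from c4iw_destroy_qp just below), the release callback _free_qp() runs exactly once, from whichever context drops the last reference. The canonical pattern, as a sketch:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref kref;
            /* ... payload ... */
    };

    static void obj_release(struct kref *kref)
    {
            /* Called exactly once, when the last reference is dropped. */
            kfree(container_of(kref, struct obj, kref));
    }

    static struct obj *obj_create(void)
    {
            struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (o)
                    kref_init(&o->kref);    /* refcount starts at 1 */
            return o;
    }

    static void obj_get(struct obj *o) { kref_get(&o->kref); }
    static void obj_put(struct obj *o) { kref_put(&o->kref, obj_release); }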
@@ -1594,8 +1602,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
                 wait_event(qhp->wait, !qhp->ep);
 
         remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-        atomic_dec(&qhp->refcnt);
-        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
         spin_lock_irq(&rhp->lock);
         if (!list_empty(&qhp->db_fc_entry))
@@ -1608,8 +1614,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
         destroy_qp(&rhp->rdev, &qhp->wq,
                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 
+        c4iw_qp_rem_ref(ib_qp);
+
         PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
-        kfree(qhp);
         return 0;
 }
@@ -1706,7 +1713,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
         init_completion(&qhp->rq_drained);
         mutex_init(&qhp->mutex);
         init_waitqueue_head(&qhp->wait);
-        atomic_set(&qhp->refcnt, 1);
+        kref_init(&qhp->kref);
 
         ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
         if (ret)
@@ -1898,12 +1905,20 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         return 0;
 }
 
+static void move_qp_to_err(struct c4iw_qp *qp)
+{
+        struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
+
+        (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+}
+
 void c4iw_drain_sq(struct ib_qp *ibqp)
 {
         struct c4iw_qp *qp = to_c4iw_qp(ibqp);
         unsigned long flag;
         bool need_to_wait;
 
+        move_qp_to_err(qp);
         spin_lock_irqsave(&qp->lock, flag);
         need_to_wait = !t4_sq_empty(&qp->wq);
         spin_unlock_irqrestore(&qp->lock, flag);
@@ -1918,6 +1933,7 @@ void c4iw_drain_rq(struct ib_qp *ibqp)
         unsigned long flag;
         bool need_to_wait;
 
+        move_qp_to_err(qp);
         spin_lock_irqsave(&qp->lock, flag);
         need_to_wait = !t4_rq_empty(&qp->wq);
         spin_unlock_irqrestore(&qp->lock, flag);
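Note: the new move_qp_to_err() helper makes the drain entry points self-sufficient. Transitioning the QP to ERROR causes the hardware to flush every posted work request back as a completion, which is the event c4iw_drain_sq()/c4iw_drain_rq() then wait for; without it, a drain issued while the QP was still in RTS could wait on completions that never arrive. The contract, roughly, with the sq_drained completion assumed by analogy to the rq_drained completion initialized in c4iw_create_qp() above:

    static void drain_sq(struct c4iw_qp *qp)
    {
            unsigned long flags;
            bool need_to_wait;

            move_qp_to_err(qp);             /* force flush of posted WRs */

            spin_lock_irqsave(&qp->lock, flags);
            need_to_wait = !t4_sq_empty(&qp->wq);
            spin_unlock_irqrestore(&qp->lock, flags);

            if (need_to_wait)
                    wait_for_completion(&qp->sq_drained);   /* assumed waiter */
    }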
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -69,15 +69,6 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
         return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
 }
 
-static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
-{
-        return ++index % gsi->cap.max_send_wr;
-}
-
-#define for_each_outstanding_wr(gsi, index) \
-        for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
-             index = next_outstanding(gsi, index))
-
 /* Call with gsi->lock locked */
 static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
 {
@@ -85,8 +76,9 @@ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
         struct mlx5_ib_gsi_wr *wr;
         u32 index;
 
-        for_each_outstanding_wr(gsi, index) {
-                wr = &gsi->outstanding_wrs[index];
+        for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
+             index++) {
+                wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
 
                 if (!wr->completed)
                         break;
@@ -430,8 +422,9 @@ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
                 return -ENOMEM;
         }
 
-        gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
-        gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
+        gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
+                                       gsi->cap.max_send_wr];
+        gsi->outstanding_pi++;
 
         if (!wc) {
                 memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
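Note: the three gsi.c hunks replace indices that were wrapped at every step (next_outstanding() returned ++index % max_send_wr) with free-running u32 producer/consumer counters that are reduced modulo the ring size only when indexing the array. Free-running counters keep "full" and "empty" distinguishable (with pre-wrapped indices, pi == ci can mean either), and unsigned arithmetic keeps pi - ci correct across 2^32 wraparound. The general pattern, as a sketch with hypothetical names:

    struct ring {
            u32 pi, ci;             /* free-running producer/consumer */
            u32 size;
            struct entry *slots;
    };

    static bool ring_empty(const struct ring *r)
    {
            return r->pi == r->ci;
    }

    static bool ring_full(const struct ring *r)
    {
            return r->pi - r->ci == r->size;        /* valid across wraparound */
    }

    static struct entry *ring_produce(struct ring *r)
    {
            struct entry *e = &r->slots[r->pi % r->size];   /* reduce at access */

            r->pi++;                /* the counter itself never wraps early */
            return e;
    }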
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2574,7 +2574,7 @@ static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
         return ret;
 }
 
-static const char const *names[] = {
+static const char * const names[] = {
         "rx_write_requests",
         "rx_read_requests",
         "rx_atomic_requests",
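Note: in `static const char const *names[]` the second const is a duplicate qualifier on char (GCC warns about it), and the pointers in the array remain writable. The intended type is `const char * const`: constant pointers to constant chars. Compare:

    const char *a[] = { "x" };          /* a[0] = "y"; is legal            */
    const char const *b[] = { "x" };    /* duplicate const; same type as a */
    const char * const c[] = { "x" };   /* c[0] = "y"; fails to compile    */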
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2657,7 +2657,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                 struct mlx5_ib_port *mibport = &dev->port[port_num];
 
                 context->qp_counter_set_usr_page |=
-                        cpu_to_be32(mibport->q_cnt_id << 16);
+                        cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
         }
 
         if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
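Note: as I read this fix, the counter set ID is an 8-bit field in the most significant byte of the QP context's qp_counter_set_usr_page word, so the old shift by 16 programmed the wrong byte. The explicit u32 cast also keeps the shift well-defined should q_cnt_id ever be wide enough to reach the sign bit after integer promotion:

    /* Assumed layout of the big-endian qp_counter_set_usr_page word:
     *
     *   31            24 23                             0
     *  +----------------+-------------------------------+
     *  | counter_set_id |            uar page           |
     *  +----------------+-------------------------------+
     */
    context->qp_counter_set_usr_page |=
            cpu_to_be32((u32)(mibport->q_cnt_id) << 24);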