mirror of https://gitee.com/openkylin/linux.git
net: sched: Do not drop root lock in tcf_qevent_handle()
Mirred currently does not mix well with blocks executed after the qdisc root lock is taken. This includes classification blocks (such as in PRIO, ETS, DRR qdiscs) and qevents. The locking caused by the packet mirrored by mirred can cause deadlocks: either when the thread of execution attempts to take the lock a second time, or when two threads end up waiting on each other's locks. The qevent patchset attempted to not introduce further badness of this sort, and dropped the lock before executing the qevent block. However, this led to too little locking and races between qdisc configuration and packet enqueue in the RED qdisc. Before the deadlock issues are solved in a way that can be applied across many qdiscs reasonably easily, do for qevents what is done for the classification blocks and just keep holding the root lock. Signed-off-by: Petr Machata <petrm@mellanox.com> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
89e35f66d5
commit
55f656cdb8
|
@ -568,7 +568,7 @@ void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
|
|||
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
|
||||
struct netlink_ext_ack *extack);
|
||||
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
|
||||
spinlock_t *root_lock, struct sk_buff **to_free, int *ret);
|
||||
struct sk_buff **to_free, int *ret);
|
||||
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
|
||||
#else
|
||||
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
|
||||
|
@ -591,7 +591,7 @@ static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlatt
|
|||
|
||||
static inline struct sk_buff *
|
||||
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
|
||||
spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
|
||||
struct sk_buff **to_free, int *ret)
|
||||
{
|
||||
return skb;
|
||||
}
|
||||
|
|
|
@ -3822,7 +3822,7 @@ int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index
|
|||
EXPORT_SYMBOL(tcf_qevent_validate_change);
|
||||
|
||||
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
|
||||
spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
|
||||
struct sk_buff **to_free, int *ret)
|
||||
{
|
||||
struct tcf_result cl_res;
|
||||
struct tcf_proto *fl;
|
||||
|
@ -3832,9 +3832,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
|
|||
|
||||
fl = rcu_dereference_bh(qe->filter_chain);
|
||||
|
||||
if (root_lock)
|
||||
spin_unlock(root_lock);
|
||||
|
||||
switch (tcf_classify(skb, fl, &cl_res, false)) {
|
||||
case TC_ACT_SHOT:
|
||||
qdisc_qstats_drop(sch);
|
||||
|
@ -3853,9 +3850,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (root_lock)
|
||||
spin_lock(root_lock);
|
||||
|
||||
return skb;
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_qevent_handle);
|
||||
|
|
|
@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
|
|||
|
||||
if (INET_ECN_set_ce(skb)) {
|
||||
q->stats.prob_mark++;
|
||||
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
|
||||
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
|
||||
if (!skb)
|
||||
return NET_XMIT_CN | ret;
|
||||
} else if (!red_use_nodrop(q)) {
|
||||
|
@ -114,7 +114,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
|
|||
|
||||
if (INET_ECN_set_ce(skb)) {
|
||||
q->stats.forced_mark++;
|
||||
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
|
||||
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
|
||||
if (!skb)
|
||||
return NET_XMIT_CN | ret;
|
||||
} else if (!red_use_nodrop(q)) {
|
||||
|
@ -137,7 +137,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
|
|||
return ret;
|
||||
|
||||
congestion_drop:
|
||||
skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret);
|
||||
skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
|
||||
if (!skb)
|
||||
return NET_XMIT_CN | ret;
|
||||
|
||||
|
|
Loading…
Reference in New Issue