ip_tunnel: disable preemption when updating per-cpu tstats
Drivers like vxlan use the recently introduced udp_tunnel_xmit_skb/udp_tunnel6_xmit_skb APIs. udp_tunnel6_xmit_skb makes use of ip6tunnel_xmit, which, after sending the packet, updates the struct stats using the usual u64_stats_update_begin/end calls on this_cpu_ptr(dev->tstats). udp_tunnel_xmit_skb makes use of iptunnel_xmit, which doesn't touch tstats, so drivers like vxlan call iptunnel_xmit_stats immediately afterwards, which does the same thing: u64_stats_update_begin/end on this_cpu_ptr(dev->tstats). While vxlan is probably fine (I don't know?), calling a similar function from, say, an unbound workqueue on a fully preemptible kernel causes real issues:

[ 188.434537] BUG: using smp_processor_id() in preemptible [00000000] code: kworker/u8:0/6
[ 188.435579] caller is debug_smp_processor_id+0x17/0x20
[ 188.435583] CPU: 0 PID: 6 Comm: kworker/u8:0 Not tainted 4.2.6 #2
[ 188.435607] Call Trace:
[ 188.435611]  [<ffffffff8234e936>] dump_stack+0x4f/0x7b
[ 188.435615]  [<ffffffff81915f3d>] check_preemption_disabled+0x19d/0x1c0
[ 188.435619]  [<ffffffff81915f77>] debug_smp_processor_id+0x17/0x20

The solution is to protect the whole this_cpu_ptr(dev->tstats)/u64_stats_update_begin/end block by disabling preemption and then re-enabling it.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 17c790a60d
commit b4fe85f9c9
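For context, a driver that needs to do the same tstats update from a context where preemption may not already be disabled (for example the unbound workqueue mentioned above) would follow the pattern this patch introduces. The sketch below is illustrative only and not part of this commit; the helper name example_update_tx_stats is made up, and it assumes dev->tstats points at per-cpu struct pcpu_sw_netstats counters (e.g. allocated with netdev_alloc_pcpu_stats()):

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical helper showing the safe pattern: get_cpu_ptr() disables
 * preemption before returning this CPU's stats slot, so the task cannot
 * migrate to another CPU while inside the u64_stats_update_begin()/end()
 * section; put_cpu_ptr() re-enables preemption afterwards.
 */
static void example_update_tx_stats(struct net_device *dev, unsigned int pkt_len)
{
	struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_bytes += pkt_len;
	tstats->tx_packets++;
	u64_stats_update_end(&tstats->syncp);
	put_cpu_ptr(tstats);
}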
@@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 
 	if (net_xmit_eval(err) == 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
 		u64_stats_update_begin(&tstats->syncp);
 		tstats->tx_bytes += pkt_len;
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
 	} else {
 		stats->tx_errors++;
 		stats->tx_aborted_errors++;
@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
 					struct pcpu_sw_netstats __percpu *stats)
 {
 	if (err > 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
 
 		u64_stats_update_begin(&tstats->syncp);
 		tstats->tx_bytes += err;
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
 	} else if (err < 0) {
 		err_stats->tx_errors++;
 		err_stats->tx_aborted_errors++;
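get_cpu_ptr()/put_cpu_ptr() are simply the pointer-returning form of a preempt_disable()/preempt_enable() pair around the per-cpu access, so the change above could equivalently have been open-coded as in this illustrative (not committed) snippet:

	if (err > 0) {
		struct pcpu_sw_netstats *tstats;

		/* Keep the task on this CPU for the whole update so the
		 * this_cpu_ptr() access and the syncp sequence are not
		 * split across CPUs by preemption.
		 */
		preempt_disable();
		tstats = this_cpu_ptr(stats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += err;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
		preempt_enable();
	}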