net: convert __IPTUNNEL_XMIT() to an inline function

__IPTUNNEL_XMIT() is an ugly macro; convert it to a static
inline function to make it more readable.

IPTUNNEL_XMIT() is unused, so just remove it.

Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Amerigo Wang 2012-11-11 21:52:33 +00:00 committed by David S. Miller
parent bf0098f22c
commit aa0010f880
9 changed files with 35 additions and 85 deletions
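To make the conversion concrete before the per-file hunks: callers stop feeding a locally fetched per-cpu pointer into a statement macro and instead make one ordinary, type-checked call. A minimal before/after sketch of a tunnel driver's xmit path, condensed from the ip_gre.c hunk below (illustrative, not part of the diff itself):

	/* before: the macro relies on skb, iph, rt and the stats pointers being in scope */
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);

	/* after: everything the helper needs is passed explicitly */
	iptunnel_xmit(skb, dev);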

drivers/net/vxlan.c

@@ -769,7 +769,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 	vxlan_set_owner(dev, skb);
-	/* See __IPTUNNEL_XMIT */
+	/* See iptunnel_xmit() */
 	skb->ip_summed = CHECKSUM_NONE;
 	ip_select_ident(iph, &rt->dst, NULL);

include/linux/if_tunnel.h

@@ -4,5 +4,15 @@
 #include <linux/ip.h>
 #include <linux/in6.h>
 #include <uapi/linux/if_tunnel.h>
+#include <linux/u64_stats_sync.h>
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
 #endif /* _IF_TUNNEL_H_ */
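For context (not introduced by this patch): the per-cpu counters above are bumped under u64_stats_update_begin()/u64_stats_update_end() on the transmit and receive paths, error counters stay in the shared netdev->stats, and each driver sums the per-cpu values in its ndo_get_stats64 handler. A hedged sketch of that read side, modeled on helpers such as ipgre_get_stats64(); it assumes dev->tstats was allocated with alloc_percpu(struct pcpu_tstats), and the function name here is illustrative:

static struct rtnl_link_stats64 *tunnel_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		/* re-read if a writer updated the counters meanwhile */
		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			rx_bytes   = tstats->rx_bytes;
			tx_packets = tstats->tx_packets;
			tx_bytes   = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_packets += tx_packets;
		tot->tx_bytes   += tx_bytes;
	}

	/* rarely modified counters live in the shared netdev->stats */
	tot->tx_errors         = dev->stats.tx_errors;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;

	return tot;
}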

include/net/ipip.h

@@ -48,25 +48,27 @@ struct ip_tunnel_prl_entry {
 	struct rcu_head		rcu_head;
 };
-#define __IPTUNNEL_XMIT(stats1, stats2) do { \
-	int err; \
-	int pkt_len = skb->len - skb_transport_offset(skb); \
-	\
-	skb->ip_summed = CHECKSUM_NONE; \
-	ip_select_ident(iph, &rt->dst, NULL); \
-	\
-	err = ip_local_out(skb); \
-	if (likely(net_xmit_eval(err) == 0)) { \
-		u64_stats_update_begin(&(stats1)->syncp); \
-		(stats1)->tx_bytes += pkt_len; \
-		(stats1)->tx_packets++; \
-		u64_stats_update_end(&(stats1)->syncp); \
-	} else { \
-		(stats2)->tx_errors++; \
-		(stats2)->tx_aborted_errors++; \
-	} \
-} while (0)
+static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int err;
+	struct iphdr *iph = ip_hdr(skb);
+	int pkt_len = skb->len - skb_transport_offset(skb);
+	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
 
-#define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats)
+	nf_reset(skb);
+	skb->ip_summed = CHECKSUM_NONE;
+	ip_select_ident(iph, skb_dst(skb), NULL);
+
+	err = ip_local_out(skb);
+	if (likely(net_xmit_eval(err) == 0)) {
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
+	}
+}
 #endif

net/ipv4/ip_gre.c

@@ -171,15 +171,6 @@ struct ipgre_net {
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
 						   struct rtnl_link_stats64 *tot)
 {
@@ -753,7 +744,6 @@ static int ipgre_rcv(struct sk_buff *skb)
 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct pcpu_tstats *tstats;
 	const struct iphdr *old_iph = ip_hdr(skb);
 	const struct iphdr *tiph;
 	struct flowi4 fl4;
@@ -977,9 +967,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		}
 	}
 	nf_reset(skb);
-	tstats = this_cpu_ptr(dev->tstats);
-	__IPTUNNEL_XMIT(tstats, &dev->stats);
+	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_IPV6)

net/ipv4/ip_vti.c

@@ -71,15 +71,6 @@ static int vti_tunnel_bind_dev(struct net_device *dev);
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 #define VTI_XMIT(stats1, stats2) do { \
 	int err; \
 	int pkt_len = skb->len; \

net/ipv4/ipip.c

@@ -147,15 +147,6 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
 						  struct rtnl_link_stats64 *tot)
 {
@@ -465,7 +456,6 @@ static int ipip_rcv(struct sk_buff *skb)
 static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct pcpu_tstats *tstats;
 	const struct iphdr *tiph = &tunnel->parms.iph;
 	u8 tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
@@ -592,9 +582,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl = old_iph->ttl;
 	nf_reset(skb);
-	tstats = this_cpu_ptr(dev->tstats);
-	__IPTUNNEL_XMIT(tstats, &dev->stats);
+	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 tx_error_icmp:

net/ipv6/ip6_gre.c

@@ -116,15 +116,6 @@ static u32 HASH_ADDR(const struct in6_addr *addr)
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
 						    struct rtnl_link_stats64 *tot)
 {

net/ipv6/ip6_tunnel.c

@@ -95,14 +95,6 @@ struct ip6_tnl_net {
 	struct ip6_tnl __rcu **tnls[2];
 };
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
 	struct pcpu_tstats sum = { 0 };

net/ipv6/sit.c

@@ -88,15 +88,6 @@ struct sit_net {
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
-	u64	rx_packets;
-	u64	rx_bytes;
-	u64	tx_packets;
-	u64	tx_bytes;
-	struct u64_stats_sync	syncp;
-};
 static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
 						   struct rtnl_link_stats64 *tot)
 {
@@ -685,7 +676,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct pcpu_tstats *tstats;
 	const struct iphdr *tiph = &tunnel->parms.iph;
 	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
 	u8 tos = tunnel->parms.iph.tos;
@@ -866,9 +856,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl = iph6->hop_limit;
 	nf_reset(skb);
-	tstats = this_cpu_ptr(dev->tstats);
-	__IPTUNNEL_XMIT(tstats, &dev->stats);
+	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 tx_error_icmp: