Merge branch 'net-checkpatch'

Tobin C. Harding says:

====================
Whitespace checkpatch fixes

This patch set fixes various whitespace checkpatch errors and warnings.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-02-10 13:37:49 -05:00
commit 201dacbb1d
1 changed file with 136 additions and 121 deletions
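(Warnings of this kind are reported by the kernel's checkpatch script. As a rough guide, and assuming a checked-out kernel source tree, an invocation against the touched file would look something like:

    ./scripts/checkpatch.pl -f net/core/dev.c

which lists the style errors and warnings, including the whitespace issues addressed by this series.)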


@@ -1,5 +1,5 @@
/*
 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
@@ -7,7 +7,7 @@
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
@@ -21,9 +21,9 @@
 *
 *	Changes:
 *	D.J. Barrow	: Fixed bug where dev->refcnt gets set
 *			  to 2 if register_netdev gets called
 *			  before net_dev_init & also removed a
 *			  few lines of code in the process.
 *	Alan Cox	: device private ioctl copies fields back.
 *	Alan Cox	: Transmit queue code does relevant
 *			  stunts to keep the queue safe.
@@ -36,7 +36,7 @@
 *	Alan Cox	: 100 backlog just doesn't cut it when
 *			  you start doing multicast video 8)
 *	Alan Cox	: Rewrote net_bh and list manager.
 *	Alan Cox	: Fix ETH_P_ALL echoback lengths.
 *	Alan Cox	: Took out transmit every packet pass
 *			  Saved a few bytes in the ioctl handler
 *	Alan Cox	: Network driver sets packet type before
@@ -46,7 +46,7 @@
 *	Richard Kooijman: Timestamp fixes.
 *	Alan Cox	: Wrong field in SIOCGIFDSTADDR
 *	Alan Cox	: Device lock protection.
 *	Alan Cox	: Fixed nasty side effect of device close
 *			  changes.
 *	Rudi Cilibrasi	: Pass the right thing to
 *			  set_mac_address()
@@ -67,8 +67,8 @@
 *	Paul Rusty Russell	: SIOCSIFNAME
 *	Pekka Riikonen	: Netdev boot-time settings code
 *	Andrew Morton	: Make unregister_netdevice wait
 *			  indefinitely on dev->refcnt
 *	J Hadi Salim	: - Backlog queue sampling
 *			  - netif_rx() feedback
 */
@@ -192,7 +192,8 @@ static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
-	while (++net->dev_base_seq == 0);
+	while (++net->dev_base_seq == 0)
+		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
@@ -274,8 +275,8 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
-static const unsigned short netdev_lock_type[] =
-	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
+static const unsigned short netdev_lock_type[] = {
+	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
@@ -291,22 +292,22 @@ static const unsigned short netdev_lock_type[] =
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

-static const char *const netdev_lock_name[] =
-	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
+static const char *const netdev_lock_name[] = {
+	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -352,10 +353,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
#endif

/*******************************************************************************
-		Protocol management and registration routines
-*******************************************************************************/
+ *
+ *	Protocol management and registration routines
+ *
+ *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
@@ -538,10 +540,10 @@ void dev_remove_offload(struct packet_offload *po)
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
-		      Device Boot-time Settings Routines
-*******************************************************************************/
+ *
+ *		      Device Boot-time Settings Routines
+ *
+ ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
@@ -574,13 +576,13 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
@@ -590,10 +592,10 @@ int netdev_boot_setup_check(struct net_device *dev)
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
@@ -603,14 +605,14 @@ EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
@@ -663,10 +665,10 @@ int __init netdev_boot_setup(char *str)
__setup("netdev=", netdev_boot_setup);

/*******************************************************************************
-			Device Interface Subroutines
+ *
+ *			Device Interface Subroutines
+ *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of a interface
@@ -737,15 +739,15 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name)
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
@@ -1289,8 +1291,8 @@ void netdev_state_change(struct net_device *dev)
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
@@ -1518,17 +1520,17 @@ static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
@@ -1585,17 +1587,17 @@ int register_netdevice_notifier(struct notifier_block *nb)
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked into the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
@@ -2496,6 +2498,7 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}
@@ -2717,9 +2720,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
+
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
@@ -2733,6 +2738,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
+
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
@@ -3208,6 +3214,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
+
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);
@@ -3237,6 +3244,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
+
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
@@ -3325,16 +3333,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
	}

	/* The device has no queue. Common case for software devices:
-	   loopback, all the sorts of tunnels...
-	   Really, it is unlikely that netif_tx_lock protection is necessary
-	   here. (f.e. loopback and IP tunnels are clean ignoring statistics
-	   counters.)
-	   However, it is possible, that they rely on protection
-	   made by us here.
-	   Check this and shot the lock. It is not prone from deadlocks.
-	   Either shot noqueue qdisc, it is even simpler 8)
+	 * loopback, all the sorts of tunnels...
+	 * Really, it is unlikely that netif_tx_lock protection is necessary
+	 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
+	 * counters.)
+	 * However, it is possible, that they rely on protection
+	 * made by us here.
+	 * Check this and shot the lock. It is not prone from deadlocks.
+	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */
@@ -3396,9 +3404,9 @@ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
EXPORT_SYMBOL(dev_queue_xmit_accel);

-/*=======================================================================
-			Receiver routines
-=======================================================================*/
+/*************************************************************************
+ *			Receiver routines
+ *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);
@@ -3766,6 +3774,7 @@ static int netif_rx_internal(struct sk_buff *skb)
#endif
	{
		unsigned int qtail;
+
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
@@ -3825,6 +3834,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
		while (clist) {
			struct sk_buff *skb = clist;
+
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
@@ -5661,6 +5671,7 @@ static int netdev_adjacent_sysfs_add(struct net_device *dev,
			      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];
+
	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
@@ -5671,6 +5682,7 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
			       struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];
+
	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
@@ -5940,6 +5952,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;
+
	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
@@ -6358,8 +6371,8 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
-	   is important. Some (broken) drivers set IFF_PROMISC, when
-	   IFF_ALLMULTI is requested not asking us and not reporting.
+	 * is important. Some (broken) drivers set IFF_PROMISC, when
+	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
@@ -6657,6 +6670,7 @@ EXPORT_SYMBOL(dev_change_xdp_fd);
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;
+
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
@@ -6723,8 +6737,8 @@ static void rollback_registered_many(struct list_head *head)

		/* Notify protocols, that we are about to destroy
-		   this device. They should clean all the things.
+		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
@@ -7070,6 +7084,7 @@ void netif_tx_stop_all_queues(struct net_device *dev)
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
		netif_tx_stop_queue(txq);
	}
}
@@ -7544,17 +7559,17 @@ void netdev_freemem(struct net_device *dev)
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
EXPORT_SYMBOL(alloc_netdev_mqs); EXPORT_SYMBOL(alloc_netdev_mqs);
/** /**
* free_netdev - free network device * free_netdev - free network device
* @dev: device * @dev: device
* *
* This function does the last stage of destroying an allocated device * This function does the last stage of destroying an allocated device
* interface. The reference to the device object is released. * interface. The reference to the device object is released. If this
* If this is the last reference then it will be freed. * is the last reference then it will be freed.Must be called in process
* Must be called in process context. * context.
*/ */
void free_netdev(struct net_device *dev) void free_netdev(struct net_device *dev)
{ {
@@ -7854,12 +7869,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
-	   this device. They should clean all the things.
-	   Note that dev->reg_state stays at NETREG_REGISTERED.
-	   This is wanted because this way 8021q and macvlan know
-	   the device is just moving and can keep their slaves up.
+	 * this device. They should clean all the things.
+	 *
+	 * Note that dev->reg_state stays at NETREG_REGISTERED.
+	 * This is wanted because this way 8021q and macvlan know
+	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);