Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Prevent index integer overflow in ptr_ring, from Jason Wang.

 2) Program mvpp2 multicast filter properly, from Mikulas Patocka.

 3) The bridge brport attribute file is write only and doesn't have a
    ->show() method, don't blindly invoke it. From Xin Long.

 4) Inverted mask used in genphy_setup_forced(), from Ingo van Lil.

 5) Fix multiple definition issue with if_ether.h UAPI header, from
    Hauke Mehrtens.

 6) Fix GFP_KERNEL usage in atomic in RDS protocol code, from Sowmini
    Varadhan.

 7) Revert XDP redirect support from thunderx driver, it is not
    implemented properly. From Jesper Dangaard Brouer.

 8) Fix missing RTNL protection across some tipc operations, from Ying
    Xue.

 9) Return the correct IV bytes in the TLS getsockopt code, from Boris
    Pismenny.

10) Take tclassid into consideration properly when doing FIB rule
    matching. From Stefano Brivio.

11) cxgb4 device needs more PCI VPD quirks, from Casey Leedom.

12) TUN driver doesn't align frags properly, and we can end up doing
    unaligned atomics on misaligned metadata. From Eric Dumazet.

13) Fix various crashes found using DEBUG_PREEMPT in rmnet driver, from
    Subash Abhinov Kasiviswanathan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (56 commits)
  tg3: APE heartbeat changes
  mlxsw: spectrum_router: Do not unconditionally clear route offload indication
  net: qualcomm: rmnet: Fix possible null dereference in command processing
  net: qualcomm: rmnet: Fix warning seen with 64 bit stats
  net: qualcomm: rmnet: Fix crash on real dev unregistration
  sctp: remove the left unnecessary check for chunk in sctp_renege_events
  rxrpc: Work around usercopy check
  tun: fix tun_napi_alloc_frags() frag allocator
  udplite: fix partial checksum initialization
  skbuff: Fix comment mis-spelling.
  decnet: move nf_{get/set}sockopt outside sock lock
  PCI/cxgb4: Extend T3 PCI quirk to T4+ devices
  cxgb4: fix trailing zero in CIM LA dump
  cxgb4: free up resources of pf 0-3
  fib_semantics: Don't match route with mismatching tclassid
  NFC: llcp: Limit size of SDP URI
  tls: getsockopt return record sequence number
  tls: reset the crypto info if copy_from_user fails
  tls: retrun the correct IV in getsockopt
  docs: segmentation-offloads.txt: add SCTP info
  ...
commit 79c0ef3e85

@@ -13,6 +13,7 @@ The following technologies are described:
 * Generic Segmentation Offload - GSO
 * Generic Receive Offload - GRO
 * Partial Generic Segmentation Offload - GSO_PARTIAL
+* SCTP accelleration with GSO - GSO_BY_FRAGS

 TCP Segmentation Offload
 ========================

@@ -49,6 +50,10 @@ datagram into multiple IPv4 fragments. Many of the requirements for UDP
 fragmentation offload are the same as TSO.  However the IPv4 ID for
 fragments should not increment as a single IPv4 datagram is fragmented.

+UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
+still receive them from tuntap and similar devices. Offload of UDP-based
+tunnel protocols is still supported.
+
 IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 ========================================================

@@ -83,10 +88,10 @@ SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the
 fact that the outer header also requests to have a non-zero checksum
 included in the outer header.

-Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
-has requested a remote checksum offload. In this case the inner headers
-will be left with a partial checksum and only the outer header checksum
-will be computed.
+Finally there is SKB_GSO_TUNNEL_REMCSUM which indicates that a given tunnel
+header has requested a remote checksum offload. In this case the inner
+headers will be left with a partial checksum and only the outer header
+checksum will be computed.

 Generic Segmentation Offload
 ============================

@@ -128,3 +133,28 @@ values for if the header was simply duplicated. The one exception to this
 is the outer IPv4 ID field. It is up to the device drivers to guarantee
 that the IPv4 ID field is incremented in the case that a given header does
 not have the DF bit set.
+
+SCTP accelleration with GSO
+===========================
+
+SCTP - despite the lack of hardware support - can still take advantage of
+GSO to pass one large packet through the network stack, rather than
+multiple small packets.
+
+This requires a different approach to other offloads, as SCTP packets
+cannot be just segmented to (P)MTU. Rather, the chunks must be contained in
+IP segments, padding respected. So unlike regular GSO, SCTP can't just
+generate a big skb, set gso_size to the fragmentation point and deliver it
+to IP layer.
+
+Instead, the SCTP protocol layer builds an skb with the segments correctly
+padded and stored as chained skbs, and skb_segment() splits based on those.
+To signal this, gso_size is set to the special value GSO_BY_FRAGS.
+
+Therefore, any code in the core networking stack must be aware of the
+possibility that gso_size will be GSO_BY_FRAGS and handle that case
+appropriately. (For size checks, the skb_gso_validate_*_len family of
+helpers do this automatically.)
+
+This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
+set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.

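A minimal sketch (not part of the patch) of the rule the new documentation states: GSO_BY_FRAGS is a marker, never a real segment size. The function name is made up for illustration; skb_gso_validate_mtu() is the member of the skb_gso_validate_*_len family present in kernels of this vintage:

    static bool example_skb_fits_mtu(struct sk_buff *skb, unsigned int mtu)
    {
            if (!skb_is_gso(skb))
                    return skb->len <= mtu;

            /* SCTP GSO: gso_size holds the GSO_BY_FRAGS marker, so the
             * chained frag_list segments must be validated individually.
             */
            if (skb_shinfo(skb)->gso_size == GSO_BY_FRAGS)
                    return skb_gso_validate_mtu(skb, mtu);

            return skb_gso_network_seglen(skb) <= mtu;
    }
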
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

-        udelay(10);
+        usleep_range(10, 20);
         timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
     }

@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event)
     if (!(apedata & APE_FW_STATUS_READY))
         return -EAGAIN;

-    /* Wait for up to 1 millisecond for APE to service previous event. */
-    err = tg3_ape_event_lock(tp, 1000);
+    /* Wait for up to 20 millisecond for APE to service previous event. */
+    err = tg3_ape_event_lock(tp, 20000);
     if (err)
         return err;

@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
     switch (kind) {
     case RESET_KIND_INIT:
+        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                 APE_HOST_SEG_SIG_MAGIC);
         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,

@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
         event = APE_EVENT_STATUS_STATE_START;
         break;
     case RESET_KIND_SHUTDOWN:
-        /* With the interface we are currently using,
-         * APE does not track driver state.  Wiping
-         * out the HOST SEGMENT SIGNATURE forces
-         * the APE to assume OS absent status.
-         */
-        tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
-
         if (device_may_wakeup(&tp->pdev->dev) &&
             tg3_flag(tp, WOL_ENABLE)) {
             tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,

@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
     tg3_ape_send_event(tp, event);
 }

+static void tg3_send_ape_heartbeat(struct tg3 *tp,
+                   unsigned long interval)
+{
+    /* Check if hb interval has exceeded */
+    if (!tg3_flag(tp, ENABLE_APE) ||
+        time_before(jiffies, tp->ape_hb_jiffies + interval))
+        return;
+
+    tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
+    tp->ape_hb_jiffies = jiffies;
+}
+
 static void tg3_disable_ints(struct tg3 *tp)
 {
     int i;

@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
         }
     }

+    tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
     return work_done;

 tx_recovery:

@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
         }
     }

+    tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
     return work_done;

 tx_recovery:

@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
     if (tg3_flag(tp, ENABLE_APE))
         /* Write our heartbeat update interval to APE. */
         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
-                APE_HOST_HEARTBEAT_INT_DISABLE);
+                APE_HOST_HEARTBEAT_INT_5SEC);

     tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t)
         tp->asf_counter = tp->asf_multiplier;
     }

+    /* Update the APE heartbeat every 5 seconds.*/
+    tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
+
     spin_unlock(&tp->lock);

 restart_timer:

@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                        pci_state_reg);

         tg3_ape_lock_init(tp);
+        tp->ape_hb_interval =
+            msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
     }

     /* Set up tp->grc_local_ctrl before calling

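The heartbeat helper added above leans on the kernel's wrap-safe jiffies comparison. A standalone sketch of the same rate-limiting idiom (names are illustrative, not the driver's):

    #include <linux/jiffies.h>

    static unsigned long last_beat;

    static void rate_limited_beat(unsigned long interval)
    {
            /* time_before() compares via signed subtraction, so the test
             * stays correct across jiffies wrap-around.
             */
            if (time_before(jiffies, last_beat + interval))
                    return;

            last_beat = jiffies;
            /* ... emit one heartbeat write here ... */
    }
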
@@ -2508,6 +2508,7 @@
 #define TG3_APE_LOCK_PHY3        5
 #define TG3_APE_LOCK_GPIO        7

+#define TG3_APE_HB_INTERVAL      (tp->ape_hb_interval)
 #define TG3_EEPROM_SB_F1R2_MBA_OFF   0x10

@@ -3423,6 +3424,10 @@ struct tg3 {
     struct device           *hwmon_dev;
     bool                link_up;
     bool                pcierr_recovery;
+
+    u32             ape_hb;
+    unsigned long           ape_hb_interval;
+    unsigned long           ape_hb_jiffies;
 };

 /* Accessor macros for chip and asic attributes

@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get);

 void cavium_ptp_put(struct cavium_ptp *ptp)
 {
+    if (!ptp)
+        return;
     pci_dev_put(ptp->pdev);
 }
 EXPORT_SYMBOL(cavium_ptp_put);

@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO);
 MODULE_PARM_DESC(cpi_alg,
          "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

-struct nicvf_xdp_tx {
-    u64 dma_addr;
-    u8  qidx;
-};
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
     if (nic->sqs_mode)

@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic)
     return 0;
 }

-static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
-{
-    /* Check if it's a recycled page, if not unmap the DMA mapping.
-     * Recycled page holds an extra reference.
-     */
-    if (page_ref_count(page) == 1) {
-        dma_addr &= PAGE_MASK;
-        dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-                     RCV_FRAG_LEN + XDP_HEADROOM,
-                     DMA_FROM_DEVICE,
-                     DMA_ATTR_SKIP_CPU_SYNC);
-    }
-}
-
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                 struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
                 struct rcv_queue *rq, struct sk_buff **skb)
 {
     struct xdp_buff xdp;
     struct page *page;
-    struct nicvf_xdp_tx *xdp_tx = NULL;
     u32 action;
-    u16 len, err, offset = 0;
+    u16 len, offset = 0;
     u64 dma_addr, cpu_addr;
     void *orig_data;

@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
     cpu_addr = (u64)phys_to_virt(cpu_addr);
     page = virt_to_page((void *)cpu_addr);

-    xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
+    xdp.data_hard_start = page_address(page);
     xdp.data = (void *)cpu_addr;
     xdp_set_data_meta_invalid(&xdp);
     xdp.data_end = xdp.data + len;

@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
     switch (action) {
     case XDP_PASS:
-        nicvf_unmap_page(nic, page, dma_addr);
+        /* Check if it's a recycled page, if not
+         * unmap the DMA mapping.
+         *
+         * Recycled page holds an extra reference.
+         */
+        if (page_ref_count(page) == 1) {
+            dma_addr &= PAGE_MASK;
+            dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+                         RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                         DMA_FROM_DEVICE,
+                         DMA_ATTR_SKIP_CPU_SYNC);
+        }

         /* Build SKB and pass on packet to network stack */
         *skb = build_skb(xdp.data,

@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
     case XDP_TX:
         nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
         return true;
-    case XDP_REDIRECT:
-        /* Save DMA address for use while transmitting */
-        xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-        xdp_tx->dma_addr = dma_addr;
-        xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
-
-        err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
-        if (!err)
-            return true;
-
-        /* Free the page on error */
-        nicvf_unmap_page(nic, page, dma_addr);
-        put_page(page);
-        break;
     default:
         bpf_warn_invalid_xdp_action(action);
         /* fall through */

@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
         trace_xdp_exception(nic->netdev, prog, action);
         /* fall through */
     case XDP_DROP:
-        nicvf_unmap_page(nic, page, dma_addr);
+        /* Check if it's a recycled page, if not
+         * unmap the DMA mapping.
+         *
+         * Recycled page holds an extra reference.
+         */
+        if (page_ref_count(page) == 1) {
+            dma_addr &= PAGE_MASK;
+            dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+                         RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                         DMA_FROM_DEVICE,
+                         DMA_ATTR_SKIP_CPU_SYNC);
+        }
         put_page(page);
         return true;
     }

@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
     }
 }

-static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
-{
-    struct nicvf *nic = netdev_priv(netdev);
-    struct nicvf *snic = nic;
-    struct nicvf_xdp_tx *xdp_tx;
-    struct snd_queue *sq;
-    struct page *page;
-    int err, qidx;
-
-    if (!netif_running(netdev) || !nic->xdp_prog)
-        return -EINVAL;
-
-    page = virt_to_page(xdp->data);
-    xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-    qidx = xdp_tx->qidx;
-
-    if (xdp_tx->qidx >= nic->xdp_tx_queues)
-        return -EINVAL;
-
-    /* Get secondary Qset's info */
-    if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
-        qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
-        snic = (struct nicvf *)nic->snicvf[qidx - 1];
-        if (!snic)
-            return -EINVAL;
-        qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
-    }
-
-    sq = &snic->qs->sq[qidx];
-    err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
-                      xdp_tx->dma_addr,
-                      xdp->data_end - xdp->data);
-    if (err)
-        return -ENOMEM;
-
-    nicvf_xdp_sq_doorbell(snic, sq, qidx);
-    return 0;
-}
-
-static void nicvf_xdp_flush(struct net_device *dev)
-{
-    return;
-}
-
 static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
 {
     struct hwtstamp_config config;

@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = {
     .ndo_fix_features       = nicvf_fix_features,
     .ndo_set_features       = nicvf_set_features,
     .ndo_bpf        = nicvf_xdp,
-    .ndo_xdp_xmit       = nicvf_xdp_xmit,
-    .ndo_xdp_flush          = nicvf_xdp_flush,
     .ndo_do_ioctl           = nicvf_ioctl,
 };

@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
     /* Reserve space for header modifications by BPF program */
     if (rbdr->is_xdp)
-        buf_len += XDP_HEADROOM;
+        buf_len += XDP_PACKET_HEADROOM;

     /* Check if it's recycled */
     if (pgcache)

@@ -224,9 +224,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
             nic->rb_page = NULL;
             return -ENOMEM;
         }
         if (pgcache)
-            pgcache->dma_addr = *rbuf + XDP_HEADROOM;
+            pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
         nic->rb_page_offset += buf_len;
     }

@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
     int qentry;

     if (subdesc_cnt > sq->xdp_free_cnt)
-        return -1;
+        return 0;

     qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
     sq->xdp_desc_cnt += subdesc_cnt;

-    return 0;
+    return 1;
 }

 /* Calculate no of SQ subdescriptors needed to transmit all

@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
         if (page_ref_count(page) != 1)
             return;

-        len += XDP_HEADROOM;
+        len += XDP_PACKET_HEADROOM;
         /* Receive buffers in XDP mode are mapped from page start */
         dma_addr &= PAGE_MASK;
     }

@@ -11,7 +11,6 @@
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
 #include <linux/bpf.h>
-#include <net/xdp.h>
 #include "q_struct.h"

@@ -94,9 +93,6 @@
 #define RCV_FRAG_LEN    (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

-#define RCV_BUF_HEADROOM    128 /* To store dma address for XDP redirect */
-#define XDP_HEADROOM        (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
-
 #define MAX_CQES_FOR_TX     ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                  MAX_CQE_PER_PKT_XMIT)

@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
     if (is_t6(padap->params.chip)) {
         size = padap->params.cim_la_size / 10 + 1;
-        size *= 11 * sizeof(u32);
+        size *= 10 * sizeof(u32);
     } else {
         size = padap->params.cim_la_size / 8;
         size *= 8 * sizeof(u32);

@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
     case CUDBG_CIM_LA:
         if (is_t6(adap->params.chip)) {
             len = adap->params.cim_la_size / 10 + 1;
-            len *= 11 * sizeof(u32);
+            len *= 10 * sizeof(u32);
         } else {
             len = adap->params.cim_la_size / 8;
             len *= 8 * sizeof(u32);

@@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
     pcie_fw = readl(adap->regs + PCIE_FW_A);
     /* Check if cxgb4 is the MASTER and fw is initialized */
-    if (!(pcie_fw & PCIE_FW_INIT_F) ||
+    if (num_vfs &&
+        (!(pcie_fw & PCIE_FW_INIT_F) ||
         !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
-        PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
+        PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
         dev_warn(&pdev->dev,
              "cxgb4 driver needs to be MASTER to support SRIOV\n");
         return -EOPNOTSUPP;

@@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev)
 #if IS_ENABLED(CONFIG_IPV6)
         t4_cleanup_clip_tbl(adapter);
 #endif
-        iounmap(adapter->regs);
         if (!is_t4(adapter->params.chip))
             iounmap(adapter->bar2);
-        pci_disable_pcie_error_reporting(pdev);
-        if ((adapter->flags & DEV_ENABLED)) {
-            pci_disable_device(pdev);
-            adapter->flags &= ~DEV_ENABLED;
-        }
-        pci_release_regions(pdev);
-        kfree(adapter->mbox_log);
-        synchronize_rcu();
-        kfree(adapter);
     }
 #ifdef CONFIG_PCI_IOV
     else {
         cxgb4_iov_configure(adapter->pdev, 0);
     }
 #endif
+    iounmap(adapter->regs);
+    pci_disable_pcie_error_reporting(pdev);
+    if ((adapter->flags & DEV_ENABLED)) {
+        pci_disable_device(pdev);
+        adapter->flags &= ~DEV_ENABLED;
+    }
+    pci_release_regions(pdev);
+    kfree(adapter->mbox_log);
+    synchronize_rcu();
+    kfree(adapter);
 }

 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt

@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 }

 #define EEPROM_STAT_ADDR   0x7bfc
-#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024

@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
     if (!vpd)
         return -ENOMEM;

-    /* We have two VPD data structures stored in the adapter VPD area.
-     * By default, Linux calculates the size of the VPD area by traversing
-     * the first VPD area at offset 0x0, so we need to tell the OS what
-     * our real VPD size is.
-     */
-    ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
-    if (ret < 0)
-        goto out;
-
     /* Card information normally starts at VPD_BASE but early cards had
      * it at 0.
      */

@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev)
     return 0;
 }

+static void release_login_buffer(struct ibmvnic_adapter *adapter)
+{
+    kfree(adapter->login_buf);
+    adapter->login_buf = NULL;
+}
+
+static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
+{
+    kfree(adapter->login_rsp_buf);
+    adapter->login_rsp_buf = NULL;
+}
+
 static void release_resources(struct ibmvnic_adapter *adapter)
 {
     int i;

@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter)
             }
         }
     }
     kfree(adapter->napi);
     adapter->napi = NULL;
+
+    release_login_rsp_buffer(adapter);
 }

 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)

@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev)
     return rc;
 }

+static void clean_rx_pools(struct ibmvnic_adapter *adapter)
+{
+    struct ibmvnic_rx_pool *rx_pool;
+    u64 rx_entries;
+    int rx_scrqs;
+    int i, j;
+
+    if (!adapter->rx_pool)
+        return;
+
+    rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+    rx_entries = adapter->req_rx_add_entries_per_subcrq;
+
+    /* Free any remaining skbs in the rx buffer pools */
+    for (i = 0; i < rx_scrqs; i++) {
+        rx_pool = &adapter->rx_pool[i];
+        if (!rx_pool)
+            continue;
+
+        netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
+        for (j = 0; j < rx_entries; j++) {
+            if (rx_pool->rx_buff[j].skb) {
+                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+                rx_pool->rx_buff[j].skb = NULL;
+            }
+        }
+    }
+}
+
 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
 {
     struct ibmvnic_tx_pool *tx_pool;

@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev)
         }
     }

+    clean_rx_pools(adapter);
     clean_tx_pools(adapter);
     adapter->state = VNIC_CLOSED;
     return rc;

@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
         return 0;
     }

-    netif_carrier_on(netdev);
-
     /* kick napi */
     for (i = 0; i < adapter->req_rx_queues; i++)
         napi_schedule(&adapter->napi[i]);

@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
     if (adapter->reset_reason != VNIC_RESET_FAILOVER)
         netdev_notify_peers(netdev);

+    netif_carrier_on(netdev);
+
     return 0;
 }

@@ -1853,6 +1898,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
                    be16_to_cpu(next->rx_comp.rc));
             /* free the entry */
             next->rx_comp.first = 0;
+            dev_kfree_skb_any(rx_buff->skb);
             remove_buff_from_pool(adapter, rx_buff);
             continue;
         }

@@ -3013,6 +3059,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
     struct vnic_login_client_data *vlcd;
     int i;

+    release_login_rsp_buffer(adapter);
     client_data_len = vnic_client_data_len(adapter);

     buffer_size =

@@ -3708,6 +3755,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
     dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
              DMA_BIDIRECTIONAL);
+    release_login_buffer(adapter);
     dma_unmap_single(dev, adapter->login_rsp_buf_token,
              adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
     int id = port->id;
     bool allmulti = dev->flags & IFF_ALLMULTI;

+retry:
     mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
     /* Remove all port->id's mcast enries */
     mvpp2_prs_mcast_del_all(priv, id);

-    if (allmulti && !netdev_mc_empty(dev)) {
-        netdev_for_each_mc_addr(ha, dev)
-            mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+    if (!allmulti) {
+        netdev_for_each_mc_addr(ha, dev) {
+            if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+                allmulti = true;
+                goto retry;
+            }
+        }
     }
 }

@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
                         u32 tb_id,
                         struct netlink_ext_ack *extack)
 {
+    struct mlxsw_sp_mr_table *mr4_table;
+    struct mlxsw_sp_fib *fib4;
+    struct mlxsw_sp_fib *fib6;
     struct mlxsw_sp_vr *vr;
     int err;

@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
         NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
         return ERR_PTR(-EBUSY);
     }
-    vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
-    if (IS_ERR(vr->fib4))
-        return ERR_CAST(vr->fib4);
-    vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
-    if (IS_ERR(vr->fib6)) {
-        err = PTR_ERR(vr->fib6);
+    fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+    if (IS_ERR(fib4))
+        return ERR_CAST(fib4);
+    fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+    if (IS_ERR(fib6)) {
+        err = PTR_ERR(fib6);
         goto err_fib6_create;
     }
-    vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
-                         MLXSW_SP_L3_PROTO_IPV4);
-    if (IS_ERR(vr->mr4_table)) {
-        err = PTR_ERR(vr->mr4_table);
+    mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+                         MLXSW_SP_L3_PROTO_IPV4);
+    if (IS_ERR(mr4_table)) {
+        err = PTR_ERR(mr4_table);
         goto err_mr_table_create;
     }
+    vr->fib4 = fib4;
+    vr->fib6 = fib6;
+    vr->mr4_table = mr4_table;
     vr->tb_id = tb_id;
     return vr;

 err_mr_table_create:
-    mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
-    vr->fib6 = NULL;
+    mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
 err_fib6_create:
-    mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
-    vr->fib4 = NULL;
+    mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
     return ERR_PTR(err);
 }

@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
     struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
     int i;

+    if (!list_is_singular(&nh_grp->fib_list))
+        return;
+
     for (i = 0; i < nh_grp->count; i++) {
         struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

@@ -43,12 +43,6 @@

 /* Local Definitions and Declarations */

-struct rmnet_walk_data {
-    struct net_device *real_dev;
-    struct list_head *head;
-    struct rmnet_port *port;
-};
-
 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 {
     return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;

@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev)
 static void rmnet_unregister_bridge(struct net_device *dev,
                     struct rmnet_port *port)
 {
-    struct net_device *rmnet_dev, *bridge_dev;
     struct rmnet_port *bridge_port;
+    struct net_device *bridge_dev;

     if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
         return;

     /* bridge slave handling */
     if (!port->nr_rmnet_devs) {
-        rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
-        netdev_upper_dev_unlink(dev, rmnet_dev);
-
         bridge_dev = port->bridge_ep;

         bridge_port = rmnet_get_port_rtnl(bridge_dev);

@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev,
         bridge_dev = port->bridge_ep;

         bridge_port = rmnet_get_port_rtnl(bridge_dev);
-        rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
-        netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
-
         rmnet_unregister_real_device(bridge_dev, bridge_port);
     }
 }

@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
     if (err)
         goto err1;

-    err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
-    if (err)
-        goto err2;
-
     port->rmnet_mode = mode;

     hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,

     return 0;

-err2:
-    rmnet_vnd_dellink(mux_id, port, ep);
 err1:
     rmnet_unregister_real_device(real_dev, port);
 err0:

@@ -204,14 +186,13 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,

 static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 {
+    struct rmnet_priv *priv = netdev_priv(dev);
     struct net_device *real_dev;
     struct rmnet_endpoint *ep;
     struct rmnet_port *port;
     u8 mux_id;

-    rcu_read_lock();
-    real_dev = netdev_master_upper_dev_get_rcu(dev);
-    rcu_read_unlock();
+    real_dev = priv->real_dev;

     if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
         return;

@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
     port = rmnet_get_port_rtnl(real_dev);

     mux_id = rmnet_vnd_get_mux(dev);
-    netdev_upper_dev_unlink(dev, real_dev);

     ep = rmnet_get_endpoint(port, mux_id);
     if (ep) {

@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
     unregister_netdevice_queue(dev, head);
 }

-static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
-{
-    struct rmnet_walk_data *d = data;
-    struct rmnet_endpoint *ep;
-    u8 mux_id;
-
-    mux_id = rmnet_vnd_get_mux(rmnet_dev);
-    ep = rmnet_get_endpoint(d->port, mux_id);
-    if (ep) {
-        hlist_del_init_rcu(&ep->hlnode);
-        rmnet_vnd_dellink(mux_id, d->port, ep);
-        kfree(ep);
-    }
-    netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
-    unregister_netdevice_queue(rmnet_dev, d->head);
-
-    return 0;
-}
-
 static void rmnet_force_unassociate_device(struct net_device *dev)
 {
     struct net_device *real_dev = dev;
-    struct rmnet_walk_data d;
+    struct hlist_node *tmp_ep;
+    struct rmnet_endpoint *ep;
     struct rmnet_port *port;
+    unsigned long bkt_ep;
     LIST_HEAD(list);

     if (!rmnet_is_real_dev_registered(real_dev))

@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev)

     ASSERT_RTNL();

-    d.real_dev = real_dev;
-    d.head = &list;
-
     port = rmnet_get_port_rtnl(dev);
-    d.port = port;

     rcu_read_lock();
     rmnet_unregister_bridge(dev, port);

-    netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
+    hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+        unregister_netdevice_queue(ep->egress_dev, &list);
+        rmnet_vnd_dellink(ep->mux_id, port, ep);
+
+        hlist_del_init_rcu(&ep->hlnode);
+        kfree(ep);
+    }

     rcu_read_unlock();
     unregister_netdevice_many(&list);

@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
     if (err)
         return -EBUSY;

-    err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
-                       extack);
-    if (err)
-        return -EINVAL;
-
     slave_port = rmnet_get_port(slave_dev);
     slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
     slave_port->bridge_ep = real_dev;

@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev,
     port->rmnet_mode = RMNET_EPMODE_VND;
     port->bridge_ep = NULL;

-    netdev_upper_dev_unlink(slave_dev, rmnet_dev);
     slave_port = rmnet_get_port(slave_dev);
     rmnet_unregister_real_device(slave_dev, slave_port);

@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
     }

     ep = rmnet_get_endpoint(port, mux_id);
+    if (!ep) {
+        kfree_skb(skb);
+        return RX_HANDLER_CONSUMED;
+    }
+
     vnd = ep->egress_dev;

     ip_family = cmd->flow_control.ip_family;

@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev,
     memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

     for_each_possible_cpu(cpu) {
-        pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+        pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

         do {
             start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);

@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev)
     /* Enable MagicPacket */
     ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

-    /* Increased clock usage so device won't be suspended */
-    clk_enable(priv->clk);
-
     return enable_irq_wake(priv->emac_irq);
 }

@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev)
     if (ret < 0)
         return ret;

-    /* Restore clock usage count */
-    clk_disable(priv->clk);
-
     return disable_irq_wake(priv->emac_irq);
 }

|
@ -40,7 +40,6 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/sh_eth.h>
|
||||
#include <linux/of_mdio.h>
|
||||
|
||||
|
@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
|
|||
wol->supported = 0;
|
||||
wol->wolopts = 0;
|
||||
|
||||
if (mdp->cd->magic && mdp->clk) {
|
||||
if (mdp->cd->magic) {
|
||||
wol->supported = WAKE_MAGIC;
|
||||
wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
|
||||
}
|
||||
|
@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
|
|||
{
|
||||
struct sh_eth_private *mdp = netdev_priv(ndev);
|
||||
|
||||
if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
|
||||
if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
|
||||
|
@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
|
|||
goto out_release;
|
||||
}
|
||||
|
||||
/* Get clock, if not found that's OK but Wake-On-Lan is unavailable */
|
||||
mdp->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(mdp->clk))
|
||||
mdp->clk = NULL;
|
||||
|
||||
ndev->base_addr = res->start;
|
||||
|
||||
spin_lock_init(&mdp->lock);
|
||||
|
@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
goto out_napi_del;
|
||||
|
||||
if (mdp->cd->magic && mdp->clk)
|
||||
if (mdp->cd->magic)
|
||||
device_set_wakeup_capable(&pdev->dev, 1);
|
||||
|
||||
/* print device information */
|
||||
|
@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev)
|
|||
/* Enable MagicPacket */
|
||||
sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
|
||||
|
||||
/* Increased clock usage so device won't be suspended */
|
||||
clk_enable(mdp->clk);
|
||||
|
||||
return enable_irq_wake(ndev->irq);
|
||||
}
|
||||
|
||||
|
@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Restore clock usage count */
|
||||
clk_disable(mdp->clk);
|
||||
|
||||
return disable_irq_wake(ndev->irq);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev)
         ctl |= BMCR_FULLDPLX;

     return phy_modify(phydev, MII_BMCR,
-              BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
+              ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);

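For context, phy_modify(phydev, regnum, mask, set) performs a read-modify-write where "mask" selects the bits to clear. A sketch of what the corrected call does, open-coded purely for illustration (not the driver's actual code):

    int val = phy_read(phydev, MII_BMCR);

    if (val < 0)
            return val;
    /* mask = ~(LOOPBACK|ISOLATE|PDOWN): clear everything except those
     * three control bits, then OR in the forced speed/duplex value.
     * The old, un-inverted mask preserved exactly the wrong set of bits.
     */
    val &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
    val |= ctl;
    return phy_write(phydev, MII_BMCR, val);
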
@@ -166,6 +166,8 @@ struct tbnet_ring {
  * @connected_work: Worker that finalizes the ThunderboltIP connection
  *          setup and enables DMA paths for high speed data
  *          transfers
+ * @disconnect_work: Worker that handles tearing down the ThunderboltIP
+ *           connection
  * @rx_hdr: Copy of the currently processed Rx frame. Used when a
  *      network packet consists of multiple Thunderbolt frames.
  *      In host byte order.

@@ -190,6 +192,7 @@ struct tbnet {
     int login_retries;
     struct delayed_work login_work;
     struct work_struct connected_work;
+    struct work_struct disconnect_work;
     struct thunderbolt_ip_frame_header rx_hdr;
     struct tbnet_ring rx_ring;
     atomic_t frame_id;

@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
     case TBIP_LOGOUT:
         ret = tbnet_logout_response(net, route, sequence, command_id);
         if (!ret)
-            tbnet_tear_down(net, false);
+            queue_work(system_long_wq, &net->disconnect_work);
         break;

     default:

@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work)
     }
 }

+static void tbnet_disconnect_work(struct work_struct *work)
+{
+    struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
+
+    tbnet_tear_down(net, false);
+}
+
 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
                   const struct thunderbolt_ip_frame_header *hdr)
 {

@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev)

     napi_disable(&net->napi);

+    cancel_work_sync(&net->disconnect_work);
     tbnet_tear_down(net, true);

     tb_ring_free(net->rx_ring.ring);

@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
     net = netdev_priv(dev);
     INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
     INIT_WORK(&net->connected_work, tbnet_connected_work);
+    INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
     mutex_init(&net->connection_lock);
     atomic_set(&net->command_id, 0);
     atomic_set(&net->frame_id, 0);

@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
     stop_login(net);
     if (netif_running(net->dev)) {
         netif_device_detach(net->dev);
-        tb_ring_stop(net->rx_ring.ring);
-        tb_ring_stop(net->tx_ring.ring);
-        tbnet_free_buffers(&net->rx_ring);
-        tbnet_free_buffers(&net->tx_ring);
+        tbnet_tear_down(net, true);
     }

     return 0;

@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
     skb->truesize += skb->data_len;

     for (i = 1; i < it->nr_segs; i++) {
+        struct page_frag *pfrag = &current->task_frag;
         size_t fragsz = it->iov[i].iov_len;
-        unsigned long offset;
-        struct page *page;
-        void *data;

         if (fragsz == 0 || fragsz > PAGE_SIZE) {
             err = -EINVAL;
             goto free;
         }

-        local_bh_disable();
-        data = napi_alloc_frag(fragsz);
-        local_bh_enable();
-        if (!data) {
+        if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
             err = -ENOMEM;
             goto free;
         }

-        page = virt_to_head_page(data);
-        offset = data - page_address(page);
-        skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
+        skb_fill_page_desc(skb, i - 1, pfrag->page,
+                   pfrag->offset, fragsz);
+        page_ref_inc(pfrag->page);
+        pfrag->offset += fragsz;
     }

     return skb;

@@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,

 static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
 {
-    pci_set_vpd_size(dev, 8192);
+    int chip = (dev->device & 0xf000) >> 12;
+    int func = (dev->device & 0x0f00) >>  8;
+    int prod = (dev->device & 0x00ff) >>  0;
+
+    /*
+     * If this is a T3-based adapter, there's a 1KB VPD area at offset
+     * 0xc00 which contains the preferred VPD values.  If this is a T4 or
+     * later based adapter, the special VPD is at offset 0x400 for the
+     * Physical Functions (the SR-IOV Virtual Functions have no VPD
+     * Capabilities).  The PCI VPD Access core routines will normally
+     * compute the size of the VPD by parsing the VPD Data Structure at
+     * offset 0x000.  This will result in silent failures when attempting
+     * to accesses these other VPD areas which are beyond those computed
+     * limits.
+     */
+    if (chip == 0x0 && prod >= 0x20)
+        pci_set_vpd_size(dev, 8192);
+    else if (chip >= 0x4 && func < 0x8)
+        pci_set_vpd_size(dev, 2048);
 }

-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+            quirk_chelsio_extend_vpd);

 #ifdef CONFIG_ACPI
 /*

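Worked examples of the device-ID decode above. The IDs are hypothetical, chosen only to exercise each branch of the quirk:

    /* 0x0031: chip 0x0, prod 0x31 >= 0x20     -> T3 part,   VPD size 8192
     * 0x5401: chip 0x5 >= 0x4, func 0x4 < 0x8 -> T5 PF,     VPD size 2048
     * 0x5801: chip 0x5, func 0x8              -> SR-IOV VF, no VPD, untouched
     */
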
@@ -469,7 +469,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
  */
 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-    if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+    if (size > KMALLOC_MAX_SIZE / sizeof(void *))
         return NULL;
     return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
 }

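Why the rewritten bound matters, with concrete numbers (assuming a 32-bit size_t, where the old multiplication could wrap):

    /* size = 0x40000001, sizeof(void *) == 4:
     *
     *   size * sizeof(void *) == 0x100000004 == 0x4 after 32-bit wrap,
     *
     * so the old check passed and a tiny array was allocated for a huge
     * ring.  "size > KMALLOC_MAX_SIZE / sizeof(void *)" cannot wrap.
     */
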
@@ -3646,7 +3646,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
     return true;
 }

-/* For small packets <= CHECKSUM_BREAK peform checksum complete directly
+/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
  * in checksum_init.
  */
 #define CHECKSUM_BREAK 76

@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
         UDP_SKB_CB(skb)->cscov = cscov;
         if (skb->ip_summed == CHECKSUM_COMPLETE)
             skb->ip_summed = CHECKSUM_NONE;
+        skb->csum_valid = 0;
     }

     return 0;

@@ -23,7 +23,6 @@
 #define _UAPI_LINUX_IF_ETHER_H

 #include <linux/types.h>
-#include <linux/libc-compat.h>

 /*
  * IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble

@@ -151,6 +150,11 @@
  * This is an Ethernet frame header.
  */

+/* allow libcs like musl to deactivate this, glibc does not implement this. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR 1
+#endif
+
 #if __UAPI_DEF_ETHHDR
 struct ethhdr {
     unsigned char   h_dest[ETH_ALEN];   /* destination eth addr */

@@ -264,10 +264,4 @@

 #endif /* __GLIBC__ */

-/* Definitions for if_ether.h */
-/* allow libcs like musl to deactivate this, glibc does not implement this. */
-#ifndef __UAPI_DEF_ETHHDR
-#define __UAPI_DEF_ETHHDR 1
-#endif
-
 #endif /* _UAPI_LIBC_COMPAT_H */

@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj,
     struct brport_attribute *brport_attr = to_brport_attr(attr);
     struct net_bridge_port *p = to_brport(kobj);

+    if (!brport_attr->show)
+        return -EINVAL;
+
     return brport_attr->show(p, buf);
 }

@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+    bool disabling;
     int rc;

+    disabling = txq < dev->real_num_tx_queues;
+
     if (txq < 1 || txq > dev->num_tx_queues)
         return -EINVAL;

@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
         if (dev->num_tc)
             netif_setup_tc(dev, txq);

-        if (txq < dev->real_num_tx_queues) {
+        dev->real_num_tx_queues = txq;
+
+        if (disabling) {
             synchronize_net();
             qdisc_reset_all_tx_gt(dev, txq);
 #ifdef CONFIG_XPS
             netif_reset_xps_queues_gt(dev, txq);
 #endif
         }
+    } else {
+        dev->real_num_tx_queues = txq;
     }

-    dev->real_num_tx_queues = txq;
     return 0;
 }
 EXPORT_SYMBOL(netif_set_real_num_tx_queues);

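The ordering the change enforces, restated as a sketch (reasoning inferred from the diff above, not from the changelog):

    /* Publish the reduced queue count before the grace period:
     *
     *   dev->real_num_tx_queues = txq;   // senders now see the new bound
     *   if (disabling) {
     *           synchronize_net();       // wait out in-flight queue picks
     *           qdisc_reset_all_tx_gt(dev, txq);
     *   }
     *
     * Previously the count was updated only after the qdisc reset, leaving
     * a window in which a queue beyond txq could still be selected and
     * then reset underneath a sender.
     */
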
@@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
     lock_sock(sk);
     err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
     release_sock(sk);
+#ifdef CONFIG_NETFILTER
+    /* we need to exclude all possible ENOPROTOOPTs except default case */
+    if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
+        optname != DSO_STREAM && optname != DSO_SEQPACKET)
+        err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
+#endif

     return err;
 }

@@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
         dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
         break;

-    default:
-#ifdef CONFIG_NETFILTER
-        return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
-#endif
-    case DSO_LINKINFO:
-    case DSO_STREAM:
-    case DSO_SEQPACKET:
-        return -ENOPROTOOPT;
-
     case DSO_MAXWINDOW:
         if (optlen != sizeof(unsigned long))
             return -EINVAL;

@@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
             return -EINVAL;
         scp->info_loc = u.info;
         break;
+
+    case DSO_LINKINFO:
+    case DSO_STREAM:
+    case DSO_SEQPACKET:
+    default:
+        return -ENOPROTOOPT;
     }

     return 0;

@@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
     lock_sock(sk);
     err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
     release_sock(sk);
+#ifdef CONFIG_NETFILTER
+    if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
+        optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
+        optname != DSO_CONREJECT) {
+        int len;
+
+        if (get_user(len, optlen))
+            return -EFAULT;
+
+        err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
+        if (err >= 0)
+            err = put_user(len, optlen);
+    }
+#endif

     return err;
 }

@@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
         r_data = &link;
         break;

-    default:
-#ifdef CONFIG_NETFILTER
-    {
-        int ret, len;
-
-        if (get_user(len, optlen))
-            return -EFAULT;
-
-        ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
-        if (ret >= 0)
-            ret = put_user(len, optlen);
-        return ret;
-    }
-#endif
-    case DSO_STREAM:
-    case DSO_SEQPACKET:
-    case DSO_CONACCEPT:
-    case DSO_CONREJECT:
-        return -ENOPROTOOPT;
-
     case DSO_MAXWINDOW:
         if (r_len > sizeof(unsigned long))
             r_len = sizeof(unsigned long);

@@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
         r_len = sizeof(unsigned char);
         r_data = &scp->info_rem;
         break;
+
+    case DSO_STREAM:
+    case DSO_SEQPACKET:
+    case DSO_CONACCEPT:
+    case DSO_CONREJECT:
+    default:
+        return -ENOPROTOOPT;
     }

     if (r_data) {

@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
                 fi->fib_nh, cfg, extack))
             return 1;
     }
+#ifdef CONFIG_IP_ROUTE_CLASSID
+    if (cfg->fc_flow &&
+        cfg->fc_flow != fi->fib_nh->nh_tclassid)
+        return 1;
+#endif
     if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
         (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
         return 0;

@@ -2027,6 +2027,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
     }
 }

+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+    struct sk_buff *skb, *next;
+
+    skb = tcp_send_head(sk);
+    tcp_for_write_queue_from_safe(skb, next, sk) {
+        if (len <= skb->len)
+            break;
+
+        if (unlikely(TCP_SKB_CB(skb)->eor))
+            return false;
+
+        len -= skb->len;
+    }
+
+    return true;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets.  This discovers routing

@@ -2099,6 +2117,9 @@ static int tcp_mtu_probe(struct sock *sk)
         return 0;
     }

+    if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+        return -1;
+
     /* We're allowed to probe.  Build it now. */
     nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
     if (!nskb)

@@ -2134,6 +2155,10 @@ static int tcp_mtu_probe(struct sock *sk)
             /* We've eaten all the data from this skb.
              * Throw it away. */
             TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+            /* If this is the last SKB we copy and eor is set
+             * we need to propagate it to the new skb.
+             */
+            TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
             tcp_unlink_write_queue(skb, sk);
             sk_wmem_free_skb(sk, skb);
         } else {

@@ -2024,6 +2024,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
         err = udplite_checksum_init(skb, uh);
         if (err)
             return err;
+
+        if (UDP_SKB_CB(skb)->partial_cov) {
+            skb->csum = inet_compute_pseudo(skb, proto);
+            return 0;
+        }
     }

     /* Note, we are only interested in != 0 or == 0, thus the

@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
         err = udplite_checksum_init(skb, uh);
         if (err)
             return err;
+
+        if (UDP_SKB_CB(skb)->partial_cov) {
+            skb->csum = ip6_compute_pseudo(skb, proto);
+            return 0;
+        }
     }

     /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)

@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,

     pr_debug("uri: %s, len: %zu\n", uri, uri_len);

+    /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
+    if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
+        return NULL;
+
     sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
     if (sdreq == NULL)
         return NULL;

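The bound in the new check, worked through (layout per the comment above; the byte names are descriptive only):

    /* tlv_len is a u8, so the encoded TLV must fit in 255 bytes:
     *
     *   header (3) + uri (uri_len) + NUL (1) <= 255
     *   =>  uri_len <= 255 - 4 == U8_MAX - 4 == 251
     *
     * A longer URI would wrap the u8 length and corrupt the TLV.
     */
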
@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
 };

 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
-    [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
+    [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
+                   .len = U8_MAX - 4 },
     [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
 };

@@ -224,7 +224,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
         if (rds_destroy_pending(conn))
             ret = -ENETDOWN;
         else
-            ret = trans->conn_alloc(conn, gfp);
+            ret = trans->conn_alloc(conn, GFP_ATOMIC);
         if (ret) {
             rcu_read_unlock();
             kfree(conn->c_path);

@@ -517,9 +517,10 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
         ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                    sizeof(unsigned int), &id32);
     } else {
+        unsigned long idl = call->user_call_ID;
+
         ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
-                   sizeof(unsigned long),
-                   &call->user_call_ID);
+                   sizeof(unsigned long), &idl);
     }
     if (ret < 0)
         goto error_unlock_call;

diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
@@ -376,17 +376,12 @@ struct tcf_net {
 static unsigned int tcf_net_id;
 
 static int tcf_block_insert(struct tcf_block *block, struct net *net,
-			    u32 block_index, struct netlink_ext_ack *extack)
+			    struct netlink_ext_ack *extack)
 {
 	struct tcf_net *tn = net_generic(net, tcf_net_id);
-	int err;
 
-	err = idr_alloc_u32(&tn->idr, block, &block_index, block_index,
-			    GFP_KERNEL);
-	if (err)
-		return err;
-	block->index = block_index;
-	return 0;
+	return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
+			     GFP_KERNEL);
 }
 
 static void tcf_block_remove(struct tcf_block *block, struct net *net)
@@ -397,6 +392,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net)
 }
 
 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
+					  u32 block_index,
 					  struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block;
@@ -419,10 +415,13 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
 		err = -ENOMEM;
 		goto err_chain_create;
 	}
-	block->net = qdisc_net(q);
 	block->refcnt = 1;
-	block->q = q;
+	block->net = net;
+	block->index = block_index;
+
+	/* Don't store q pointer for blocks which are shared */
+	if (!tcf_block_shared(block))
+		block->q = q;
 	return block;
 
 err_chain_create:
@@ -518,13 +517,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 	}
 
 	if (!block) {
-		block = tcf_block_create(net, q, extack);
+		block = tcf_block_create(net, q, ei->block_index, extack);
 		if (IS_ERR(block))
			return PTR_ERR(block);
 		created = true;
-		if (ei->block_index) {
-			err = tcf_block_insert(block, net,
-					       ei->block_index, extack);
+		if (tcf_block_shared(block)) {
+			err = tcf_block_insert(block, net, extack);
 			if (err)
 				goto err_block_insert;
 		}
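The cls_api change keys shared blocks by a caller-chosen index: block->index is filled in at creation time and tcf_block_insert() then asks the IDR for exactly that slot (min and max both equal to block->index). A toy model of that exact-slot allocation (the table and all names below are invented stand-ins for the kernel IDR):

#include <stdio.h>

#define SLOTS 16

static void *table[SLOTS];

/* Claim exactly slot 'index' or fail, like idr_alloc_u32() called with
 * min == max == block->index. */
static int toy_idr_alloc_exact(void *ptr, unsigned int index)
{
	if (index >= SLOTS)
		return -1;	/* out of range, akin to -ENOSPC */
	if (table[index])
		return -1;	/* slot already taken */
	table[index] = ptr;
	return 0;
}

int main(void)
{
	int block;

	printf("%d\n", toy_idr_alloc_exact(&block, 5));	/* 0: claimed */
	printf("%d\n", toy_idr_alloc_exact(&block, 5));	/* -1: duplicate */
	return 0;
}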
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
@@ -96,7 +96,7 @@ struct tc_u_hnode {
 
 struct tc_u_common {
 	struct tc_u_hnode __rcu	*hlist;
-	struct tcf_block	*block;
+	void			*ptr;
 	int			refcnt;
 	struct idr		handle_idr;
 	struct hlist_node	hnode;
@@ -330,9 +330,25 @@ static struct hlist_head *tc_u_common_hash;
 #define U32_HASH_SHIFT 10
 #define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
 
+static void *tc_u_common_ptr(const struct tcf_proto *tp)
+{
+	struct tcf_block *block = tp->chain->block;
+
+	/* The block sharing is currently supported only
+	 * for classless qdiscs. In that case we use block
+	 * for tc_u_common identification. In case the
+	 * block is not shared, block->q is a valid pointer
+	 * and we can use that. That works for classful qdiscs.
+	 */
+	if (tcf_block_shared(block))
+		return block;
+	else
+		return block->q;
+}
+
 static unsigned int tc_u_hash(const struct tcf_proto *tp)
 {
-	return hash_ptr(tp->chain->block, U32_HASH_SHIFT);
+	return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
 }
 
 static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
@@ -342,7 +358,7 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
 
 	h = tc_u_hash(tp);
 	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
-		if (tc->block == tp->chain->block)
+		if (tc->ptr == tc_u_common_ptr(tp))
 			return tc;
 	}
 	return NULL;
@@ -371,7 +387,7 @@ static int u32_init(struct tcf_proto *tp)
 			kfree(root_ht);
 			return -ENOBUFS;
 		}
-		tp_c->block = tp->chain->block;
+		tp_c->ptr = tc_u_common_ptr(tp);
 		INIT_HLIST_NODE(&tp_c->hnode);
 		idr_init(&tp_c->handle_idr);
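The cls_u32 hunks switch tc_u_common lookup from the block pointer to whichever pointer actually identifies the owner: the block itself when it is shared between qdiscs, otherwise block->q. A small sketch of that keying decision (hash_key() only approximates the spirit of hash_ptr(); every name is invented):

#include <stdint.h>
#include <stdio.h>

#define HASH_SHIFT 10

struct toy_block { int shared; void *q; };

/* Shared blocks have no single owning qdisc, so the block is the key;
 * otherwise block->q is valid and identifies the classful owner. */
static void *common_ptr(struct toy_block *block)
{
	return block->shared ? (void *)block : block->q;
}

/* Multiplicative pointer hash, similar in spirit to hash_ptr(). */
static unsigned int hash_key(void *p)
{
	return (unsigned int)(((uint64_t)(uintptr_t)p *
			       UINT64_C(0x9E3779B97F4A7C15)) >>
			      (64 - HASH_SHIFT));
}

int main(void)
{
	int qdisc;
	struct toy_block shared = { 1, NULL }, owned = { 0, &qdisc };

	printf("%u %u\n", hash_key(common_ptr(&shared)),
	       hash_key(common_ptr(&owned)));
	return 0;
}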
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
@@ -81,6 +81,12 @@ const char *sctp_cname(const union sctp_subtype cid)
 	case SCTP_CID_RECONF:
 		return "RECONF";
 
+	case SCTP_CID_I_DATA:
+		return "I_DATA";
+
+	case SCTP_CID_I_FWD_TSN:
+		return "I_FWD_TSN";
+
 	default:
 		break;
 	}
diff --git a/net/sctp/input.c b/net/sctp/input.c
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
 	rhl_for_each_entry_rcu(transport, tmp, list, node)
 		if (transport->asoc->ep == t->asoc->ep) {
 			rcu_read_unlock();
-			err = -EEXIST;
-			goto out;
+			return -EEXIST;
 		}
 	rcu_read_unlock();
 
 	err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
 				  &t->node, sctp_hash_params);
-
-out:
 	if (err)
 		pr_err_once("insert transport fail, errno %d\n", err);
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
@@ -6,7 +6,7 @@
  *
  * This file is part of the SCTP kernel implementation
  *
- * These functions manipulate sctp tsn mapping array.
+ * This file contains sctp stream maniuplation primitives and helpers.
  *
  * This SCTP implementation is free software;
  * you can redistribute it and/or modify it under the terms of
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
@@ -3,7 +3,8 @@
  *
  * This file is part of the SCTP kernel implementation
  *
- * These functions manipulate sctp stream queue/scheduling.
+ * These functions implement sctp stream message interleaving, mostly
+ * including I-DATA and I-FORWARD-TSN chunks process.
  *
  * This SCTP implementation is free software;
  * you can redistribute it and/or modify it under the terms of
@@ -954,12 +955,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	__u32 freed = 0;
 	__u16 needed;
 
-	if (chunk) {
-		needed = ntohs(chunk->chunk_hdr->length);
-		needed -= sizeof(struct sctp_idata_chunk);
-	} else {
-		needed = SCTP_DEFAULT_MAXWINDOW;
-	}
+	needed = ntohs(chunk->chunk_hdr->length) -
+		 sizeof(struct sctp_idata_chunk);
 
 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
 		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
@@ -971,9 +968,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 					       needed);
 	}
 
-	if (chunk && freed >= needed)
-		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
-			sctp_intl_start_pd(ulpq, gfp);
+	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
+		sctp_intl_start_pd(ulpq, gfp);
 
 	sk_mem_reclaim(asoc->base.sk);
 }
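In the interleaving code sctp_renege_events() is only ever reached with a real I-DATA chunk, so the NULL fallback to SCTP_DEFAULT_MAXWINDOW was dead code, and folding the two conditions also keeps sctp_ulpevent_idata() from running when too little space was reclaimed. A tiny sketch of the surviving computation (the struct is a simplified stand-in for sctp_idata_chunk; only its size matters):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct sctp_idata_chunk. */
struct toy_idata_chunk { uint8_t hdr[16]; };

/* The chunk header carries its length big-endian, hence ntohs(). */
static uint16_t needed_bytes(uint16_t wire_len_be)
{
	return ntohs(wire_len_be) - sizeof(struct toy_idata_chunk);
}

int main(void)
{
	printf("%u\n", (unsigned)needed_bytes(htons(100)));	/* prints 84 */
	return 0;
}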
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
@@ -813,7 +813,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
-int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 {
 	int err;
 	char *name;
@@ -835,20 +835,27 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 
 	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
-	rtnl_lock();
 	bearer = tipc_bearer_find(net, name);
-	if (!bearer) {
-		rtnl_unlock();
+	if (!bearer)
 		return -EINVAL;
-	}
 
 	bearer_disable(net, bearer);
-	rtnl_unlock();
 
 	return 0;
 }
 
-int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tipc_nl_bearer_disable(skb, info);
+	rtnl_unlock();
+
+	return err;
+}
+
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
 {
 	int err;
 	char *bearer;
@@ -890,15 +897,18 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
 	}
 
-	rtnl_lock();
-	err = tipc_enable_bearer(net, bearer, domain, prio, attrs);
-	if (err) {
-		rtnl_unlock();
-		return err;
-	}
-	rtnl_unlock();
-
-	return 0;
+	return tipc_enable_bearer(net, bearer, domain, prio, attrs);
+}
+
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tipc_nl_bearer_enable(skb, info);
+	rtnl_unlock();
+
+	return err;
 }
 
 int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
@@ -944,7 +954,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 }
 
-int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
 {
 	int err;
 	char *name;
@@ -965,22 +975,17 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
 		return -EINVAL;
 	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
-	rtnl_lock();
 	b = tipc_bearer_find(net, name);
-	if (!b) {
-		rtnl_unlock();
+	if (!b)
 		return -EINVAL;
-	}
 
 	if (attrs[TIPC_NLA_BEARER_PROP]) {
 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 
 		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
 					      props);
-		if (err) {
-			rtnl_unlock();
+		if (err)
 			return err;
-		}
 
 		if (props[TIPC_NLA_PROP_TOL])
 			b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -989,11 +994,21 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
 		if (props[TIPC_NLA_PROP_WIN])
 			b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
 	}
-	rtnl_unlock();
 
 	return 0;
 }
 
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tipc_nl_bearer_set(skb, info);
+	rtnl_unlock();
+
+	return err;
+}
+
 static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
 			       struct tipc_media *media, int nlflags)
 {
@@ -1115,7 +1130,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
-int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
 {
 	int err;
 	char *name;
@@ -1133,22 +1148,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
 		return -EINVAL;
 	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
 
-	rtnl_lock();
 	m = tipc_media_find(name);
-	if (!m) {
-		rtnl_unlock();
+	if (!m)
 		return -EINVAL;
-	}
 
 	if (attrs[TIPC_NLA_MEDIA_PROP]) {
 		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 
 		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
 					      props);
-		if (err) {
-			rtnl_unlock();
+		if (err)
 			return err;
-		}
 
 		if (props[TIPC_NLA_PROP_TOL])
 			m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -1157,7 +1167,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
 		if (props[TIPC_NLA_PROP_WIN])
 			m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
 	}
-	rtnl_unlock();
 
 	return 0;
 }
+
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tipc_nl_media_set(skb, info);
+	rtnl_unlock();
+
+	return err;
+}
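All of the TIPC hunks apply one refactoring: each netlink handler is split into a lockless __variant plus a thin wrapper that takes RTNL, so paths that already hold the lock (such as the compat layer further down) can call the __variant directly instead of deadlocking or running unprotected. A compact sketch of the pattern with a pthread mutex standing in for rtnl_lock() (every name here is invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int cfg_value;

/* Caller must hold cfg_lock, like __tipc_nl_bearer_set with RTNL. */
static int __cfg_set(int v)
{
	cfg_value = v;
	return 0;
}

/* Public entry point: take the lock, delegate, release. */
static int cfg_set(int v)
{
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = __cfg_set(v);
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

/* A compat-style path that batches work under one lock acquisition,
 * mirroring __tipc_nl_compat_doit() holding RTNL across its doit call. */
static int compat_set_twice(int a, int b)
{
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = __cfg_set(a);
	if (!err)
		err = __cfg_set(b);
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	int e1 = cfg_set(1);
	int e2 = compat_set_twice(2, 3);

	printf("%d %d %d\n", e1, e2, cfg_value);
	return 0;
}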
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
@@ -188,15 +188,19 @@ extern struct tipc_media udp_media_info;
 #endif
 
 int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
 
 int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
 
 int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
diff --git a/net/tipc/net.c b/net/tipc/net.c
@@ -200,7 +200,7 @@ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
-int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
 {
 	struct net *net = sock_net(skb->sk);
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -241,10 +241,19 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
 		if (!tipc_addr_node_valid(addr))
 			return -EINVAL;
 
-		rtnl_lock();
 		tipc_net_start(net, addr);
-		rtnl_unlock();
 	}
 
 	return 0;
 }
 
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tipc_nl_net_set(skb, info);
+	rtnl_unlock();
+
+	return err;
+}
diff --git a/net/tipc/net.h b/net/tipc/net.h
@@ -47,5 +47,6 @@ void tipc_net_stop(struct net *net);
 
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
 
 #endif
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
@@ -285,10 +285,6 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!trans_buf)
 		return -ENOMEM;
 
-	err = (*cmd->transcode)(cmd, trans_buf, msg);
-	if (err)
-		goto trans_out;
-
 	attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
 			  sizeof(struct nlattr *), GFP_KERNEL);
 	if (!attrbuf) {
@@ -296,27 +292,34 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
 		goto trans_out;
 	}
 
-	err = nla_parse(attrbuf, tipc_genl_family.maxattr,
-			(const struct nlattr *)trans_buf->data,
-			trans_buf->len, NULL, NULL);
-	if (err)
-		goto parse_out;
-
 	doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!doit_buf) {
 		err = -ENOMEM;
-		goto parse_out;
+		goto attrbuf_out;
 	}
 
-	doit_buf->sk = msg->dst_sk;
-
 	memset(&info, 0, sizeof(info));
 	info.attrs = attrbuf;
 
+	rtnl_lock();
+	err = (*cmd->transcode)(cmd, trans_buf, msg);
+	if (err)
+		goto doit_out;
+
+	err = nla_parse(attrbuf, tipc_genl_family.maxattr,
+			(const struct nlattr *)trans_buf->data,
+			trans_buf->len, NULL, NULL);
+	if (err)
+		goto doit_out;
+
+	doit_buf->sk = msg->dst_sk;
+
 	err = (*cmd->doit)(doit_buf, &info);
+doit_out:
+	rtnl_unlock();
 
 	kfree_skb(doit_buf);
-parse_out:
+attrbuf_out:
 	kfree(attrbuf);
 trans_out:
 	kfree_skb(trans_buf);
@@ -722,13 +725,13 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
 	media = tipc_media_find(lc->name);
 	if (media) {
-		cmd->doit = &tipc_nl_media_set;
+		cmd->doit = &__tipc_nl_media_set;
 		return tipc_nl_compat_media_set(skb, msg);
 	}
 
 	bearer = tipc_bearer_find(msg->net, lc->name);
 	if (bearer) {
-		cmd->doit = &tipc_nl_bearer_set;
+		cmd->doit = &__tipc_nl_bearer_set;
 		return tipc_nl_compat_bearer_set(skb, msg);
 	}
 
@@ -1089,12 +1092,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
 		return tipc_nl_compat_dumpit(&dump, msg);
 	case TIPC_CMD_ENABLE_BEARER:
 		msg->req_type = TIPC_TLV_BEARER_CONFIG;
-		doit.doit = tipc_nl_bearer_enable;
+		doit.doit = __tipc_nl_bearer_enable;
 		doit.transcode = tipc_nl_compat_bearer_enable;
 		return tipc_nl_compat_doit(&doit, msg);
 	case TIPC_CMD_DISABLE_BEARER:
 		msg->req_type = TIPC_TLV_BEARER_NAME;
-		doit.doit = tipc_nl_bearer_disable;
+		doit.doit = __tipc_nl_bearer_disable;
 		doit.transcode = tipc_nl_compat_bearer_disable;
 		return tipc_nl_compat_doit(&doit, msg);
 	case TIPC_CMD_SHOW_LINK_STATS:
@@ -1148,12 +1151,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
 		return tipc_nl_compat_dumpit(&dump, msg);
 	case TIPC_CMD_SET_NODE_ADDR:
 		msg->req_type = TIPC_TLV_NET_ADDR;
-		doit.doit = tipc_nl_net_set;
+		doit.doit = __tipc_nl_net_set;
 		doit.transcode = tipc_nl_compat_net_set;
 		return tipc_nl_compat_doit(&doit, msg);
 	case TIPC_CMD_SET_NETID:
 		msg->req_type = TIPC_TLV_UNSIGNED;
-		doit.doit = tipc_nl_net_set;
+		doit.doit = __tipc_nl_net_set;
 		doit.transcode = tipc_nl_compat_net_set;
 		return tipc_nl_compat_doit(&doit, msg);
 	case TIPC_CMD_GET_NETID:
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
@@ -308,8 +308,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
 			goto out;
 		}
 		lock_sock(sk);
-		memcpy(crypto_info_aes_gcm_128->iv, ctx->iv,
+		memcpy(crypto_info_aes_gcm_128->iv,
+		       ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
 		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
+		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq,
+		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 		release_sock(sk);
 		if (copy_to_user(optval,
 				 crypto_info_aes_gcm_128,
@@ -375,7 +378,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
 	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
 	if (rc) {
 		rc = -EFAULT;
-		goto out;
+		goto err_crypto_info;
 	}
 
 	/* check version */
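In the kernel's TLS context the 4-byte salt and the 8-byte explicit IV are kept concatenated in ctx->iv, so getsockopt was returning the salt instead of the IV, and the record sequence number not at all. A toy model of the corrected copy (the size constants match the UAPI values; the struct is a simplified stand-in for the kernel context):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SALT_SIZE 4	/* TLS_CIPHER_AES_GCM_128_SALT_SIZE */
#define IV_SIZE   8	/* TLS_CIPHER_AES_GCM_128_IV_SIZE */
#define SEQ_SIZE  8	/* TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE */

/* Simplified stand-in: iv holds the salt followed by the explicit IV. */
struct toy_tls_ctx {
	uint8_t iv[SALT_SIZE + IV_SIZE];
	uint8_t rec_seq[SEQ_SIZE];
};

static void get_crypto_info(const struct toy_tls_ctx *ctx,
			    uint8_t iv_out[IV_SIZE],
			    uint8_t seq_out[SEQ_SIZE])
{
	/* The bug copied from ctx->iv (the salt); the fix skips past it. */
	memcpy(iv_out, ctx->iv + SALT_SIZE, IV_SIZE);
	/* And the record sequence number was not returned at all before. */
	memcpy(seq_out, ctx->rec_seq, SEQ_SIZE);
}

int main(void)
{
	struct toy_tls_ctx ctx = {
		.iv = { 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
		.rec_seq = { 0, 0, 0, 0, 0, 0, 0, 7 },
	};
	uint8_t iv[IV_SIZE], seq[SEQ_SIZE];

	get_crypto_info(&ctx, iv, seq);
	printf("iv[0]=%u seq[7]=%u\n", iv[0], seq[7]);	/* 2 and 7 */
	return 0;
}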
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
@@ -1825,7 +1825,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
 }
 
 /* We use paged skbs for stream sockets, and limit occupancy to 32768
- * bytes, and a minimun of a full page.
+ * bytes, and a minimum of a full page.
  */
 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))