Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit f787d1debf
@@ -825,6 +825,7 @@ CONFIG_QCOM_SMSM=y
CONFIG_QCOM_WCNSS_CTRL=m
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_CLK_RPM=y
CONFIG_CHROME_PLATFORMS=y
CONFIG_STAGING_BOARD=y
CONFIG_CROS_EC_CHARDEV=m

@@ -478,11 +478,10 @@ extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned int __ua_flags;

check_object_size(to, n, false);
__ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
@@ -495,18 +494,15 @@ extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
unsigned int __ua_flags;

check_object_size(from, n, true);
__ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
check_object_size(from, n, true);
return arm_copy_to_user(to, from, n);
#endif
}
@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
}

#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
#define __arch_copy_from_user(to, from, n) \
(memcpy(to, (void __force *)from, n), 0)
#define __arch_copy_to_user(to, from, n) \
(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
check_object_size(to, n, false);
return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;

check_object_size(to, n, false);

if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_from_user(to, from, n);
res = __arch_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
check_object_size(from, n, true);

return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
check_object_size(from, n, true);

if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
n = __arch_copy_to_user(to, from, n);
return n;
}

@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
ENDPROC(__get_user_4)

ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
check_uaccess r0, 8, r1, r2, __get_user_bad8
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(ldr) r2, [r0]
6: TUSER(ldr) r3, [r0, #4]

@@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix);
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
if (disable_radix)
/* We don't yet have the machinery to do radix as a guest. */
if (disable_radix || !(mfmsr() & MSR_HV))
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

if (early_radix_enabled())

@@ -160,11 +160,12 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)

static void mark_screen_rdonly(struct mm_struct *mm)
{
struct vm_area_struct *vma;
spinlock_t *ptl;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
int i;

down_write(&mm->mmap_sem);
@@ -177,7 +178,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pmd = pmd_offset(pud, 0xA0000);

if (pmd_trans_huge(*pmd)) {
struct vm_area_struct *vma = find_vma(mm, 0xA0000);
vma = find_vma(mm, 0xA0000);
split_huge_pmd(vma, pmd, 0xA0000);
}
if (pmd_none_or_clear_bad(pmd))

@@ -3758,7 +3758,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
static bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
@@ -3775,15 +3775,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
* spuriously on a newly created cic but there's no harm.
*/
if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
return;

/*
* If we have a non-root cgroup, we can depend on that to
* do proper throttling of writes. Turn off wbt for that
* case, if it was enabled by default.
*/
if (nonroot_cg)
wbt_disable_default(cfqd->queue);
return nonroot_cg;

/*
* Drop reference to queues. New queues will be assigned in new
@@ -3804,9 +3796,13 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
}

cic->blkcg_serial_nr = serial_nr;
return nonroot_cg;
}
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
static inline bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
return false;
}
#endif /* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue **
@@ -4448,11 +4444,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
const int rw = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
bool disable_wbt;

spin_lock_irq(q->queue_lock);

check_ioprio_changed(cic, bio);
check_blkcg_changed(cic, bio);
disable_wbt = check_blkcg_changed(cic, bio);
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
@@ -4488,6 +4485,10 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
rq->elv.priv[0] = cfqq;
rq->elv.priv[1] = cfqq->cfqg;
spin_unlock_irq(q->queue_lock);

if (disable_wbt)
wbt_disable_default(q);

return 0;
}

@@ -1817,7 +1817,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0;
}

@@ -205,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
}

if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
y >= (crtc->y + crtc->mode.crtc_vdisplay))
x >= (crtc->x + crtc->mode.hdisplay) ||
y >= (crtc->y + crtc->mode.vdisplay))
goto out_of_bounds;

x += xorigin;

@@ -475,30 +475,28 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 ic_tar = 0;
u32 ic_con, ic_tar = 0;

/* Disable the adapter */
__i2c_dw_enable_and_wait(dev, false);

/* if the slave address is ten bit address, enable 10BITADDR */
if (dev->dynamic_tar_update_enabled) {
ic_con = dw_readl(dev, DW_IC_CON);
if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
ic_con |= DW_IC_CON_10BITADDR_MASTER;
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
* mode has to be enabled via bit 12 of IC_TAR register,
* otherwise bit 4 of IC_CON is used.
* mode has to be enabled via bit 12 of IC_TAR register.
* We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
* detected from registers.
*/
if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
ic_tar = DW_IC_TAR_10BITADDR_MASTER;
ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
u32 ic_con = dw_readl(dev, DW_IC_CON);

if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
ic_con |= DW_IC_CON_10BITADDR_MASTER;
else
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
dw_writel(dev, ic_con, DW_IC_CON);
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
}

dw_writel(dev, ic_con, DW_IC_CON);

/*
* Set the slave (target) address and enable 10-bit addressing mode
* if applicable.
@@ -963,7 +961,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
int r;
u32 reg;

init_completion(&dev->cmd_complete);
@@ -971,26 +968,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
if (r)
return r;

r = i2c_dw_acquire_lock(dev);
if (r)
return r;

/*
* Test if dynamic TAR update is enabled in this controller by writing
* to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this
* field is read-only so it should not succeed
*/
reg = dw_readl(dev, DW_IC_CON);
dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);

if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
(reg & DW_IC_CON_10BITADDR_MASTER)) {
dev->dynamic_tar_update_enabled = true;
dev_dbg(dev->dev, "Dynamic TAR update enabled");
}

i2c_dw_release_lock(dev);

snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;

@@ -125,7 +125,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
bool dynamic_tar_update_enabled;
};

#define ACCESS_SWAP 0x00000001

@@ -1231,6 +1231,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
{ "ELAN0605", 0 },
{ "ELAN1000", 0 },
{ }
};

@@ -1706,10 +1706,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_hs400(card);
if (err)
goto free_card;
} else if (mmc_card_hs(card)) {
} else {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
if (err > 0) {
if (err > 0 && mmc_card_hs(card)) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;

@@ -1666,7 +1666,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,

free_buffers:
/* compensate sw bpool counter changes */
for (i--; i > 0; i--) {
for (i--; i >= 0; i--) {
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (dpaa_bp) {
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);

@@ -2489,7 +2489,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)

rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, dport, sport, NULL, info);
&info->key.u.ipv4.src, dport, sport,
&info->dst_cache, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@@ -2500,7 +2501,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)

ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, dport, sport, NULL, info);
&info->key.u.ipv6.src, dport, sport,
&info->dst_cache, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);

@@ -1629,6 +1629,28 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev)

/* Skylake Xeon NTB */

static int skx_poll_link(struct intel_ntb_dev *ndev)
{
u16 reg_val;
int rc;

ndev->reg->db_iowrite(ndev->db_link_mask,
ndev->self_mmio +
ndev->self_reg->db_clear);

rc = pci_read_config_word(ndev->ntb.pdev,
SKX_LINK_STATUS_OFFSET, &reg_val);
if (rc)
return 0;

if (reg_val == ndev->lnk_sta)
return 0;

ndev->lnk_sta = reg_val;

return 1;
}

static u64 skx_db_ioread(void __iomem *mmio)
{
return ioread64(mmio);
@@ -2852,7 +2874,7 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = {
};

static const struct intel_ntb_reg skx_reg = {
.poll_link = xeon_poll_link,
.poll_link = skx_poll_link,
.link_is_up = xeon_link_is_up,
.db_ioread = skx_db_ioread,
.db_iowrite = skx_db_iowrite,

@@ -1802,7 +1802,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,

node = dev_to_node(&ndev->dev);

free_queue = ffs(nt->qp_bitmap);
free_queue = ffs(nt->qp_bitmap_free);
if (!free_queue)
goto err;
@@ -2273,9 +2273,8 @@ module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
debugfs_remove_recursive(nt_debugfs_dir);

ntb_unregister_client(&ntb_transport_client);
bus_unregister(&ntb_transport_bus);
debugfs_remove_recursive(nt_debugfs_dir);
}
module_exit(ntb_transport_exit);

@@ -265,6 +265,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
if (dma_submit_error(cookie))
goto err_set_unmap;

dmaengine_unmap_put(unmap);

atomic_inc(&pctx->dma_sync);
dma_async_issue_pending(chan);

@@ -163,7 +163,7 @@ int reset_control_reset(struct reset_control *rstc)
}

ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
if (rstc->shared && !ret)
if (rstc->shared && ret)
atomic_dec(&rstc->triggered_count);

return ret;

@@ -3323,4 +3323,4 @@ static int __init futex_init(void)

return 0;
}
__initcall(futex_init);
core_initcall(futex_init);

@@ -1516,7 +1516,7 @@ static void call_console_drivers(int level,
{
struct console *con;

trace_console(text, len);
trace_console_rcuidle(text, len);

if (!console_drivers)
return;

@@ -347,17 +347,16 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
*
* Called when the system enters a state where affected tick devices
* might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
*
* Called with interrupts disabled, so clockevents_lock is not
* required here because the local clock event device cannot go away
* under us.
*/
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
struct clock_event_device *bc, *dev;
struct tick_device *td;
int cpu, bc_stopped;
unsigned long flags;

/* Protects also the local clockevent device. */
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
td = this_cpu_ptr(&tick_cpu_device);
dev = td->evtdev;
@@ -365,12 +364,11 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
* Is the device not affected by the powerstate ?
*/
if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
return;
goto out;

if (!tick_device_is_functional(dev))
return;
goto out;

raw_spin_lock(&tick_broadcast_lock);
cpu = smp_processor_id();
bc = tick_broadcast_device.evtdev;
bc_stopped = cpumask_empty(tick_broadcast_mask);
@@ -420,7 +418,8 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
tick_broadcast_setup_oneshot(bc);
}
}
raw_spin_unlock(&tick_broadcast_lock);
out:
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

@@ -725,11 +725,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
*/
if (delta == 0) {
tick_nohz_restart(ts, now);
/*
* Make sure next tick stop doesn't get fooled by past
* clock deadline
*/
ts->next_tick = 0;
goto out;
}
}
@@ -772,7 +767,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
tick = expires;

/* Skip reprogram of event if its not changed */
if (ts->tick_stopped && (expires == ts->next_tick))
if (ts->tick_stopped && (expires == dev->next_event))
goto out;

/*
@@ -792,8 +787,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
trace_tick_stop(1, TICK_DEP_MASK_NONE);
}

ts->next_tick = tick;

/*
* If the expiration time == KTIME_MAX, then we simply stop
* the tick timer.
@@ -809,10 +802,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
else
tick_program_event(tick, 1);
out:
/*
* Update the estimated sleep length until the next timer
* (not only the tick).
*/
/* Update the estimated sleep length */
ts->sleep_length = ktime_sub(dev->next_event, now);
return tick;
}

@@ -27,7 +27,6 @@ enum tick_nohz_mode {
* timer is modified for nohz sleeps. This is necessary
* to resume the tick timer operation in the timeline
* when the CPU returns from nohz sleep.
* @next_tick: Next tick to be fired when in dynticks mode.
* @tick_stopped: Indicator that the idle tick has been stopped
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_calls: Total number of idle calls
@@ -45,7 +44,6 @@ struct tick_sched {
unsigned long check_clocks;
enum tick_nohz_mode nohz_mode;
ktime_t last_tick;
ktime_t next_tick;
int inidle;
int tick_stopped;
unsigned long idle_jiffies;

@@ -75,7 +75,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
int bin = min(fls(t->tv_sec), NUM_BINS-1);

sleep_time_bin[bin]++;
pr_info("Suspended for %lld.%03lu seconds\n", (s64)t->tv_sec,
t->tv_nsec / NSEC_PER_MSEC);
printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n",
(s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
}

@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
skb) < 0)
return 1;
goto discard;
consume_skb(skb);
return 0;
}
if (dh->dccph_type == DCCP_PKT_RESET)
goto discard;

@@ -1023,8 +1023,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
}
#endif
if (ipv6_addr_v4mapped(&fl6->saddr) &&
!(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
return -EAFNOSUPPORT;
!(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
err = -EAFNOSUPPORT;
goto out_err_release;
}

return 0;

@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
* for deallocating this structure if it's complex. If not the user can
* just supply kfree, which should take care of the job.
*/
#ifdef CONFIG_LOCKDEP
static int hashbin_lock_depth = 0;
#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
irda_queue_t* queue;
@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

/* Synchronize */
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
hashbin_lock_depth++);
}
if (hashbin->hb_type & HB_LOCK)
spin_lock_irqsave(&hashbin->hb_spinlock, flags);

/*
* Free the entries in the hashbin, TODO: use hashbin_clear when
* it has been shown to work
*/
for (i = 0; i < HASHBIN_SIZE; i ++ ) {
queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
while (queue ) {
if (free_func)
(*free_func)(queue);
queue = dequeue_first(
(irda_queue_t**) &hashbin->hb_queue[i]);
while (1) {
queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);

if (!queue)
break;

if (free_func) {
if (hashbin->hb_type & HB_LOCK)
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
free_func(queue);
if (hashbin->hb_type & HB_LOCK)
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
}
}
}
@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
hashbin->magic = ~HB_MAGIC;

/* Release lock */
if ( hashbin->hb_type & HB_LOCK) {
if (hashbin->hb_type & HB_LOCK)
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
#ifdef CONFIG_LOCKDEP
hashbin_lock_depth--;
#endif
}

/*
* Free the hashbin structure

@@ -1505,6 +1505,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
f->arr[f->num_members] = sk;
smp_wmb();
f->num_members++;
if (f->num_members == 1)
dev_add_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
@@ -1521,6 +1523,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
BUG_ON(i >= f->num_members);
f->arr[i] = f->arr[f->num_members - 1];
f->num_members--;
if (f->num_members == 0)
__dev_remove_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
@@ -1701,7 +1705,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.id_match = match_fanout_group;
dev_add_pack(&match->prot_hook);
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
@@ -1726,7 +1729,12 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
return err;
}

static void fanout_release(struct sock *sk)
/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
* pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
* It is the responsibility of the caller to call fanout_release_data() and
* free the returned packet_fanout (after synchronize_net())
*/
static struct packet_fanout *fanout_release(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f;
@@ -1736,17 +1744,17 @@ static void fanout_release(struct sock *sk)
if (f) {
po->fanout = NULL;

if (atomic_dec_and_test(&f->sk_ref)) {
if (atomic_dec_and_test(&f->sk_ref))
list_del(&f->list);
dev_remove_pack(&f->prot_hook);
fanout_release_data(f);
kfree(f);
}
else
f = NULL;

if (po->rollover)
kfree_rcu(po->rollover, rcu);
}
mutex_unlock(&fanout_mutex);

return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
@@ -2933,6 +2941,7 @@ static int packet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct packet_sock *po;
struct packet_fanout *f;
struct net *net;
union tpacket_req_u req_u;
@@ -2972,9 +2981,14 @@ static int packet_release(struct socket *sock)
packet_set_ring(sk, &req_u, 1, 1);
}

fanout_release(sk);
f = fanout_release(sk);

synchronize_net();

if (f) {
fanout_release_data(f);
kfree(f);
}
/*
* Now the socket is dead. No more input will appear.
*/
@@ -3926,7 +3940,6 @@ static int packet_notifier(struct notifier_block *this,
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
fanout_release(sk);
po->ifindex = -1;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);