mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 18a4d8bf25
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*

@@ -212,10 +212,11 @@ K256:
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
	sub	r3,pc,#8	@ sha256_block_data_order
 #else
-	adr	r3,sha256_block_data_order
+	adr	r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap

@@ -93,10 +93,11 @@ K256:
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
	sub	r3,pc,#8	@ sha256_block_data_order
 #else
-	adr	r3,sha256_block_data_order
+	adr	r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap

@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
	sub	r3,pc,#8	@ sha512_block_data_order
 #else
-	adr	r3,sha512_block_data_order
+	adr	r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap

@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
	sub	r3,pc,#8	@ sha512_block_data_order
 #else
-	adr	r3,sha512_block_data_order
+	adr	r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap

@@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon)
	mov	w3, w2
	bl	chacha_permute
 
-	st1	{v0.16b}, [x1], #16
-	st1	{v3.16b}, [x1]
+	st1	{v0.4s}, [x1], #16
+	st1	{v3.4s}, [x1]
 
	ldp	x29, x30, [sp], #16
	ret

@@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon)
	add	v3.4s, v3.4s, v19.4s
	  add	a2, a2, w8
	  add	a3, a3, w9
+CPU_BE(	  rev	a0, a0	)
+CPU_BE(	  rev	a1, a1	)
+CPU_BE(	  rev	a2, a2	)
+CPU_BE(	  rev	a3, a3	)
 
	ld4r	{v24.4s-v27.4s}, [x0], #16
	ld4r	{v28.4s-v31.4s}, [x0]

@@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon)
	add	v7.4s, v7.4s, v23.4s
	  add	a6, a6, w8
	  add	a7, a7, w9
+CPU_BE(	  rev	a4, a4	)
+CPU_BE(	  rev	a5, a5	)
+CPU_BE(	  rev	a6, a6	)
+CPU_BE(	  rev	a7, a7	)
 
	// x8[0-3] += s2[0]
	// x9[0-3] += s2[1]

@@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon)
	add	v11.4s, v11.4s, v27.4s
	  add	a10, a10, w8
	  add	a11, a11, w9
+CPU_BE(	  rev	a8, a8	)
+CPU_BE(	  rev	a9, a9	)
+CPU_BE(	  rev	a10, a10	)
+CPU_BE(	  rev	a11, a11	)
 
	// x12[0-3] += s3[0]
	// x13[0-3] += s3[1]

@@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon)
	add	v15.4s, v15.4s, v31.4s
	  add	a14, a14, w8
	  add	a15, a15, w9
+CPU_BE(	  rev	a12, a12	)
+CPU_BE(	  rev	a13, a13	)
+CPU_BE(	  rev	a14, a14	)
+CPU_BE(	  rev	a15, a15	)
 
	// interleave 32-bit words in state n, n+1
	ldp	w6, w7, [x2], #64

@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
 
 static int shared_device_registered;
 
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
 static struct resource enet0_res[] = {
	{
		.start = -1, /* filled at runtime */

@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
	.resource = enet0_res,
	.dev = {
		.platform_data = &enet0_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
 };
 

@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
	.resource = enet1_res,
	.dev = {
		.platform_data = &enet1_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
 };
 

@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
	.resource = enetsw_res,
	.dev = {
		.platform_data = &enetsw_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
 };
 

@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
 {
-	u32 mask, old32, new32, load32;
+	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;
-	u8 load;
 
	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

@@ -384,7 +384,8 @@ static void __init bootmem_init(void)
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
 
-	memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+	memblock_reserve(PHYS_OFFSET,
+			 (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
 
	/*
	 * max_low_pfn is not a number of pages. The number of pages

@@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev)
	dma_addr_t dma;
 
	cp1_base =
-		(void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
-						      &dma, GFP_ATOMIC));
+		(void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+						      &dma, GFP_KERNEL));
 
	gpio_count = of_gpio_count(pdev->dev.of_node);
	while (gpio_count > 0) {

@@ -841,7 +841,7 @@ union hv_gpa_page_range {
  * count is equal with how many entries of union hv_gpa_page_range can
  * be populated into the input parameter page.
  */
-#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) /	\
+#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) /	\
		sizeof(union hv_gpa_page_range))
 
 struct hv_guest_mapping_flush_list {

@@ -284,7 +284,7 @@ do { \
		__put_user_goto(x, ptr, "l", "k", "ir", label); \
		break; \
	case 8: \
-		__put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
+		__put_user_goto_u64(x, ptr, label); \
		break; \
	default: \
		__put_user_bad(); \

@@ -431,8 +431,10 @@ do { \
 ({ \
	__label__ __pu_label; \
	int __pu_err = -EFAULT; \
+	__typeof__(*(ptr)) __pu_val; \
+	__pu_val = x; \
	__uaccess_begin(); \
-	__put_user_size((x), (ptr), (size), __pu_label); \
+	__put_user_size(__pu_val, (ptr), (size), __pu_label); \
	__pu_err = 0; \
 __pu_label: \
	__uaccess_end(); \

@@ -117,67 +117,11 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL_GPL(ex_handler_fprestore);
 
-/* Helper to check whether a uaccess fault indicates a kernel bug. */
-static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
-			  unsigned long fault_addr)
-{
-	/* This is the normal case: #PF with a fault address in userspace. */
-	if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
-		return false;
-
-	/*
-	 * This code can be reached for machine checks, but only if the #MC
-	 * handler has already decided that it looks like a candidate for fixup.
-	 * This e.g. happens when attempting to access userspace memory which
-	 * the CPU can't access because of uncorrectable bad memory.
-	 */
-	if (trapnr == X86_TRAP_MC)
-		return false;
-
-	/*
-	 * There are two remaining exception types we might encounter here:
-	 *  - #PF for faulting accesses to kernel addresses
-	 *  - #GP for faulting accesses to noncanonical addresses
-	 * Complain about anything else.
-	 */
-	if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
-		WARN(1, "unexpected trap %d in uaccess\n", trapnr);
-		return false;
-	}
-
-	/*
-	 * This is a faulting memory access in kernel space, on a kernel
-	 * address, in a usercopy function. This can e.g. be caused by improper
-	 * use of helpers like __put_user and by improper attempts to access
-	 * userspace addresses in KERNEL_DS regions.
-	 * The one (semi-)legitimate exception are probe_kernel_{read,write}(),
-	 * which can be invoked from places like kgdb, /dev/mem (for reading)
-	 * and privileged BPF code (for reading).
-	 * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
-	 * to tell us that faulting on kernel addresses, and even noncanonical
-	 * addresses, in a userspace accessor does not necessarily imply a
-	 * kernel bug, root might just be doing weird stuff.
-	 */
-	if (current->kernel_uaccess_faults_ok)
-		return false;
-
-	/* This is bad. Refuse the fixup so that we go into die(). */
-	if (trapnr == X86_TRAP_PF) {
-		pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
-			 fault_addr);
-	} else {
-		pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
-	}
-	return true;
-}
-
 __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
 {
-	if (bogus_uaccess(regs, trapnr, fault_addr))
-		return false;
	regs->ip = ex_fixup_addr(fixup);
	return true;
 }

@@ -188,8 +132,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
			      unsigned long error_code,
			      unsigned long fault_addr)
 {
-	if (bogus_uaccess(regs, trapnr, fault_addr))
-		return false;
	/* Special hack for uaccess_err */
	current->thread.uaccess_err = 1;
	regs->ip = ex_fixup_addr(fixup);

@@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
	return 0;
 }
 
-static void cc_pm_go(struct cc_drvdata *drvdata) {}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
 
 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
 

@@ -405,6 +405,7 @@ struct amdgpu_crtc {
	struct amdgpu_flip_work *pflip_works;
	enum amdgpu_flip_status pflip_status;
	int deferred_flip_completion;
+	u64 last_flip_vblank;
	/* pll sharing */
	struct amdgpu_atom_ss ss;
	bool ss_enabled;

@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
		return;
	}
 
+	/* Update to correct count(s) if racing with vblank irq */
+	amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
 
	/* wake up userspace */
	if (amdgpu_crtc->event) {
-		/* Update to correct count(s) if racing with vblank irq */
-		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
 
		/* page flip completed. clean up */

@@ -4828,6 +4827,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0;
	unsigned long flags;
+	u64 last_flip_vblank;
+	bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
 
	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {

@@ -4859,6 +4860,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
			/* In commit tail framework this cannot happen */
			WARN_ON(1);
		}
+
+		/* For variable refresh rate mode only:
+		 * Get vblank of last completed flip to avoid > 1 vrr flips per
+		 * video frame by use of throttling, but allow flip programming
+		 * anywhere in the possibly large variable vrr vblank interval
+		 * for fine-grained flip timing control and more opportunity to
+		 * avoid stutter on late submission of amdgpu_dm_do_flip() calls.
+		 */
+		last_flip_vblank = acrtc_attach->last_flip_vblank;
+
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
		if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {

@@ -4882,10 +4893,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);
 
+			/* Use old throttling in non-vrr fixed refresh rate mode
+			 * to keep flip scheduling based on target vblank counts
+			 * working in a backwards compatible way, e.g., clients
+			 * using GLX_OML_sync_control extension.
+			 */
+			if (!vrr_active)
+				last_flip_vblank = drm_crtc_vblank_count(crtc);
+
			amdgpu_dm_do_flip(
				crtc,
				fb,
-				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+				(uint32_t) last_flip_vblank + *wait_for_vblank,
				dc_state);
		}
 

@@ -154,6 +154,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
	if (IS_ERR(dev))
		return PTR_ERR(dev);
 
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err_free_dev;
+
	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);
 

@@ -1602,6 +1602,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
	    old_plane_state->crtc != new_plane_state->crtc)
		return -EINVAL;
 
+	/*
+	 * FIXME: Since prepare_fb and cleanup_fb are always called on
+	 * the new_plane_state for async updates we need to block framebuffer
+	 * changes. This prevents use of a fb that's been cleaned up and
+	 * double cleanups from occuring.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		return -EINVAL;
+
	funcs = plane->helper_private;
	if (!funcs->atomic_async_update)
		return -EINVAL;

@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
	for (tmp = dev; tmp; tmp = tmp->bus->self)
		level++;
 
-	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+	size = sizeof(*info) + level * sizeof(info->path[0]);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {

@@ -2380,12 +2380,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");
 
-	if (mmc_card_mmc(card))
-		blk_queue_logical_block_size(md->queue.queue,
-					     card->ext_csd.data_sector_size);
-	else
-		blk_queue_logical_block_size(md->queue.queue, 512);
-
	set_capacity(md->disk, size);
 
	if (mmc_host_cmd23(card->host)) {

@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
	if (!data)
		return;
 
-	if (cmd->error || data->error ||
+	if ((cmd && cmd->error) || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;
 

@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
+	unsigned block_size = 512;
 
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+	if (mmc_card_mmc(card))
+		block_size = card->ext_csd.data_sector_size;
+
+	blk_queue_logical_block_size(mq->queue, block_size);
+	blk_queue_max_segment_size(mq->queue,
+			round_down(host->max_seg_size, block_size));
 
	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 
	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
-		(cq_host->num_slots - 1);
+		cq_host->mmc->cqe_qdepth;
 
	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,

@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
+	if (!cq_host->desc_base)
+		return -ENOMEM;
+
	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
					       cq_host->data_size,
					       &cq_host->trans_desc_dma_base,
					       GFP_KERNEL);
-	if (!cq_host->desc_base || !cq_host->trans_desc_base)
+	if (!cq_host->trans_desc_base) {
+		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+				   cq_host->desc_base,
+				   cq_host->desc_dma_base);
+		cq_host->desc_base = NULL;
+		cq_host->desc_dma_base = 0;
		return -ENOMEM;
+	}
 
	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,

@@ -1450,6 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi)
		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
		mmc_gpiod_request_cd_irq(mmc);
	}
+	mmc_detect_change(mmc, 0);
 
	/* Index 1 is write protect/read only */
	status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL);

@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
	.scc_offset = 0x0300,
	.taps = rcar_gen2_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+	.max_blk_count = 0xffffffff,
 };
 
 /* Definitions for sampling clocks */

@@ -1095,11 +1095,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
		writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
			| ESDHC_BURST_LEN_EN_INCR,
			host->ioaddr + SDHCI_HOST_CONTROL);
+
		/*
-		* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
-		* TO1.1, it's harmless for MX6SL
-		*/
-		writel(readl(host->ioaddr + 0x6c) | BIT(7),
+		 * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+		 * TO1.1, it's harmless for MX6SL
+		 */
+		writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
			host->ioaddr + 0x6c);
 
		/* disable DLL_CTRL delay line settings */

@@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
	iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
 static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
				       const u32 *buf, int count)
 {

@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mmc/sdio.h>
 #include <linux/scatterlist.h>
+#include <linux/sizes.h>
 #include <linux/spinlock.h>
 #include <linux/swiotlb.h>
 #include <linux/workqueue.h>

@@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
	return false;
 }
 
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 {
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;

@@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
	unsigned int sdio_status;
 
	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-		return;
+		return false;
 
	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

@@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 
	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
+
+	return ireg;
 }
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)

@@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;
 
-	__tmio_mmc_sdio_irq(host);
+	if (__tmio_mmc_sdio_irq(host))
+		return IRQ_HANDLED;
 
-	return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_irq);
 

@@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 
	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
-	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+	if (host->mmc->max_blk_count >= SZ_64K)
+		sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+	else
+		sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
	tmio_mmc_start_dma(host, data);
 

@@ -4247,7 +4247,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6190",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,

@@ -4270,7 +4270,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6190X",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,

@@ -4293,7 +4293,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6191",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.max_vid = 8191,
		.port_base_addr = 0x0,
		.phy_base_addr = 0x0,

@@ -4340,7 +4340,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6290",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,

@@ -4502,7 +4502,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6390",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,

@@ -4525,7 +4525,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
		.name = "Marvell 88E6390X",
		.num_databases = 4096,
		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
		.num_gpio = 16,
		.max_vid = 8191,
		.port_base_addr = 0x0,

@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
		/* normal duplex detection */
		break;
	default:
-		return -EINVAL;
+		return -EOPNOTSUPP;
	}
 
	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);

@@ -267,6 +267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
		struct phy *serdes;
		void __iomem *regs;
		char res_name[8];
+		int phy_mode;
		u32 port;
 
		if (of_property_read_u32(portnp, "reg", &port))

@@ -292,11 +293,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
		if (err)
			return err;
 
-		err = of_get_phy_mode(portnp);
-		if (err < 0)
+		phy_mode = of_get_phy_mode(portnp);
+		if (phy_mode < 0)
			ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
		else
-			ocelot->ports[port]->phy_mode = err;
+			ocelot->ports[port]->phy_mode = phy_mode;
 
		switch (ocelot->ports[port]->phy_mode) {
		case PHY_INTERFACE_MODE_NA:

@@ -304,6 +305,13 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
		case PHY_INTERFACE_MODE_SGMII:
			break;
+		case PHY_INTERFACE_MODE_QSGMII:
+			/* Ensure clock signals and speed is set on all
+			 * QSGMII links
+			 */
+			ocelot_port_writel(ocelot->ports[port],
+					   DEV_CLOCK_CFG_LINK_SPEED
+					   (OCELOT_SPEED_1000),
+					   DEV_CLOCK_CFG);
+			break;
		default:
			dev_err(ocelot->dev,

@@ -976,6 +976,13 @@ static const struct usb_device_id products[] = {
					      0xff),
	  .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
	},
+	{ /* Quectel EG12/EM12 */
+	  USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+					USB_CLASS_VENDOR_SPEC,
+					USB_SUBCLASS_VENDOR_SPEC,
+					0xff),
+	  .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+	},
 
	/* 3. Combined interface devices matching on interface number */
	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */

@@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
	return false;
 }
 
-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
 {
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
 
-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
-	    le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
-	    intf_desc.bNumEndpoints == 2)
+	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+		return false;
+
+	if (id_product == 0x0306 || id_product == 0x0512)
		return true;
-	else
-		return false;
+
+	return false;
 }
 
 static int qmi_wwan_probe(struct usb_interface *intf,

@@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
		return -ENODEV;
	}
 
-	/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+	/* Several Quectel modems supports dynamic interface configuration, so
	 * we need to match on class/subclass/protocol. These values are
	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
	 * different. Ignore the current interface if the number of endpoints
	 * the number for the diag interface (two).
	 */
-	if (quectel_ep06_diag_detected(intf))
+	if (quectel_diag_detected(intf))
		return -ENODEV;
 
	return usbnet_probe(intf, id);

@@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
-	int retval = -ENODEV;
+	int retval;
 
	retval = pci_enable_device(pdev);
	if (retval) {

@@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+		retval = -ENODEV;
		goto out_disable_device;

@@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+		retval = -ENODEV;
		goto out_disable_device;

@@ -1573,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+		retval = -ENODEV;
		goto out_disable_device;

@@ -1805,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+		retval = -ENODEV;
		goto out_disable_device;

@@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
	if (err)
		goto Err_remove;
 
-	err = -ENODEV;
-	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+	err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+	if (err) {
+		err = -ENODEV;
		asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
		goto Err_remove;
	}

@@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad)
 int
 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 {
-	int		rc = -ENODEV;
+	int rc = -ENODEV;
 
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);

@@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 
	pci_set_master(pdev);
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+	if (rc) {
+		rc = -ENODEV;
		printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
		goto out_release_region;
	}

@@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
 {
	struct bfad_s *bfad = pci_get_drvdata(pdev);
	u8 byte;
+	int rc;
 
	dev_printk(KERN_ERR, &pdev->dev,
		   "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);

@@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
	pci_save_state(pdev);
	pci_set_master(pdev);
 
-	if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
+	rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
+					       DMA_BIT_MASK(32));
+	if (rc)
		goto out_disable_device;
 
	if (restart_bfa(bfad) == -1)

@@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rv)
+		rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rv) {
+		rv = -ENODEV;
		dev_err(&pdev->dev, "No suitable DMA available.\n");
		goto err_release_regions;
	}

@@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
+	int error;
 
	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {

@@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;
 
-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
-	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (error)
+		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

@@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (rc)
		goto err_out_disable_device;
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
		dev_err(dev, "No usable DMA addressing method\n");
-		rc = -EIO;
+		rc = -ENODEV;
		goto err_out_regions;
	}
 

@@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;
+	int rc;
 
	dprintk("hptiop_probe(%p)\n", pcidev);
 

@@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
 
	/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
-	if (dma_set_mask(&pcidev->dev,
-			 DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
-	    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+	rc = dma_set_mask(&pcidev->dev,
+			  DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
+	if (rc)
+		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
+
+	if (rc) {
		printk(KERN_ERR "hptiop: fail to set dma_mask\n");
		goto disable_pci_device;
	}

@@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
-	int error = -ENODEV;
+	int error;
 
	if (!pdev)
-		return error;
+		return -ENODEV;
 
	/* Set the device DMA mask size */
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (error)
+		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (error)
		return error;
+	error = -ENODEV;
 
	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.

@@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
	uint32_t if_type;
 
	if (!pdev)
-		return error;
+		return -ENODEV;
 
	/* Set the device DMA mask size */
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (error)
+		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (error)
		return error;
 
	/*

@@ -2598,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev)
	 * device deleted during suspend)
	 */
	mutex_lock(&sdev->state_mutex);
-	WARN_ON_ONCE(!sdev->quiesced_by);
	sdev->quiesced_by = NULL;
	blk_clear_pm_only(sdev->request_queue);
	if (sdev->sdev_state == SDEV_QUIESCE)

@@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
 
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_expiry = TIME64_MAX;
+		__clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags);
	} else {
		cell->dns_expiry = ktime_get_real_seconds();
	}

@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
+
+	/*
+	 * page_private is subpool pointer in hugetlb pages. Transfer to
+	 * new page. PagePrivate is not associated with page_private for
+	 * hugetlb pages and can not be set here as only page_huge_active
+	 * pages can be migrated.
+	 */
+	if (page_private(page)) {
+		set_page_private(newpage, page_private(page));
+		set_page_private(page, 0);
+	}
+
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else

@@ -2698,7 +2698,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
	if (!access_ok(from, n))
		return n;
 
-	current->kernel_uaccess_faults_ok++;
	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);

@@ -2708,7 +2707,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
		f++;
		n--;
	}
-	current->kernel_uaccess_faults_ok--;
	return n;
 }
 

@@ -398,8 +398,6 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter
	loff_t pos = iocb->ki_pos;
	ssize_t rc = 0;
 
-	BUG_ON(iocb->private);
-
	gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
 
	orangefs_stats.reads++;
 

@@ -416,8 +414,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
	loff_t pos;
	ssize_t rc;
 
-	BUG_ON(iocb->private);
-
	gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
 
	inode_lock(file->f_mapping->host);

@@ -158,7 +158,7 @@ extern int sysctl_aarp_retransmit_limit;
 extern int sysctl_aarp_resolve_time;
 
 #ifdef CONFIG_SYSCTL
-extern void atalk_register_sysctl(void);
+extern int atalk_register_sysctl(void);
 extern void atalk_unregister_sysctl(void);
 #else
 #define atalk_register_sysctl()		do { } while(0)

@@ -739,12 +739,6 @@ struct task_struct {
	unsigned			use_memdelay:1;
 #endif
 
-	/*
-	 * May usercopy functions fault on kernel addresses?
-	 * This is not just a single bit because this can potentially nest.
-	 */
-	unsigned int			kernel_uaccess_faults_ok;
-
	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
	struct restart_block		restart_block;

@@ -52,7 +52,10 @@ struct qdisc_size_table {
 struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
-	__u32		qlen;
+	union {
+		u32		qlen;
+		atomic_t	atomic_qlen;
+	};
	spinlock_t	lock;
 };
 

@@ -466,27 +469,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
-	return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
	return q->q.qlen;
 }
 
-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
 {
-	__u32 qlen = q->qstats.qlen;
-	int i;
+	u32 qlen = q->qstats.qlen;
 
-	if (q->flags & TCQ_F_NOLOCK) {
-		for_each_possible_cpu(i)
-			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
-	} else {
+	if (q->flags & TCQ_F_NOLOCK)
+		qlen += atomic_read(&q->q.atomic_qlen);
+	else
		qlen += q->q.qlen;
-	}
 
	return qlen;
 }

@@ -882,14 +877,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
 {
-	this_cpu_inc(sch->cpu_qstats->qlen);
+	atomic_inc(&sch->q.atomic_qlen);
 }
 
-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
 {
-	this_cpu_dec(sch->cpu_qstats->qlen);
+	atomic_dec(&sch->q.atomic_qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)

@@ -113,6 +113,28 @@ config KASAN_INLINE
 
 endchoice
 
+config KASAN_STACK_ENABLE
+	bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+	default !(CLANG_VERSION < 90000)
+	depends on KASAN
+	help
+	  The LLVM stack address sanitizer has a know problem that
+	  causes excessive stack usage in a lot of functions, see
+	  https://bugs.llvm.org/show_bug.cgi?id=38809
+	  Disabling asan-stack makes it safe to run kernels build
+	  with clang-8 with KASAN enabled, though it loses some of
+	  the functionality.
+	  This feature is always disabled when compile-testing with clang-8
+	  or earlier to avoid cluttering the output in stack overflow
+	  warnings, but clang-8 users can still enable it for builds without
+	  CONFIG_COMPILE_TEST. On gcc and later clang versions it is
+	  assumed to always be safe to use and enabled by default.
+
+config KASAN_STACK
+	int
+	default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+	default 0
+
 config KASAN_S390_4_LEVEL_PAGING
	bool "KASan: use 4-level paging"
	depends on KASAN && S390

mm/hugetlb.c
@@ -3624,7 +3624,6 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
	copy_user_huge_page(new_page, old_page, address, vma,
			    pages_per_huge_page(h));
	__SetPageUptodate(new_page);
-	set_page_huge_active(new_page);
 
	mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
	mmu_notifier_invalidate_range_start(&range);

@@ -3645,6 +3644,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page, true);
		hugepage_add_new_anon_rmap(new_page, vma, haddr);
+		set_page_huge_active(new_page);
		/* Make the old page be freed below */
		new_page = old_page;
	}

@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
+	bool new_page = false;
 
	/*
	 * Currently, we are forced to kill the process in the event the

@@ -3790,7 +3791,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
		}
		clear_huge_page(page, address, pages_per_huge_page(h));
		__SetPageUptodate(page);
-		set_page_huge_active(page);
+		new_page = true;
 
		if (vma->vm_flags & VM_MAYSHARE) {
			int err = huge_add_to_page_cache(page, mapping, idx);

@@ -3861,6 +3862,15 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
	}
 
	spin_unlock(ptl);
+
+	/*
+	 * Only make newly allocated pages active. Existing pages found
+	 * in the pagecache could be !page_huge_active() if they have been
+	 * isolated for migration.
+	 */
+	if (new_page)
+		set_page_huge_active(page);
+
	unlock_page(page);
 out:
	return ret;

@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);
-	set_page_huge_active(page);
 
	mapping = dst_vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, dst_vma, dst_addr);

@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
	spin_unlock(ptl);
+	set_page_huge_active(page);
	if (vm_shared)
		unlock_page(page);
	ret = 0;

@@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
 
	set_fs(KERNEL_DS);
	pagefault_disable();
-	current->kernel_uaccess_faults_ok++;
	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);
-	current->kernel_uaccess_faults_ok--;
	pagefault_enable();
	set_fs(old_fs);
 

@@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
 
	set_fs(KERNEL_DS);
	pagefault_disable();
-	current->kernel_uaccess_faults_ok++;
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
-	current->kernel_uaccess_faults_ok--;
	pagefault_enable();
	set_fs(old_fs);
 

@@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
 
	set_fs(KERNEL_DS);
	pagefault_disable();
-	current->kernel_uaccess_faults_ok++;
 
	do {
		ret = __get_user(*dst++, (const char __user __force *)src++);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
 
-	current->kernel_uaccess_faults_ok--;
	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

mm/migrate.c
@@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
		lock_page(hpage);
	}
 
+	/*
+	 * Check for pages which are in the process of being freed. Without
+	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * be called and we could leak usage counts for subpools.
+	 */
+	if (page_private(hpage) && !page_mapping(hpage)) {
+		rc = -EBUSY;
+		goto out_unlock;
+	}
+
	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);
 

@@ -1345,6 +1355,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
		put_new_page = NULL;
	}
 
+out_unlock:
	unlock_page(hpage);
 out:
	if (rc != -EAGAIN)

@@ -2426,12 +2426,11 @@ int expand_downwards(struct vm_area_struct *vma,
 {
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
-	int error;
+	int error = 0;
 
	address &= PAGE_MASK;
-	error = security_mmap_addr(address);
-	if (error)
-		return error;
+	if (address < mmap_min_addr)
+		return -EPERM;
 
	/* Enforce stack_guard_gap */
	prev = vma->vm_prev;

@@ -2848,7 +2848,7 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
	struct inode *inode = d_inode(old_dentry);
-	int ret;
+	int ret = 0;
 
	/*
	 * No ordinary (disk based) filesystem counts links as inodes;

@@ -210,56 +210,34 @@ static const struct seq_operations atalk_seq_socket_ops = {
	.show   = atalk_seq_socket_show,
 };
 
-static struct proc_dir_entry *atalk_proc_dir;
-
 int __init atalk_proc_init(void)
 {
-	struct proc_dir_entry *p;
-	int rc = -ENOMEM;
+	if (!proc_mkdir("atalk", init_net.proc_net))
+		return -ENOMEM;
 
-	atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
-	if (!atalk_proc_dir)
+	if (!proc_create_seq("atalk/interface", 0444, init_net.proc_net,
+			     &atalk_seq_interface_ops))
		goto out;
 
-	p = proc_create_seq("interface", 0444, atalk_proc_dir,
-			    &atalk_seq_interface_ops);
-	if (!p)
-		goto out_interface;
+	if (!proc_create_seq("atalk/route", 0444, init_net.proc_net,
+			     &atalk_seq_route_ops))
+		goto out;
 
-	p = proc_create_seq("route", 0444, atalk_proc_dir,
-			    &atalk_seq_route_ops);
-	if (!p)
-		goto out_route;
+	if (!proc_create_seq("atalk/socket", 0444, init_net.proc_net,
+			     &atalk_seq_socket_ops))
+		goto out;
 
-	p = proc_create_seq("socket", 0444, atalk_proc_dir,
-			    &atalk_seq_socket_ops);
-	if (!p)
-		goto out_socket;
+	if (!proc_create_seq_private("atalk/arp", 0444, init_net.proc_net,
+				     &aarp_seq_ops,
+				     sizeof(struct aarp_iter_state), NULL))
+		goto out;
 
-	p = proc_create_seq_private("arp", 0444, atalk_proc_dir, &aarp_seq_ops,
-			sizeof(struct aarp_iter_state), NULL);
-	if (!p)
-		goto out_arp;
-
-	rc = 0;
 out:
-	return rc;
-out_arp:
-	remove_proc_entry("socket", atalk_proc_dir);
-out_socket:
-	remove_proc_entry("route", atalk_proc_dir);
-out_route:
-	remove_proc_entry("interface", atalk_proc_dir);
-out_interface:
-	remove_proc_entry("atalk", init_net.proc_net);
-	goto out;
+	remove_proc_subtree("atalk", init_net.proc_net);
+	return -ENOMEM;
 }
 
-void __exit atalk_proc_exit(void)
+void atalk_proc_exit(void)
 {
-	remove_proc_entry("interface", atalk_proc_dir);
-	remove_proc_entry("route", atalk_proc_dir);
-	remove_proc_entry("socket", atalk_proc_dir);
-	remove_proc_entry("arp", atalk_proc_dir);
-	remove_proc_entry("atalk", init_net.proc_net);
+	remove_proc_subtree("atalk", init_net.proc_net);
 }

@@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
 /* Called by proto.c on kernel start up */
 static int __init atalk_init(void)
 {
-	int rc = proto_register(&ddp_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&ddp_proto, 0);
+	if (rc)
		goto out;
 
-	(void)sock_register(&atalk_family_ops);
+	rc = sock_register(&atalk_family_ops);
+	if (rc)
+		goto out_proto;
 
	ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
	if (!ddp_dl)
		printk(atalk_err_snap);

@@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
	dev_add_pack(&ltalk_packet_type);
	dev_add_pack(&ppptalk_packet_type);
 
-	register_netdevice_notifier(&ddp_notifier);
+	rc = register_netdevice_notifier(&ddp_notifier);
+	if (rc)
+		goto out_sock;
+
	aarp_proto_init();
-	atalk_proc_init();
-	atalk_register_sysctl();
+	rc = atalk_proc_init();
+	if (rc)
+		goto out_aarp;
+
+	rc = atalk_register_sysctl();
+	if (rc)
+		goto out_proc;
 out:
	return rc;
+out_proc:
+	atalk_proc_exit();
+out_aarp:
+	aarp_cleanup_module();
+	unregister_netdevice_notifier(&ddp_notifier);
+out_sock:
+	dev_remove_pack(&ppptalk_packet_type);
+	dev_remove_pack(&ltalk_packet_type);
+	unregister_snap_client(ddp_dl);
+	sock_unregister(PF_APPLETALK);
+out_proto:
+	proto_unregister(&ddp_proto);
+	goto out;
 }
 module_init(atalk_init);
 

@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
 
 static struct ctl_table_header *atalk_table_header;
 
-void atalk_register_sysctl(void)
+int __init atalk_register_sysctl(void)
 {
	atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
+	if (!atalk_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void atalk_unregister_sysctl(void)

@@ -291,7 +291,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
-		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;

@@ -307,7 +306,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
-		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;

@@ -1539,6 +1539,9 @@ static int register_queue_kobjects(struct net_device *dev)
 error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+	kset_unregister(dev->queues_kset);
+#endif
	return error;
 }
 

@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;

@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;

@@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
 
		qdisc_qstats_cpu_requeues_inc(q);
		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
 
		skb = next;
	}

@@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;

@@ -645,7 +645,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
	if (unlikely(err))
		return qdisc_drop_cpu(skb, qdisc, to_free);
 
-	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_atomic_qlen_inc(qdisc);
	/* Note: skb can not be used after skb_array_produce(),
	 * so we better not use qdisc_qstats_cpu_backlog_inc()
	 */

@@ -670,7 +670,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
	if (likely(skb)) {
		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_cpu_qlen_dec(qdisc);
+		qdisc_qstats_atomic_qlen_dec(qdisc);
	}
 
	return skb;

@@ -714,7 +714,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
 
		q->backlog = 0;
-		q->qlen = 0;
	}
 }
 

@@ -26,7 +26,7 @@ else
 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
	$(call cc-param,asan-globals=1) \
	$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-	$(call cc-param,asan-stack=1) \
+	$(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
	$(call cc-param,asan-use-after-scope=1) \
	$(call cc-param,asan-instrument-allocas=1)
 endif

|
@ -4044,7 +4044,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
|
|||
}
|
||||
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
|
||||
|
||||
if (kvm->debugfs_dentry) {
|
||||
if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
|
||||
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
|
||||
|
||||
if (p) {
|
||||
|
|