From 5dd8c4c3f18b2ebf3f5303c7c273396441db6c6c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 8 Nov 2013 10:20:06 -0800 Subject: [PATCH 001/312] drm/i915/bdw: Add BDW to ULT macro For what we care about, ULT and ULX are interchangeable. We know of 3 types of pciids for these cases. I am not sure if at some point we will need to distinguish ULT and ULX. Cc: Paulo Zanoni Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8600c315b4c4..51951ef7e71d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1755,8 +1755,13 @@ struct drm_i915_file_private { #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0xFF00) == 0x0C00) -#define IS_ULT(dev) (IS_HASWELL(dev) && \ +#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ + (((dev)->pdev->device & 0xf) == 0x2 || \ + ((dev)->pdev->device & 0xf) == 0x6 || \ + ((dev)->pdev->device & 0xf) == 0xe)) +#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0xFF00) == 0x0A00) +#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0x00F0) == 0x0020) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) From f8e100621b072384ec1180563d933e86e2f7e2d5 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 11 Nov 2013 11:12:57 +0200 Subject: [PATCH 002/312] drm/i915/bdw: GEN8 backlight support Prior to Haswell the CPU control register for backlight (BLC_PWM_CPU_CTL) toggled the PCH backlight pin for us. This made some sense as there was no pin on the CPU. With Haswell came the introduction of a CPU backlight pin, but the interface was still controlled by software with the same mechanism. Behind the scenes, hardware did all the dirty work for us. Broadwell no longer provides this for free. If we want to use the PCH backlight pin [1] then we have to set the override bit in BLC_PWM_PCH_CTL1 and program BLC_PWM_PCH_CTL2 for the PWM values. This patch implements that. This patch is compile tested only, and given that I rarely if ever touch this code, careful review is welcome. [1] According to Art, we know of no devices that exist which use the CPU pin (and remember it has existed already on HSW). If such a device does exist, we'll have to handle it properly - this is left as TODO until then. v2: Drop the abstraction prep patch, as a bigger backlight overhaul is in the works, and do just the minimal bdw enabling now. 
(by Jani) CC: Art Runyan Signed-off-by: Ben Widawsky Signed-off-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_panel.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index f161ac02c4f6..e6f782d1c669 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev, spin_lock_irqsave(&dev_priv->backlight.lock, flags); - if (HAS_PCH_SPLIT(dev)) { + if (IS_BROADWELL(dev)) { + val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; + } else if (HAS_PCH_SPLIT(dev)) { val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } else { if (IS_VALLEYVIEW(dev)) @@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev, return val; } +static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_PCH_CTL2, val | level); +} + static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); level = intel_panel_compute_brightness(dev, pipe, level); - if (HAS_PCH_SPLIT(dev)) + if (IS_BROADWELL(dev)) + return intel_bdw_panel_set_backlight(dev, level); + else if (HAS_PCH_SPLIT(dev)) return intel_pch_panel_set_backlight(dev, level); if (is_backlight_combination_mode(dev)) { @@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector) POSTING_READ(reg); I915_WRITE(reg, tmp | BLM_PWM_ENABLE); - if (HAS_PCH_SPLIT(dev) && + if (IS_BROADWELL(dev)) { + /* + * Broadwell requires PCH override to drive the PCH + * backlight pin. The above will configure the CPU + * backlight pin, which we don't plan to use. + */ + tmp = I915_READ(BLC_PWM_PCH_CTL1); + tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE; + I915_WRITE(BLC_PWM_PCH_CTL1, tmp); + } else if (HAS_PCH_SPLIT(dev) && !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { tmp = I915_READ(BLC_PWM_PCH_CTL1); tmp |= BLM_PCH_PWM_ENABLE; From 935e8de97564a1ea22c3bb30ca2b20b12af7cbfb Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 7 Nov 2013 21:40:47 -0800 Subject: [PATCH 003/312] drm/i915/bdw: Do gen6 style reset for gen8 This patch existed before, but was lost over time. Note that reset is still somewhat problematic in my limited testing (ie. module_reload will not pass) but it can be disabled with a module parameter, and support should be considered preliminary anyway. 
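As a standalone illustration of the duty-cycle handling in the Broadwell backlight patch above: intel_bdw_panel_set_backlight() is a masked read-modify-write of BLC_PWM_PCH_CTL2, which the following hedged sketch reproduces in plain C (illustrative field mask, not the real register layout):

#include <stdio.h>

#define DUTY_CYCLE_MASK 0x0000ffffu	/* illustrative field position */

/* Keep the bits outside the field, replace only the duty-cycle bits. */
static unsigned int set_duty_cycle(unsigned int reg, unsigned int level)
{
	return (reg & ~DUTY_CYCLE_MASK) | (level & DUTY_CYCLE_MASK);
}

int main(void)
{
	unsigned int reg = 0xabcd1234u;

	printf("0x%08x -> 0x%08x\n", reg, set_duty_cycle(reg, 0x42));
	return 0;
}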
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_uncore.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f9883ceff946..6a4f9b615de1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -782,6 +782,7 @@ static int gen6_do_reset(struct drm_device *dev) int intel_gpu_reset(struct drm_device *dev) { switch (INTEL_INFO(dev)->gen) { + case 8: case 7: case 6: return gen6_do_reset(dev); case 5: return ironlake_do_reset(dev); From 230f955f73074ec5b1b4389f3f26152eebf54404 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 7 Nov 2013 21:40:48 -0800 Subject: [PATCH 004/312] drm/i915/bdw: Free correct number of ppgtt pages I am unclear how this got messed up in the shuffle, but it did. Cc: Imre Deak Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3620a1b0a73c..5a3cc3189f1f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -335,8 +335,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) kfree(ppgtt->gen8_pt_dma_addr[i]); } - __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); - __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); + __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); + __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); } /** From eb0d4b75d506050cb369df467b93b92360f5acaf Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 7 Nov 2013 21:40:50 -0800 Subject: [PATCH 005/312] drm/i915/bdw: Add comment about gen8 HWS PGA This confused me so many times that I think it is appropriate to add a small comment to instruct the reader of the code that it is indeed doing what it is supposed to do. Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b620337e6d67..c2f09d456300 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) } else if (IS_GEN6(ring->dev)) { mmio = RING_HWS_PGA_GEN6(ring->mmio_base); } else { + /* XXX: gen8 returns to sanity */ mmio = RING_HWS_PGA(ring->mmio_base); } From 3a2ffb65eec6dbda2fd8151894f51c18b42c8d41 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 7 Nov 2013 21:40:51 -0800 Subject: [PATCH 006/312] drm/i915/bdw: Limit GTT to 2GB Because of the way in which we're allocating the pages for the Aliasing PPGTT, we cannot actually successfully alloc enough space for anything greater than 2GB. Instead of a quick hack to fix this, we should defer until we have the real solution in place (allocating much less contiguous space). This wasn't found sooner because we did not have any systems supporting more than a 2GB GTT. 
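The 2GB limit enforced by the hunk that follows comes from how the GGMS field of the GMCH control register is decoded into a GTT size; a hedged standalone sketch of that arithmetic (encoding as in the hunk below, with 8-byte PTEs and 4KB pages assumed):

#include <stdio.h>

/* GGMS encodes the GTT size as (1 << field) MiB; with 8-byte PTEs a 4 MiB
 * GTT maps 2 GiB, which is all the aliasing PPGTT allocation can handle for
 * now, so anything larger is clamped. */
static unsigned long long gen8_gtt_bytes(unsigned int ggms_field)
{
	unsigned int size_mb = ggms_field ? 1u << ggms_field : 0;

	if (size_mb > 4)
		size_mb = 4;	/* clamp: more would map over 2 GiB */

	return (unsigned long long)size_mb << 20;
}

int main(void)
{
	unsigned int f;

	for (f = 0; f <= 3; f++)
		printf("GGMS=%u: GTT %llu bytes, maps %llu bytes\n",
		       f, gen8_gtt_bytes(f), gen8_gtt_bytes(f) / 8 * 4096);
	return 0;
}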
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5a3cc3189f1f..f69bdc741b80 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; if (bdw_gmch_ctl) bdw_gmch_ctl = 1 << bdw_gmch_ctl; + if (bdw_gmch_ctl > 4) { + WARN_ON(!i915_preliminary_hw_support); + return 4<<20; + } + return bdw_gmch_ctl << 20; } From 596cc11e7a4a89bf6c45f955402d0bd0c7d51f13 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 11 Nov 2013 14:46:28 -0800 Subject: [PATCH 007/312] drm/i915/bdw: PIPE_[BC] I[ME]R moved to powerwell The pipe B and pipe C interrupt mask and enable registers are now part of the pipe, so disabling the pipe power wells will lose the contents of the registers. Art totally debugged this one! v2: Use the irq_lock to clarify code, and prevent future bugs (Daniel) Cc: Art Runyan Cc: Paulo Zanoni Signed-off-by: Ben Widawsky [danvet: Make sparse happy.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a07d7c9cafc..33a8dbe64039 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5684,6 +5684,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; bool is_enabled, enable_requested; + unsigned long irqflags; uint32_t tmp; tmp = I915_READ(HSW_PWR_WELL_DRIVER); @@ -5701,9 +5702,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) HSW_PWR_WELL_STATE_ENABLED), 20)) DRM_ERROR("Timeout enabling power well\n"); } + + if (IS_BROADWELL(dev)) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), + dev_priv->de_irq_mask[PIPE_B]); + I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), + ~dev_priv->de_irq_mask[PIPE_B] | + GEN8_PIPE_VBLANK); + I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), + dev_priv->de_irq_mask[PIPE_C]); + I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), + ~dev_priv->de_irq_mask[PIPE_C] | + GEN8_PIPE_VBLANK); + POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } } else { if (enable_requested) { - unsigned long irqflags; enum pipe p; I915_WRITE(HSW_PWR_WELL_DRIVER, 0); From caaa4c804fae7bb654f7d00b35b8583280a9c52c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 16 Nov 2013 17:46:02 +1100 Subject: [PATCH 008/312] KVM: PPC: Book3S HV: Fix physical address calculations This fixes a bug in kvmppc_do_h_enter() where the physical address for a page can be calculated incorrectly if transparent huge pages (THP) are active. Until THP came along, it was true that if we encountered a large (16M) page in kvmppc_do_h_enter(), then the associated memslot must be 16M aligned for both its guest physical address and the userspace address, and the physical address calculations in kvmppc_do_h_enter() assumed that. With THP, that is no longer true. In the case where we are using MMU notifiers and the page size that we get from the Linux page tables is larger than the page being mapped by the guest, we need to fill in some low-order bits of the physical address. 
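The rest of the message below spells out exactly which low-order bits those are and where they come from; as a hedged, standalone illustration of the masking involved (illustrative page sizes and addresses, not the KVM code itself):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Illustrative values: a 16 MiB Linux mapping (THP) backs a 4 KiB
	 * guest page, and the gpa and hva are not aligned the same way. */
	unsigned long long pfn      = 0x40000;		/* from the Linux PTE */
	unsigned long long hva      = 0x3fff345000ull;	/* host virtual addr  */
	unsigned long long pte_size = 16ull << 20;	/* Linux page: 16 MiB */

	/* Physical address of the guest page: page-aligned base from the
	 * PTE plus the sub-huge-page offset, taken from the hva (with THP
	 * the corresponding gpa bits may differ, so gpa cannot be used). */
	unsigned long long pa = (pfn << PAGE_SHIFT) | (hva & (pte_size - 1));

	printf("pa = 0x%llx\n", pa);
	return 0;
}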
Without THP, these bits would be the same in the guest physical address (gpa) and the host virtual address (hva). With THP, they can be different, and we need to use the bits from hva rather than gpa. In the case where we are not using MMU notifiers, the host physical address we get from the memslot->arch.slot_phys[] array already includes the low-order bits down to the PAGE_SIZE level, even if we are using large pages. Thus we can simplify the calculation in this case to just add in the remaining bits in the case where PAGE_SIZE is 64k and the guest is mapping a 4k page. The same bug exists in kvmppc_book3s_hv_page_fault(). The basic fix is to use psize (the page size from the HPTE) rather than pte_size (the page size from the Linux PTE) when updating the HPTE low word in r. That means that pfn needs to be computed to PAGE_SIZE granularity even if the Linux PTE is a huge page PTE. That can be arranged simply by doing the page_to_pfn() before setting page to the head of the compound page. If psize is less than PAGE_SIZE, then we need to make sure we only update the bits from PAGE_SIZE upwards, in order not to lose any sub-page offset bits in r. On the other hand, if psize is greater than PAGE_SIZE, we need to make sure we don't bring in non-zero low order bits in pfn, hence we mask (pfn << PAGE_SHIFT) with ~(psize - 1). Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +++++++++--- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587a8b7d..47bbeaf2d320 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -665,6 +665,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, return -EFAULT; } else { page = pages[0]; + pfn = page_to_pfn(page); if (PageHuge(page)) { page = compound_head(page); pte_size <<= compound_order(page); @@ -689,7 +690,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } rcu_read_unlock_sched(); } - pfn = page_to_pfn(page); } ret = -EFAULT; @@ -707,8 +707,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; } - /* Set the HPTE to point to pfn */ - r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); + /* + * Set the HPTE to point to pfn. + * Since the pfn is at PAGE_SIZE granularity, make sure we + * don't mask out lower-order bits if psize < PAGE_SIZE. 
+ */ + if (psize < PAGE_SIZE) + psize = PAGE_SIZE; + r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); if (hpte_is_writable(r) && !write_ok) r = hpte_make_readonly(r); ret = RESUME_GUEST; diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9c515440ad1a..fddbf989f37e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, is_io = pa & (HPTE_R_I | HPTE_R_W); pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); pa &= PAGE_MASK; + pa |= gpa & ~PAGE_MASK; } else { /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); @@ -238,13 +239,12 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, ptel = hpte_make_readonly(ptel); is_io = hpte_cache_bits(pte_val(pte)); pa = pte_pfn(pte) << PAGE_SHIFT; + pa |= hva & (pte_size - 1); } } if (pte_size < psize) return H_PARAMETER; - if (pa && pte_size > psize) - pa |= gpa & (pte_size - 1); ptel &= ~(HPTE_R_PP0 - psize); ptel |= pa; From f019b7ad76e6bdbc8462cbe17ad5b86a25fcdf24 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 16 Nov 2013 17:46:03 +1100 Subject: [PATCH 009/312] KVM: PPC: Book3S HV: Refine barriers in guest entry/exit Some users have reported instances of the host hanging with secondary threads of a core waiting for the primary thread to exit the guest, and the primary thread stuck in nap mode. This prompted a review of the memory barriers in the guest entry/exit code, and this is the result. Most of these changes are the suggestions of Dean Burdick . The barriers between updating napping_threads and reading the entry_exit_count on the one hand, and updating entry_exit_count and reading napping_threads on the other, need to be isync not lwsync, since we need to ensure that either the napping_threads update or the entry_exit_count update get seen. It is not sufficient to order the load vs. lwarx, as lwsync does; we need to order the load vs. the stwcx., so we need isync. In addition, we need a full sync before sending IPIs to wake other threads from nap, to ensure that the write to the entry_exit_count is visible before the IPI occurs. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc8de75b1925..bde28da69610 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 13: b machine_check_fwnmi - /* * We come in here when wakened from nap mode on a secondary hw thread. * Relocation is off and most register values are lost. @@ -224,6 +223,11 @@ kvm_start_guest: /* Clear our vcpu pointer so we don't come back in early */ li r0, 0 std r0, HSTATE_KVM_VCPU(r13) + /* + * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing + * the nap_count, because once the increment to nap_count is + * visible we could be given another vcpu. + */ lwsync /* Clear any pending IPI - we're an offline thread */ ld r5, HSTATE_XICS_PHYS(r13) @@ -241,7 +245,6 @@ kvm_start_guest: /* increment the nap count and then go to nap mode */ ld r4, HSTATE_KVM_VCORE(r13) addi r4, r4, VCORE_NAP_COUNT - lwsync /* make previous updates visible */ 51: lwarx r3, 0, r4 addi r3, r3, 1 stwcx. 
r3, 0, r4 @@ -990,14 +993,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) */ /* Increment the threads-exiting-guest count in the 0xff00 bits of vcore->entry_exit_count */ - lwsync ld r5,HSTATE_KVM_VCORE(r13) addi r6,r5,VCORE_ENTRY_EXIT 41: lwarx r3,0,r6 addi r0,r3,0x100 stwcx. r0,0,r6 bne 41b - lwsync + isync /* order stwcx. vs. reading napping_threads */ /* * At this point we have an interrupt that we have to pass @@ -1030,6 +1032,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) sld r0,r0,r4 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ beq 43f + /* Order entry/exit update vs. IPIs */ + sync mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ subf r6,r4,r13 42: andi. r0,r3,1 @@ -1638,10 +1642,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) bge kvm_cede_exit stwcx. r4,0,r6 bne 31b + /* order napping_threads update vs testing entry_exit_count */ + isync li r0,1 stb r0,HSTATE_NAPPING(r13) - /* order napping_threads update vs testing entry_exit_count */ - lwsync mr r4,r3 lwz r7,VCORE_ENTRY_EXIT(r5) cmpwi r7,0x100 From bf3d32e1156c36c88b75960fd2e5457d5d75620b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 16 Nov 2013 17:46:04 +1100 Subject: [PATCH 010/312] KVM: PPC: Book3S HV: Make tbacct_lock irq-safe Lockdep reported that there is a potential for deadlock because vcpu->arch.tbacct_lock is not irq-safe, and is sometimes taken inside the rq_lock (run-queue lock) in the scheduler, which is taken within interrupts. The lockdep splat looks like: ====================================================== [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ] 3.12.0-rc5-kvm+ #8 Not tainted ------------------------------------------------------ qemu-system-ppc/4803 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: (&(&vcpu->arch.tbacct_lock)->rlock){+.+...}, at: [] .kvmppc_core_vcpu_put_hv+0x2c/0xa0 and this task is already holding: (&rq->lock){-.-.-.}, at: [] .__schedule+0x180/0xaa0 which would create a new lock dependency: (&rq->lock){-.-.-.} -> (&(&vcpu->arch.tbacct_lock)->rlock){+.+...} but this new dependency connects a HARDIRQ-irq-safe lock: (&rq->lock){-.-.-.} ... which became HARDIRQ-irq-safe at: [] .lock_acquire+0xbc/0x190 [] ._raw_spin_lock+0x34/0x60 [] .scheduler_tick+0x54/0x180 [] .update_process_times+0x70/0xa0 [] .tick_periodic+0x3c/0xe0 [] .tick_handle_periodic+0x28/0xb0 [] .timer_interrupt+0x120/0x2e0 [] decrementer_common+0x168/0x180 [] .get_page_from_freelist+0x924/0xc10 [] .__alloc_pages_nodemask+0x200/0xba0 [] .alloc_pages_exact_nid+0x68/0x110 [] .page_cgroup_init+0x1e0/0x270 [] .start_kernel+0x3e0/0x4e4 [] .start_here_common+0x20/0x70 to a HARDIRQ-irq-unsafe lock: (&(&vcpu->arch.tbacct_lock)->rlock){+.+...} ... which became HARDIRQ-irq-unsafe at: ... [] .lock_acquire+0xbc/0x190 [] ._raw_spin_lock+0x34/0x60 [] .kvmppc_core_vcpu_load_hv+0x2c/0x100 [] .kvmppc_core_vcpu_load+0x2c/0x40 [] .kvm_arch_vcpu_load+0x10/0x30 [] .vcpu_load+0x64/0xd0 [] .kvm_vcpu_ioctl+0x68/0x730 [] .do_vfs_ioctl+0x4dc/0x7a0 [] .SyS_ioctl+0xc4/0xe0 [] syscall_exit+0x0/0x98 Some users have reported this deadlock occurring in practice, though the reports have been primarily on 3.10.x-based kernels. This fixes the problem by making tbacct_lock be irq-safe. 
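A hedged, kernel-flavoured sketch (illustrative names, not the Book3S HV code) of the pattern the patch below applies: a lock that is also taken from a context running under the HARDIRQ-safe rq->lock must itself be taken with interrupts disabled everywhere:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_tbacct_lock);
static u64 demo_busy_stolen;

/* Before the fix this used plain spin_lock()/spin_unlock(), leaving the
 * lock HARDIRQ-unsafe while it was also acquired under rq->lock; lockdep
 * reports that as a HARDIRQ-safe -> HARDIRQ-unsafe ordering. */
static void demo_account_stolen(u64 delta)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_tbacct_lock, flags);
	demo_busy_stolen += delta;
	spin_unlock_irqrestore(&demo_tbacct_lock, flags);
}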
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f1c3bc..31d9cfb64a23 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long flags; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && vc->preempt_tb != TB_NIL) { vc->stolen_tb += mftb() - vc->preempt_tb; @@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; vcpu->arch.busy_preempt = TB_NIL; } - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long flags; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) vc->preempt_tb = mftb(); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) vcpu->arch.busy_preempt = mftb(); - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); } static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) @@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) */ if (vc->vcore_state != VCORE_INACTIVE && vc->runner->arch.run_task != current) { - spin_lock(&vc->runner->arch.tbacct_lock); + spin_lock_irq(&vc->runner->arch.tbacct_lock); p = vc->stolen_tb; if (vc->preempt_tb != TB_NIL) p += now - vc->preempt_tb; - spin_unlock(&vc->runner->arch.tbacct_lock); + spin_unlock_irq(&vc->runner->arch.tbacct_lock); } else { p = vc->stolen_tb; } @@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, core_stolen = vcore_stolen_time(vc, now); stolen = core_stolen - vcpu->arch.stolen_logged; vcpu->arch.stolen_logged = core_stolen; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irq(&vcpu->arch.tbacct_lock); stolen += vcpu->arch.busy_stolen; vcpu->arch.busy_stolen = 0; - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irq(&vcpu->arch.tbacct_lock); if (!dt || !vpa) return; memset(dt, 0, sizeof(struct dtl_entry)); @@ -1115,13 +1117,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) return; - spin_lock(&vcpu->arch.tbacct_lock); + spin_lock_irq(&vcpu->arch.tbacct_lock); now = mftb(); vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - vcpu->arch.stolen_logged; vcpu->arch.busy_preempt = now; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; - spin_unlock(&vcpu->arch.tbacct_lock); + spin_unlock_irq(&vcpu->arch.tbacct_lock); --vc->n_runnable; list_del(&vcpu->arch.run_list); } From c9438092cae4a5bdbd146ca1385e85dcd6e847f8 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 16 Nov 2013 17:46:05 +1100 Subject: [PATCH 011/312] KVM: PPC: Book3S HV: Take SRCU read lock around kvm_read_guest() call Running a kernel with CONFIG_PROVE_RCU=y yields the following diagnostic: =============================== [ INFO: suspicious RCU usage. 
] 3.12.0-rc5-kvm+ #9 Not tainted ------------------------------- include/linux/kvm_host.h:473 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 1, debug_locks = 0 1 lock held by qemu-system-ppc/4831: stack backtrace: CPU: 28 PID: 4831 Comm: qemu-system-ppc Not tainted 3.12.0-rc5-kvm+ #9 Call Trace: [c000000be462b2a0] [c00000000001644c] .show_stack+0x7c/0x1f0 (unreliable) [c000000be462b370] [c000000000ad57c0] .dump_stack+0x88/0xb4 [c000000be462b3f0] [c0000000001315e8] .lockdep_rcu_suspicious+0x138/0x180 [c000000be462b480] [c00000000007862c] .gfn_to_memslot+0x13c/0x170 [c000000be462b510] [c00000000007d384] .gfn_to_hva_prot+0x24/0x90 [c000000be462b5a0] [c00000000007d420] .kvm_read_guest_page+0x30/0xd0 [c000000be462b630] [c00000000007d528] .kvm_read_guest+0x68/0x110 [c000000be462b6e0] [c000000000084594] .kvmppc_rtas_hcall+0x34/0x180 [c000000be462b7d0] [c000000000097934] .kvmppc_pseries_do_hcall+0x74/0x830 [c000000be462b880] [c0000000000990e8] .kvmppc_vcpu_run_hv+0xff8/0x15a0 [c000000be462b9e0] [c0000000000839cc] .kvmppc_vcpu_run+0x2c/0x40 [c000000be462ba50] [c0000000000810b4] .kvm_arch_vcpu_ioctl_run+0x54/0x1b0 [c000000be462bae0] [c00000000007b508] .kvm_vcpu_ioctl+0x478/0x730 [c000000be462bca0] [c00000000025532c] .do_vfs_ioctl+0x4dc/0x7a0 [c000000be462bd80] [c0000000002556b4] .SyS_ioctl+0xc4/0xe0 [c000000be462be30] [c000000000009ee4] syscall_exit+0x0/0x98 To fix this, we take the SRCU read lock around the kvmppc_rtas_hcall() call. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 31d9cfb64a23..b51d5db78068 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -591,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (list_empty(&vcpu->kvm->arch.rtas_tokens)) return RESUME_HOST; + idx = srcu_read_lock(&vcpu->kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); if (rc == -ENOENT) return RESUME_HOST; From 91648ec09c1ef69c4d840ab6dab391bfb452d554 Mon Sep 17 00:00:00 2001 From: pingfan liu Date: Fri, 15 Nov 2013 16:35:00 +0800 Subject: [PATCH 012/312] powerpc: kvm: fix rare but potential deadlock scene Since kvmppc_hv_find_lock_hpte() is called from both virtmode and realmode, so it can trigger the deadlock. Suppose the following scene: Two physical cpuM, cpuN, two VM instances A, B, each VM has a group of vcpus. If on cpuM, vcpu_A_1 holds bitlock X (HPTE_V_HVLOCK), then is switched out, and on cpuN, vcpu_A_2 try to lock X in realmode, then cpuN will be caught in realmode for a long time. What makes things even worse if the following happens, On cpuM, bitlockX is hold, on cpuN, Y is hold. vcpu_B_2 try to lock Y on cpuM in realmode vcpu_A_2 try to lock X on cpuN in realmode Oops! 
deadlock happens Signed-off-by: Liu Ping Fan Reviewed-by: Paul Mackerras CC: stable@vger.kernel.org Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 6 +++++- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 47bbeaf2d320..c5d148434c08 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, slb_v = vcpu->kvm->arch.vrma_slb_v; } + preempt_disable(); /* Find the HPTE in the hash table */ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, HPTE_V_VALID | HPTE_V_ABSENT); - if (index < 0) + if (index < 0) { + preempt_enable(); return -ENOENT; + } hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); v = hptep[0] & ~HPTE_V_HVLOCK; gr = kvm->arch.revmap[index].guest_rpte; @@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, /* Unlock the HPTE */ asm volatile("lwsync" : : : "memory"); hptep[0] = v; + preempt_enable(); gpte->eaddr = eaddr; gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index fddbf989f37e..1931aa341a72 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -749,6 +749,10 @@ static int slb_base_page_shift[4] = { 20, /* 1M, unsupported */ }; +/* When called from virtmode, this func should be protected by + * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK + * can trigger deadlock issue. + */ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, unsigned long valid) { From 6802cdc58d4fe66cffd6cd04ee55e65dd61eeeeb Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 6 Nov 2013 09:41:09 +0900 Subject: [PATCH 013/312] ARM: shmobile: lager: phy fixup needs CONFIG_PHYLIB Do not build the phy fixup unless CONFIG_PHYLIB is enabled. Other than not being useful it is also not possible to link the code under this condition as phy_register_fixup_for_id(), mdiobus_read() and mdiobus_write() are absent. 
arch/arm/mach-shmobile/built-in.o: In function `lager_ksz8041_fixup': board-lager.c:(.text+0xb8): undefined reference to `mdiobus_read' board-lager.c:(.text+0xd4): undefined reference to `mdiobus_write' arch/arm/mach-shmobile/built-in.o: In function `lager_init': board-lager.c:(.init.text+0xafc): undefined reference to `phy_register_fixup_for_id' This problem was introduced by 48c8b96f21817aad ("ARM: shmobile: Lager: add Micrel KSZ8041 PHY fixup") Cc: Sergei Shtylyov Signed-off-by: Simon Horman --- arch/arm/mach-shmobile/board-lager.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index a8d3ce646fb9..e0406fd37390 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c @@ -245,7 +245,9 @@ static void __init lager_init(void) { lager_add_standard_devices(); - phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup); + if (IS_ENABLED(CONFIG_PHYLIB)) + phy_register_fixup_for_id("r8a7790-ether-ff:01", + lager_ksz8041_fixup); } static const char * const lager_boards_compat_dt[] __initconst = { From e55bc55867585e6628359fd5496316576fe58a2f Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 9 Nov 2013 13:18:01 +0100 Subject: [PATCH 014/312] irqchip: renesas-intc-irqpin: Fix register bitfield shift calculation The SENSE register bitfield position is incorrectly computed for SoCs that use 2-bit IRQ sense fields. Fix it. This has been tested on the Marzen (H1) and Bockw (M1) boards. This bug has been present since the renesas-intc-irqpin driver was introduced by 443580486e3b9657 ("irqchip: Renesas INTC External IRQ pin driver") in v3.10-rc1. Signed-off-by: Laurent Pinchart Acked-by: Magnus Damm Tested-by: Simon Horman Signed-off-by: Simon Horman --- drivers/irqchip/irq-renesas-intc-irqpin.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 82cec63a9011..3ee78f02e5d7 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p, static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, int irq, int do_mask) { - int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ - int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ + /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */ + int bitfield_width = 4; + int shift = 32 - (irq + 1) * bitfield_width; intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, shift, bitfield_width, @@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) { + /* The SENSE register is assumed to be 32-bit. */ int bitfield_width = p->config.sense_bitfield_width; - int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ + int shift = 32 - (irq + 1) * bitfield_width; dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); From 3088f1085d1c08f31f02db26796555a66cdf7ca1 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 25 Nov 2013 15:31:21 +0530 Subject: [PATCH 015/312] usb: dwc3: invoke phy_resume after phy_init usb_phy_set_suspend(phy, 0) is called before usb_phy_init. Fix it. 
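Returning briefly to the INTC external IRQ pin fix above: the corrected shift formula can be sanity-checked with a few lines of standalone C (illustrative only; a 32-bit register packed with equal-width fields, most significant field first):

#include <stdio.h>

/* Old formula: only correct when the register holds exactly 8 fields,
 * i.e. when the bitfield width is 4 (as in the PRIO register). */
static int shift_old(int irq, int width) { return (7 - irq) * width; }

/* Fixed formula: correct for any width that divides 32 evenly. */
static int shift_new(int irq, int width) { return 32 - (irq + 1) * width; }

int main(void)
{
	int irq;

	for (irq = 0; irq < 4; irq++)
		printf("width 2, irq %d: old shift %2d, fixed shift %2d\n",
		       irq, shift_old(irq, 2), shift_new(irq, 2));
	return 0;
}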
Reported-by: Roger Quadros Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 74f9cf02da07..4dd1f7e2edc8 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); - usb_phy_set_suspend(dwc->usb2_phy, 0); - usb_phy_set_suspend(dwc->usb3_phy, 0); - spin_lock_init(&dwc->lock); platform_set_drvdata(pdev, dwc); @@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev) goto err0; } + usb_phy_set_suspend(dwc->usb2_phy, 0); + usb_phy_set_suspend(dwc->usb3_phy, 0); + ret = dwc3_event_buffers_setup(dwc); if (ret) { dev_err(dwc->dev, "failed to setup event buffers\n"); From 501fae512584e601d34945efccd35c5e5d759132 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 25 Nov 2013 15:31:22 +0530 Subject: [PATCH 016/312] usb: dwc3: power off usb phy in error path usb phy was power'ed on but was never power'ed off in the error path. Fix it. Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 4dd1f7e2edc8..a49217ae3533 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -569,6 +569,8 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_event_buffers_cleanup(dwc); err1: + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); dwc3_core_exit(dwc); err0: From 0b55149033403d78f345fb613d8ad52d16c161fc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 8 Nov 2013 01:43:17 -0800 Subject: [PATCH 017/312] usb: phy: twl6030-usb: signedness bug in twl6030_readb() "ret" needs to be signed for the error handling to work. Signed-off-by: Dan Carpenter Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-twl6030-usb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 30e8a61552d4..bad57ce77ba5 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module, static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) { - u8 data, ret = 0; + u8 data; + int ret; ret = twl_i2c_read_u8(module, &data, address); if (ret >= 0) From 23de2278ebc3a2f971ce45ca5e5e35c9d5a74040 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 21 Nov 2013 14:19:29 +0900 Subject: [PATCH 018/312] ARM: shmobile: r8a7790: Fix GPIO resources in DTS The r8a7790 GPIO resources are currently incorrect. Fix that by making them match the English r8a7790 v0.6 data sheet. Tested with GPIO LED using Lager DT reference. This problem has been present since GPIOs were added to the r8a7790 SoC by f98e10c88aa95bf7 ("ARM: shmobile: r8a7790: Add GPIO controller devices to device tree") in v3.12-rc1. 
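As an aside on the twl6030_readb() signedness fix above, a standalone sketch (hypothetical errno value, nothing TWL-specific) of why returning errors through a u8 silently defeats the error check:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ret_u8  = (uint8_t)-EINVAL;	/* what the old code stored */
	int     ret_int = -EINVAL;		/* what the fix stores      */

	/* An unsigned 8-bit variable can never be negative, so the error
	 * branch guarded by "ret < 0" was unreachable (compilers typically
	 * warn that the comparison is always false). */
	printf("u8:  negative? %d (value %u)\n", ret_u8 < 0, (unsigned)ret_u8);
	printf("int: negative? %d (value %d)\n", ret_int < 0, ret_int);
	return 0;
}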
Signed-off-by: Magnus Damm Acked-by: Laurent Pinchart Signed-off-by: Simon Horman --- arch/arm/boot/dts/r8a7790.dtsi | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index ee845fad939b..46e1d7ef163f 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -87,9 +87,9 @@ gic: interrupt-controller@f1001000 { interrupts = <1 9 0xf04>; }; - gpio0: gpio@ffc40000 { + gpio0: gpio@e6050000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc40000 0 0x2c>; + reg = <0 0xe6050000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 4 0x4>; #gpio-cells = <2>; @@ -99,9 +99,9 @@ gpio0: gpio@ffc40000 { interrupt-controller; }; - gpio1: gpio@ffc41000 { + gpio1: gpio@e6051000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc41000 0 0x2c>; + reg = <0 0xe6051000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 5 0x4>; #gpio-cells = <2>; @@ -111,9 +111,9 @@ gpio1: gpio@ffc41000 { interrupt-controller; }; - gpio2: gpio@ffc42000 { + gpio2: gpio@e6052000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc42000 0 0x2c>; + reg = <0 0xe6052000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 6 0x4>; #gpio-cells = <2>; @@ -123,9 +123,9 @@ gpio2: gpio@ffc42000 { interrupt-controller; }; - gpio3: gpio@ffc43000 { + gpio3: gpio@e6053000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc43000 0 0x2c>; + reg = <0 0xe6053000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 7 0x4>; #gpio-cells = <2>; @@ -135,9 +135,9 @@ gpio3: gpio@ffc43000 { interrupt-controller; }; - gpio4: gpio@ffc44000 { + gpio4: gpio@e6054000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc44000 0 0x2c>; + reg = <0 0xe6054000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 8 0x4>; #gpio-cells = <2>; @@ -147,9 +147,9 @@ gpio4: gpio@ffc44000 { interrupt-controller; }; - gpio5: gpio@ffc45000 { + gpio5: gpio@e6055000 { compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar"; - reg = <0 0xffc45000 0 0x2c>; + reg = <0 0xe6055000 0 0x50>; interrupt-parent = <&gic>; interrupts = <0 9 0x4>; #gpio-cells = <2>; From 08239ca2a053dbc3b082916bdfbd88e5a9ad9267 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 28 Nov 2013 10:31:35 +0800 Subject: [PATCH 019/312] bcache: fix sparse non static symbol warning Fixes the following sparse warning: drivers/md/bcache/btree.c:2220:5: warning: symbol 'btree_insert_fn' was not declared. Should it be static? 
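The change below is purely about linkage; a minimal standalone illustration (hypothetical helper name, not the bcache function) of why file-local helpers should be static:

#include <stdio.h>

/* Without "static" this helper gets external linkage: it lands in the global
 * symbol namespace, can collide with an identically named symbol in another
 * object file, and sparse warns that it "was not declared". */
static int demo_insert_fn(int key)
{
	return key * 2;
}

int main(void)
{
	printf("%d\n", demo_insert_fn(21));
	return 0;
}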
Signed-off-by: Wei Yongjun Signed-off-by: Kent Overstreet --- drivers/md/bcache/btree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce1..dc6e2be265b7 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -2217,7 +2217,7 @@ struct btree_insert_op { struct bkey *replace_key; }; -int btree_insert_fn(struct btree_op *b_op, struct btree *b) +static int btree_insert_fn(struct btree_op *b_op, struct btree *b) { struct btree_insert_op *op = container_of(b_op, struct btree_insert_op, op); From 87809942d3fa60bafb7a58d0bdb1c79e90a6821d Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Mon, 25 Nov 2013 19:00:14 +0000 Subject: [PATCH 020/312] libata: add ATA_HORKAGE_BROKEN_FPDMA_AA quirk for Seagate Momentus SpinPoint M8 We've received multiple reports in Fedora via (BZ 907193) that the Seagate Momentus SpinPoint M8 errors out when enabling AA: [ 2.555905] ata2.00: failed to enable AA (error_mask=0x1) [ 2.568482] ata2.00: failed to enable AA (error_mask=0x1) Add the ATA_HORKAGE_BROKEN_FPDMA_AA for this specific harddisk. Reported-by: Nicholas Signed-off-by: Michele Baldessari Tested-by: Nicholas Acked-by: Alan Cox Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org --- drivers/ata/libata-core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 75b93678bbcd..dae73efe5dbf 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4156,6 +4156,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, + /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, From f6308b36c411dc5afd6a6f73e6454722bfde57b7 Mon Sep 17 00:00:00 2001 From: Paul Drews Date: Mon, 25 Nov 2013 14:15:55 -0800 Subject: [PATCH 021/312] ACPI: Add BayTrail SoC GPIO and LPSS ACPI IDs This adds the new ACPI ID (INT33FC) for the BayTrail GPIO banks as seen on a BayTrail M System-On-Chip platform. This ACPI ID is used by the BayTrail GPIO (pinctrl) driver to manage the Low Power Subsystem (LPSS). Signed-off-by: Paul Drews Acked-by: Linus Walleij Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/acpi_lpss.c | 1 + drivers/pinctrl/pinctrl-baytrail.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 6745fe137b9e..e60390597372 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "80860F14", (unsigned long)&byt_sdio_dev_desc }, { "80860F41", (unsigned long)&byt_i2c_dev_desc }, { "INT33B2", }, + { "INT33FC", }, { "INT3430", (unsigned long)&lpt_dev_desc }, { "INT3431", (unsigned long)&lpt_dev_desc }, diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index 2832576d8b12..114f5ef4b73a 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c @@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = { static const struct acpi_device_id byt_gpio_acpi_match[] = { { "INT33B2", 0 }, + { "INT33FC", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); From ae1495b12df1897d4f42842a7aa7276d920f6290 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 2 Dec 2013 09:31:36 -0500 Subject: [PATCH 022/312] ext4: call ext4_error_inode() if jbd2_journal_dirty_metadata() fails While it's true that errors can only happen if there is a bug in jbd2_journal_dirty_metadata(), if a bug does happen, we need to halt the kernel or remount the file system read-only in order to avoid further data loss. The ext4_journal_abort_handle() function doesn't do any of this, and while it's likely that this call (since it doesn't adjust refcounts) will likely result in the file system eventually deadlocking since the current transaction will never be able to close, it's much cleaner to call let ext4's error handling system deal with this situation. There's a separate bug here which is that if certain jbd2 errors errors occur and file system is mounted errors=continue, the file system will probably eventually end grind to a halt as described above. But things have been this way in a long time, and usually when we have these sorts of errors it's pretty much a disaster --- and that's why the jbd2 layer aggressively retries memory allocations, which is the most likely cause of these jbd2 errors. Signed-off-by: "Theodore Ts'o" Reviewed-by: Jan Kara Cc: stable@vger.kernel.org --- fs/ext4/ext4_jbd2.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 17ac112ab101..3fe29de832c8 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, if (WARN_ON_ONCE(err)) { ext4_journal_abort_handle(where, line, __func__, bh, handle, err); + ext4_error_inode(inode, where, line, + bh->b_blocknr, + "journal_dirty_metadata failed: " + "handle type %u started at line %u, " + "credits %u/%u, errcode %d", + handle->h_type, + handle->h_line_no, + handle->h_requested_credits, + handle->h_buffer_credits, err); } } else { if (inode) From 10becdb402af4fd4808a0491a726b96128c41076 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 25 Nov 2013 09:47:00 +0100 Subject: [PATCH 023/312] ahci: imx: Explicitly clear IMX6Q_GPR13_SATA_MPLL_CLK_EN We must clear this IMX6Q_GPR13_SATA_MPLL_CLK_EN bit on i.MX6Q, otherwise Linux will fail to find the attached drive on some boards. 
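A hedged standalone sketch of the regmap_update_bits() behaviour the fix below relies on (illustrative bit positions, not the real GPR13 layout): bits outside the mask are never written, so MPLL_CLK_EN is only forced clear once it is added to the mask of the first update:

#include <stdio.h>

/* The same read-modify-write that regmap_update_bits() performs. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

#define MPLL_CLK_EN (1u << 4)	/* illustrative bit position */

int main(void)
{
	unsigned int reg = MPLL_CLK_EN | 0x3;	/* bit happens to be set */

	/* Mask without MPLL_CLK_EN: the bit survives the update. */
	printf("bit not in mask: 0x%08x\n", update_bits(reg, 0x3, 0x1));
	/* Mask including MPLL_CLK_EN, value 0 for it: the bit is cleared. */
	printf("bit in mask:     0x%08x\n",
	       update_bits(reg, 0x3 | MPLL_CLK_EN, 0x1));
	return 0;
}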
This entire fix was: Reported-by: Eric Nelson Signed-off-by: Marek Vasut Reviewed-by: Shawn Guo Cc: Richard Zhu Cc: Linux-IDE Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org --- drivers/ata/ahci_imx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index ae2d73fe321e..3e23e9941dad 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) /* * set PHY Paremeters, two steps to configure the GPR13, * one write for rest of parameters, mask of first write - * is 0x07fffffd, and the other one write for setting + * is 0x07ffffff, and the other one write for setting * the mpll_clk_en. */ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK @@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio) | IMX6Q_GPR13_SATA_TX_ATTEN_MASK | IMX6Q_GPR13_SATA_TX_BOOST_MASK | IMX6Q_GPR13_SATA_TX_LVL_MASK + | IMX6Q_GPR13_SATA_MPLL_CLK_EN | IMX6Q_GPR13_SATA_TX_EDGE_RATE , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M From 027476642811f8559cbe00ef6cc54db230e48a20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 2 Dec 2013 11:08:06 +0200 Subject: [PATCH 024/312] drm/i915: Take modeset locks around intel_modeset_setup_hw_state() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some lower level things get angry if we don't have modeset locks during intel_modeset_setup_hw_state(). Actually the resume and lid_notify codepaths already hold the locks, but the init codepath doesn't, so fix that. Note: This slipped through since we only disable pipes if the plane/pipe linking doesn't match. Which is only relevant on older gen3 mobile machines, if the BIOS fails to set up our preferred linking. Signed-off-by: Ville Syrjälä Cc: stable@vger.kernel.org Tested-and-reported-by: Paul Bolle [danvet: Add note now that I could confirm my theory with the log files Paul Bolle provided.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 080f6fd4e839..114db51996bd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11046,7 +11046,9 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_setup_overlay(dev); + drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, false); + drm_modeset_unlock_all(dev); } void intel_modeset_cleanup(struct drm_device *dev) From edd5b13313551d6b04acfb90d3db58ed3cf3c814 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Dec 2013 17:39:09 +0000 Subject: [PATCH 025/312] drm/i915: Do not clobber config status after a forced restore of hw state We call intel_modeset_setup_hw_state() along two paths, driver load/resume and after a lid event notification. During initialisation of the driver, it is imperative that we reset the config state. This correctly sets up the initial connector statuses and prepares the hardware for a thorough probing. However, during a lid event, we only want to undo the damage caused by the bios by resetting our last known mode. In this circumstance, we do not want to clobber our desired state. 
In order to try and keep sanity between the config state and our own tracking, do the drm_mode_config_reset() first along the load/resume paths before reading out the hw state and apply any definite known corrections. v2: "As discussed on irc I don't think we should force the connector state to anything here: Imo connector->status should reflect what we believe to be the true output connection state, whereas connector->encoder reflects whether this connector is wired up to a pipe. And since we no longer reject modeset on disconnected connectors and never nuked the pipe if the connector gets disconnected there's no reason for that - such policy is userspace's job. This regression has been introduced in commit 2e9388923e83bc4e2726f170a984621f1d582e77 Author: Daniel Vetter Date: Thu Oct 11 20:08:24 2012 +0200 drm/i915/crt: explicitly set up HOTPLUG_BITS on resume" so sayeth Daniel. Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: stable@vger.kernel.org (v3.8 and later) Cc: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/intel_display.c | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2e367a1c6a64..5b7b7e06cb3a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) intel_modeset_init_hw(dev); drm_modeset_lock_all(dev); + drm_mode_config_reset(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 114db51996bd..8e44b16950cf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11036,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, } intel_modeset_check_state(dev); - - drm_mode_config_reset(dev); } void intel_modeset_gem_init(struct drm_device *dev) @@ -11047,6 +11045,7 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_setup_overlay(dev); drm_modeset_lock_all(dev); + drm_mode_config_reset(dev); intel_modeset_setup_hw_state(dev, false); drm_modeset_unlock_all(dev); } From 5ae68b413214e847a2b5c6d3c65778482542bc1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 2 Dec 2013 11:23:39 +0200 Subject: [PATCH 026/312] drm/i915: Skip clock checks on BDW MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't have clock state readout support for DDI, so skip the pipe config clock checks on all DDI platforms. 
Signed-off-by: Ville Syrjälä Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8e44b16950cf..8b8bde7dce53 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev, if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) PIPE_CONF_CHECK_I(pipe_bpp); - if (!IS_HASWELL(dev)) { + if (!HAS_DDI(dev)) { PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); } From 4e8d2139802ce4f41936a687f06c560b12115247 Mon Sep 17 00:00:00 2001 From: Junho Ryu Date: Tue, 3 Dec 2013 18:10:28 -0500 Subject: [PATCH 027/312] ext4: fix use-after-free in ext4_mb_new_blocks ext4_mb_put_pa should hold pa->pa_lock before accessing pa->pa_count. While ext4_mb_use_preallocated checks pa->pa_deleted first and then increments pa->count later, ext4_mb_put_pa decrements pa->pa_count before holding pa->pa_lock and then sets pa->pa_deleted. * Free sequence ext4_mb_put_pa (1): atomic_dec_and_test pa->pa_count ext4_mb_put_pa (2): lock pa->pa_lock ext4_mb_put_pa (3): check pa->pa_deleted ext4_mb_put_pa (4): set pa->pa_deleted=1 ext4_mb_put_pa (5): unlock pa->pa_lock ext4_mb_put_pa (6): remove pa from a list ext4_mb_pa_callback: free pa * Use sequence ext4_mb_use_preallocated (1): iterate over preallocation ext4_mb_use_preallocated (2): lock pa->pa_lock ext4_mb_use_preallocated (3): check pa->pa_deleted ext4_mb_use_preallocated (4): increase pa->pa_count ext4_mb_use_preallocated (5): unlock pa->pa_lock ext4_mb_release_context: access pa * Use-after-free sequence [initial status] pa_deleted = 0, pa_count = 1> ext4_mb_use_preallocated (1): iterate over preallocation ext4_mb_use_preallocated (2): lock pa->pa_lock ext4_mb_use_preallocated (3): check pa->pa_deleted ext4_mb_put_pa (1): atomic_dec_and_test pa->pa_count [pa_count decremented] pa_deleted = 0, pa_count = 0> ext4_mb_use_preallocated (4): increase pa->pa_count [pa_count incremented] pa_deleted = 0, pa_count = 1> ext4_mb_use_preallocated (5): unlock pa->pa_lock ext4_mb_put_pa (2): lock pa->pa_lock ext4_mb_put_pa (3): check pa->pa_deleted ext4_mb_put_pa (4): set pa->pa_deleted=1 [race condition!] 
pa_deleted = 1, pa_count = 1> ext4_mb_put_pa (5): unlock pa->pa_lock ext4_mb_put_pa (6): remove pa from a list ext4_mb_pa_callback: free pa ext4_mb_release_context: access pa AddressSanitizer has detected use-after-free in ext4_mb_new_blocks Bug report: http://goo.gl/rG1On3 Signed-off-by: Junho Ryu Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/mballoc.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4d113efa024c..04766d9a29cd 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head) { struct ext4_prealloc_space *pa; pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); + + BUG_ON(atomic_read(&pa->pa_count)); + BUG_ON(pa->pa_deleted == 0); kmem_cache_free(ext4_pspace_cachep, pa); } @@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, ext4_group_t grp; ext4_fsblk_t grp_blk; - if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) - return; - /* in this short window concurrent discard can set pa_deleted */ spin_lock(&pa->pa_lock); + if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { + spin_unlock(&pa->pa_lock); + return; + } + if (pa->pa_deleted == 1) { spin_unlock(&pa->pa_lock); return; From 5946d089379a35dda0e531710b48fca05446a196 Mon Sep 17 00:00:00 2001 From: Eryu Guan Date: Tue, 3 Dec 2013 21:22:21 -0500 Subject: [PATCH 028/312] ext4: check for overlapping extents in ext4_valid_extent_entries() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A corrupted ext4 may have out of order leaf extents, i.e. extent: lblk 0--1023, len 1024, pblk 9217, flags: LEAF UNINIT extent: lblk 1000--2047, len 1024, pblk 10241, flags: LEAF UNINIT ^^^^ overlap with previous extent Reading such extent could hit BUG_ON() in ext4_es_cache_extent(). BUG_ON(end < lblk); The problem is that __read_extent_tree_block() tries to cache holes as well but assumes 'lblk' is greater than 'prev' and passes underflowed length to ext4_es_cache_extent(). Fix it by checking for overlapping extents in ext4_valid_extent_entries(). I hit this when fuzz testing ext4, and am able to reproduce it by modifying the on-disk extent by hand. Also add the check for (ee_block + len - 1) in ext4_valid_extent() to make sure the value is not overflow. Ran xfstests on patched ext4 and no regression. 
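A standalone sketch of the overlap check being added (simplified: extents as (lblk, len) pairs assumed sorted by lblk, nothing ext4-specific):

#include <stdio.h>

struct extent { unsigned int lblk; unsigned int len; };

/* Valid only if each extent starts after the previous one ends. */
static int extents_valid(const struct extent *e, int n)
{
	unsigned int prev_end = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (i > 0 && e[i].lblk <= prev_end)
			return 0;	/* overlaps the previous extent */
		prev_end = e[i].lblk + e[i].len - 1;
	}
	return 1;
}

int main(void)
{
	/* Mirrors the corrupted example above: 1000 <= 1023, so invalid. */
	struct extent bad[] = { { 0, 1024 }, { 1000, 1024 } };
	struct extent ok[]  = { { 0, 1024 }, { 1024, 1024 } };

	printf("bad: %d, ok: %d\n", extents_valid(bad, 2), extents_valid(ok, 2));
	return 0;
}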
Cc: Lukáš Czerner Signed-off-by: Eryu Guan Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/extents.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 35f65cf4f318..267c9fb53bf9 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) { ext4_fsblk_t block = ext4_ext_pblock(ext); int len = ext4_ext_get_actual_len(ext); + ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); + ext4_lblk_t last = lblock + len - 1; - if (len == 0) + if (lblock > last) return 0; return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); } @@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode, if (depth == 0) { /* leaf entries */ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); + struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; + ext4_fsblk_t pblock = 0; + ext4_lblk_t lblock = 0; + ext4_lblk_t prev = 0; + int len = 0; while (entries) { if (!ext4_valid_extent(inode, ext)) return 0; + + /* Check for overlapping extents */ + lblock = le32_to_cpu(ext->ee_block); + len = ext4_ext_get_actual_len(ext); + if ((lblock <= prev) && prev) { + pblock = ext4_ext_pblock(ext); + es->s_last_error_block = cpu_to_le64(pblock); + return 0; + } ext++; entries--; + prev = lblock + len - 1; } } else { struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); From df4e7ac0bb70abc97fbfd9ef09671fc084b3f9db Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 3 Dec 2013 11:20:06 +0100 Subject: [PATCH 029/312] ext2: Fix oops in ext2_get_block() called from ext2_quota_write() ext2_quota_write() doesn't properly setup bh it passes to ext2_get_block() and thus we hit assertion BUG_ON(maxblocks == 0) in ext2_get_blocks() (or we could actually ask for mapping arbitrary number of blocks depending on whatever value was on stack). Fix ext2_quota_write() to properly fill in number of blocks to map. CC: stable@vger.kernel.org # >= 2.6.12 Reviewed-by: "Theodore Ts'o" Reviewed-by: Christoph Hellwig Reported-by: Christoph Hellwig Signed-off-by: Jan Kara --- fs/ext2/super.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 288534920fe5..20d6697bd638 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; + tmp_bh.b_size = sb->s_blocksize; err = ext2_get_block(inode, blk, &tmp_bh, 1); if (err < 0) goto out; From c94cae53f9e564484f906a79be5639fc66e8cb02 Mon Sep 17 00:00:00 2001 From: Eric Trudeau Date: Wed, 4 Dec 2013 11:39:33 +0000 Subject: [PATCH 030/312] XEN: Grant table address, xen_hvm_resume_frames, is a phys_addr not a pfn From: Eric Trudeau xen_hvm_resume_frames stores the physical address of the grant table. englighten.c was incorrectly setting it as if it was a page frame number. This caused the table to be mapped into the guest at an unexpected physical address. Additionally, a warning is improved to include the grant table address which failed in xen_remap. 
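A quick standalone illustration of the unit mix-up being fixed (hedged, generic 4 KiB pages): a page frame number and a physical address differ by PAGE_SHIFT, so storing the one where the other is expected makes the later remap target a completely different location:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long res_start = 0xb0000000ull;	/* illustrative */

	/* The variable is later consumed as a physical address (it is handed
	 * to the remapping helper), so it must hold the address itself... */
	unsigned long long stored_fixed = res_start;

	/* ...but the buggy code shifted it down to a page frame number. */
	unsigned long long stored_buggy = res_start >> PAGE_SHIFT;

	printf("remap target, fixed: 0x%llx\n", stored_fixed);
	printf("remap target, buggy: 0x%llx\n", stored_buggy);
	return 0;
}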
Signed-off-by: Eric Trudeau Signed-off-by: Stefano Stabellini --- arch/arm/xen/enlighten.c | 4 ++-- drivers/xen/grant-table.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 83e4f959ee47..6a288c7be49f 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -224,10 +224,10 @@ static int __init xen_guest_init(void) } if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) return 0; - xen_hvm_resume_frames = res.start >> PAGE_SHIFT; + xen_hvm_resume_frames = res.start; xen_events_irq = irq_of_parse_and_map(node, 0); pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", - version, xen_events_irq, xen_hvm_resume_frames); + version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT)); xen_domain_type = XEN_HVM_DOMAIN; xen_setup_features(); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 028387192b60..aa846a48f400 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1176,7 +1176,8 @@ static int gnttab_setup(void) gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes); if (gnttab_shared.addr == NULL) { - pr_warn("Failed to ioremap gnttab share frames!\n"); + pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n", + xen_hvm_resume_frames); return -ENOMEM; } } From 7f62b6ee767586ee7e5d12787dbaaaf47a91979a Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 4 Dec 2013 11:18:36 +0800 Subject: [PATCH 031/312] ASoC: soc-pcm: Use valid condition for snd_soc_dai_digital_mute() in hw_free() The snd_soc_dai_digital_mute() here will be never executed because we only decrease codec->active in snd_soc_close(). Thus correct it. Signed-off-by: Nicolin Chen Signed-off-by: Mark Brown --- sound/soc/soc-pcm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 11a90cd027fa..891b9a9bcbf8 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) struct snd_soc_platform *platform = rtd->platform; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_dai *codec_dai = rtd->codec_dai; - struct snd_soc_codec *codec = rtd->codec; + bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass); /* apply codec digital mute */ - if (!codec->active) + if ((playback && codec_dai->playback_active == 1) || + (!playback && codec_dai->capture_active == 1)) snd_soc_dai_digital_mute(codec_dai, 1, substream->stream); /* free any machine hw params */ From a8f1f100ad994725a8295f6997524c57c72e06f5 Mon Sep 17 00:00:00 2001 From: Bo Shen Date: Wed, 4 Dec 2013 10:37:03 +0800 Subject: [PATCH 032/312] ASoC: atmel_ssc_dai: add dai trigger ops According to the SSC specifiation, it should be enabled after DMA is enabled. So, add trigger operation to make sure the right sequence. 
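For context, the ordering lands naturally on the DAI .trigger callback: at this point in the tree soc_pcm_trigger() runs the platform (DMA) trigger before the CPU DAI trigger, so anything done in the DAI trigger happens with DMA already started, whereas .prepare runs before any of that. A schematic sketch of the split, with the register writes reduced to comments; the names are placeholders, not the Atmel driver, and the real change follows below.

#include <sound/pcm.h>
#include <sound/soc.h>

static int example_trigger(struct snd_pcm_substream *substream,
			   int cmd, struct snd_soc_dai *dai)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* DMA is already running here: safe to enable the interface */
		break;
	default:
		/* STOP/SUSPEND/PAUSE_PUSH: disable the interface again */
		break;
	}
	return 0;
}

static const struct snd_soc_dai_ops example_dai_ops = {
	/* .prepare keeps the interface disabled; .trigger turns it on/off */
	.trigger = example_trigger,
};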
Signed-off-by: Bo Shen Tested-by: Richard Genoud Signed-off-by: Mark Brown --- sound/soc/atmel/atmel_ssc_dai.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 8697cedccd21..1ead3c977a51 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, dma_params = ssc_p->dma_params[dir]; - ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); + ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error); pr_debug("%s enabled SSC_SR=0x%08x\n", @@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream, return 0; } +static int atmel_ssc_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + struct atmel_ssc_info *ssc_p = &ssc_info[dai->id]; + struct atmel_pcm_dma_params *dma_params; + int dir; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + dir = 0; + else + dir = 1; + + dma_params = ssc_p->dma_params[dir]; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable); + break; + default: + ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable); + break; + } + + return 0; +} #ifdef CONFIG_PM static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai) @@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = { .startup = atmel_ssc_startup, .shutdown = atmel_ssc_shutdown, .prepare = atmel_ssc_prepare, + .trigger = atmel_ssc_trigger, .hw_params = atmel_ssc_hw_params, .set_fmt = atmel_ssc_set_dai_fmt, .set_clkdiv = atmel_ssc_set_dai_clkdiv, From bc567a93502275755492141524935269dcf0ea1b Mon Sep 17 00:00:00 2001 From: Bo Shen Date: Wed, 4 Dec 2013 10:37:04 +0800 Subject: [PATCH 033/312] ASoC: sam9x5_wm8731: change to work in DSP A mode Change sam9x5 with wm8731 work in DSP A mode, this will fix the left/right channel swap issue. Signed-off-by: Bo Shen Tested-by: Richard Genoud Signed-off-by: Mark Brown --- sound/soc/atmel/sam9x5_wm8731.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c index 1b372283bd01..7d6a9055874b 100644 --- a/sound/soc/atmel/sam9x5_wm8731.c +++ b/sound/soc/atmel/sam9x5_wm8731.c @@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) dai->stream_name = "WM8731 PCM"; dai->codec_dai_name = "wm8731-hifi"; dai->init = sam9x5_wm8731_init; - dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF + dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM; ret = snd_soc_of_parse_card_name(card, "atmel,model"); From 0d1430a3f4b7cfd8779b78740a4182321f3ca7f3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 4 Dec 2013 14:52:06 +0000 Subject: [PATCH 034/312] drm/i915: Hold mutex across i915_gem_release Inorder to serialise the closing of the file descriptor and its subsequent release of client requests with i915_gem_free_request(), we need to hold the struct_mutex in i915_gem_release(). Failing to do so has the potential to trigger an OOPS, later with a use-after-free. 
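The shape of the fix is the classic one of pushing a lock up into the caller so two teardown steps become a single critical section; the callee must then stop taking the lock itself or it would deadlock, which is why the matching lock/unlock disappears from i915_gem_context_close() below. A generic, non-i915 sketch of the pattern using pthreads as a stand-in for struct_mutex:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

struct client {
	void *contexts;
	void *requests;
};

/* Callees: caller must hold dev_lock (they previously locked internally). */
static void client_close_contexts(struct client *c)
{
	free(c->contexts);
	c->contexts = NULL;
}

static void client_release_requests(struct client *c)
{
	free(c->requests);
	c->requests = NULL;
}

/* Caller: one critical section covers both steps, so a concurrent free path
 * can never observe the client half torn down. */
static void client_preclose(struct client *c)
{
	pthread_mutex_lock(&dev_lock);
	client_close_contexts(c);
	client_release_requests(c);
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	struct client c = { malloc(16), malloc(16) };

	client_preclose(&c);
	return 0;
}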
Testcase: igt/gem_close_race Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=70874 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=71029 Reported-by: Eric Anholt Signed-off-by: Chris Wilson Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 ++ drivers/gpu/drm/i915/i915_gem_context.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0cab2d045135..ac9dac99b585 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev) void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { + mutex_lock(&dev->struct_mutex); i915_gem_context_close(dev, file_priv); i915_gem_release(dev, file_priv); + mutex_unlock(&dev->struct_mutex); } void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 72a3df32292f..4a05956162c1 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - mutex_lock(&dev->struct_mutex); idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); - mutex_unlock(&dev->struct_mutex); } static struct i915_hw_context * From 3459f11a8b16f40f9cde8e4281c2d5dd2ff1a732 Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Thu, 5 Dec 2013 13:04:10 +1030 Subject: [PATCH 035/312] virtio_balloon: update_balloon_size(): update correct field According to the virtio spec, the device configuration field that should be updated after an inflation or deflation operation is the 'actual' field, not the 'num_pages' one. Commit 855e0c5288177bcb193f6f6316952d2490478e1c swapped them in update_balloon_size(). Fix it. Signed-off-by: Luiz Capitulino Signed-off-by: Rusty Russell Fixes: 855e0c5288177bcb193f6f6316952d2490478e1c --- drivers/virtio/virtio_balloon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c444654fc33f..5c4a95b516cf 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb) { __le32 actual = cpu_to_le32(vb->num_pages); - virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, + virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, &actual); } From cf30d52e2d11523c42048ab89ed4215b5021526a Mon Sep 17 00:00:00 2001 From: Maria Dimakopoulou Date: Thu, 5 Dec 2013 01:24:37 +0200 Subject: [PATCH 036/312] perf/x86: Fix constraint table end marker bug The EVENT_CONSTRAINT_END() macro defines the end marker as a constraint with a weight of zero. This was all fine until we blacklisted the corrupting memory events on Intel IvyBridge. These events are blacklisted by using a counter bitmask of zero. Thus, they also get a constraint weight of zero. The iteration macro: for_each_constraint tests the weight==0. Therefore, it was stopping at the first blacklisted event, i.e., 0xd0. The corrupting events were therefore considered as unconstrained and were scheduled on any of the generic counters. This patch fixes the end marker to have a weight of -1. 
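The sentinel collision is independent of perf and fits in a handful of lines: if the terminator is encoded as weight == 0, any legitimately zero-weight entry ends the walk early and everything after it is silently treated as unconstrained. A standalone sketch with simplified structs and a GCC-style popcount builtin:

#include <stdio.h>

struct constraint {
	unsigned long mask;	/* allowed counters; 0 means blacklisted */
	int weight;		/* number of bits set in mask */
};

#define CONSTRAINT(m)	{ .mask = (m), .weight = __builtin_popcountl(m) }
#define CONSTRAINT_END	{ .weight = -1 }	/* unreachable by any popcount */

static const struct constraint table[] = {
	CONSTRAINT(0x0fUL),	/* ordinary event, counters 0-3 */
	CONSTRAINT(0x00UL),	/* blacklisted event: weight 0, but not the end */
	CONSTRAINT(0x03UL),	/* would be skipped by a weight==0 terminator test */
	CONSTRAINT_END,
};

int main(void)
{
	const struct constraint *c;

	for (c = table; c->weight != -1; c++)
		printf("mask=%#lx weight=%d\n", c->mask, c->weight);
	return 0;
}

The blacklisted entry keeps its empty mask and zero weight; it just can no longer be mistaken for the terminator.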
With this, the blacklisted events get an empty constraint and cannot be scheduled which is what we want for now. Signed-off-by: Maria Dimakopoulou Reviewed-by: Stephane Eranian Cc: peterz@infradead.org Cc: ak@linux.intel.com Cc: jolsa@redhat.com Cc: zheng.z.yan@intel.com Link: http://lkml.kernel.org/r/20131204232437.GA10689@starlight Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.h | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fd00bb29425d..c1a861829d81 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -262,11 +262,20 @@ struct cpu_hw_events { __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) -#define EVENT_CONSTRAINT_END \ - EVENT_CONSTRAINT(0, 0, 0) +/* + * We define the end marker as having a weight of -1 + * to enable blacklisting of events using a counter bitmask + * of zero and thus a weight of zero. + * The end marker has a weight that cannot possibly be + * obtained from counting the bits in the bitmask. + */ +#define EVENT_CONSTRAINT_END { .weight = -1 } +/* + * Check for end marker with weight == -1 + */ #define for_each_event_constraint(e, c) \ - for ((e) = (c); (e)->weight; (e)++) + for ((e) = (c); (e)->weight != -1; (e)++) /* * Extra registers for specific events. From 75704ecfbb4124139b78b71dd603f05d61abe689 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 4 Dec 2013 17:22:16 +0800 Subject: [PATCH 037/312] ASoC: wm8962: Enable SYSCLK provisonally before fetching generated DSPCLK_DIV DSPCLK_DIV can be only generated correctly after enabling SYSCLK. But if the current bias_level hasn't reached SND_SOC_BIAS_ON, DAPM won't enable SYSCLK, which would cause the calculation result from DSPCLK_DIV invalid since bit DSPCLK_DIV will be finally turned to its true value after DAPM enables SYSCLK while the driver won't calculate it again for the current instance. In this circumstance, a playback which needs non-zero DSPCLK_DIV would be distorted due to unexpected clock frequency resulted from an invalid DSPCLK_DIV value. So this patch provisionally enables the SYSCLK to get a valid DSPCLK_DIV for calculation and then disables it afterward. Signed-off-by: Nicolin Chen Acked-by: Charles Keepax Signed-off-by: Mark Brown --- sound/soc/codecs/wm8962.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 543c5c2631b6..0f17ed3e29f4 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec) snd_soc_update_bits(codec, WM8962_CLOCKING_4, WM8962_SYSCLK_RATE_MASK, clocking4); + /* DSPCLK_DIV can be only generated correctly after enabling SYSCLK. + * So we here provisionally enable it and then disable it afterward + * if current bias_level hasn't reached SND_SOC_BIAS_ON. 
+ */ + if (codec->dapm.bias_level != SND_SOC_BIAS_ON) + snd_soc_update_bits(codec, WM8962_CLOCKING2, + WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA); + dspclk = snd_soc_read(codec, WM8962_CLOCKING1); + + if (codec->dapm.bias_level != SND_SOC_BIAS_ON) + snd_soc_update_bits(codec, WM8962_CLOCKING2, + WM8962_SYSCLK_ENA_MASK, 0); + if (dspclk < 0) { dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk); return; From b1a0fbfdde65dffd83c84c006f84fa12041907c5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 4 Dec 2013 10:12:40 -0500 Subject: [PATCH 038/312] percpu: fix spurious sparse warnings from DEFINE_PER_CPU() When CONFIG_DEBUG_FORCE_WEAK_PER_CPU or CONFIG_ARCH_NEEDS_WEAK_PER_CPU is set, DEFINE_PER_CPU() explodes into cryptic series of definitions to still allow using "static" for percpu variables while keeping all per-cpu symbols unique in the kernel image which is required for weak symbols. This ultimately converts the actual symbol to global whether DEFINE_PER_CPU() is prefixed with static or not. Unfortunately, the macro forgot to add explicit extern declartion of the actual symbol ending up defining global symbol without preceding declaration for static definitions which naturally don't have matching DECLARE_PER_CPU(). The only ill effect is triggering of the following warnings. fs/inode.c:74:8: warning: symbol 'nr_inodes' was not declared. Should it be static? fs/inode.c:75:8: warning: symbol 'nr_unused' was not declared. Should it be static? Fix it by adding extern declaration in the DEFINE_PER_CPU() macro. Signed-off-by: Tejun Heo Reported-by: Wanlong Gao Tested-by: Wanlong Gao --- include/linux/percpu-defs.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 57e890abe1f0..a5fc7d01aad6 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -69,6 +69,7 @@ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + extern __PCPU_ATTRS(sec) __typeof__(type) name; \ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ __typeof__(type) name #else From f742a55231b950ed71ac8cc8beec0944ddda312c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 6 Dec 2013 10:17:53 +0100 Subject: [PATCH 039/312] drm/i915: fix pm init ordering Shovel a bit more of the the code into the setup function, and call it earlier. Otherwise lockdep is unhappy since we cancel the delayed resume work before it's initialized. While at it also shovel the pc8 setup code into the same functions. I wanted to also ditch the header declaration of the hws pc8 functions, but for unfathomable reasons that stuff is in intel_display.c instead of intel_pm.c. 
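The underlying rule is simply that every lock and piece of delayed work a teardown, suspend or error path may touch has to be initialised before any such path can run. A kernel-flavoured sketch of that split; the names are invented for illustration, not the i915 structures.

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_priv {
	struct mutex hw_lock;
	struct delayed_work resume_work;
};

static void example_resume_work(struct work_struct *work)
{
	/* deferred power-up would go here */
}

/* Called very early in driver load, before interrupts, modeset or any
 * error path that might cancel/flush the work or take the lock. */
static void example_pm_setup(struct example_priv *priv)
{
	mutex_init(&priv->hw_lock);
	INIT_DELAYED_WORK(&priv->resume_work, example_resume_work);
}

static void example_suspend(struct example_priv *priv)
{
	/* Only safe because example_pm_setup() has already run; cancelling an
	 * uninitialised delayed_work is exactly what upsets lockdep. */
	cancel_delayed_work_sync(&priv->resume_work);
}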
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=71980 Tested-by: Guo Jinxian Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 10 +--------- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 11 ++++++++++- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ac9dac99b585..197db0993a24 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1490,16 +1490,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->uncore.lock); spin_lock_init(&dev_priv->mm.object_stat_lock); mutex_init(&dev_priv->dpio_lock); - mutex_init(&dev_priv->rps.hw_lock); mutex_init(&dev_priv->modeset_restore_lock); - mutex_init(&dev_priv->pc8.lock); - dev_priv->pc8.requirements_met = false; - dev_priv->pc8.gpu_idle = false; - dev_priv->pc8.irqs_disabled = false; - dev_priv->pc8.enabled = false; - dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ - INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); + intel_pm_setup(dev); intel_display_crc_init(dev); @@ -1603,7 +1596,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } intel_irq_init(dev); - intel_pm_init(dev); intel_uncore_sanitize(dev); /* Try to make sure MCHBAR is enabled before poking at it */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ccdbecca070d..6f04fa4b31fd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1901,9 +1901,7 @@ void i915_queue_hangcheck(struct drm_device *dev); void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); -extern void intel_pm_init(struct drm_device *dev); extern void intel_hpd_init(struct drm_device *dev); -extern void intel_pm_init(struct drm_device *dev); extern void intel_uncore_sanitize(struct drm_device *dev); extern void intel_uncore_early_sanitize(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a18e88b3e425..79f91f26e288 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane, uint32_t sprite_width, int pixel_size, bool enabled, bool scaled); void intel_init_pm(struct drm_device *dev); +void intel_pm_setup(struct drm_device *dev); bool intel_fbc_enabled(struct drm_device *dev); void intel_update_fbc(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6e0d5e075b15..e0dec95c764e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6130,10 +6130,19 @@ int vlv_freq_opcode(int ddr_freq, int val) return val; } -void intel_pm_init(struct drm_device *dev) +void intel_pm_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + mutex_init(&dev_priv->rps.hw_lock); + + mutex_init(&dev_priv->pc8.lock); + dev_priv->pc8.requirements_met = false; + dev_priv->pc8.gpu_idle = false; + dev_priv->pc8.irqs_disabled = false; + dev_priv->pc8.enabled = false; + dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ + INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 
intel_gen6_powersave_work); } From acc240d41ea1ab9c488a79219fb313b5b46265ae Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 5 Dec 2013 15:42:34 +0100 Subject: [PATCH 040/312] drm/i915: Fix use-after-free in do_switch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So apparently under ridiculous amounts of memory pressure we can get into trouble in do_switch when we try to move the old hw context backing storage object onto the active lists. With list debugging enabled that usually results in us chasing a poisoned pointer - which means we've hit upon a vma that has been removed from all lrus with list_del (and then deallocated, so it's a real use-after free). Ian Lister has done some great callchain chasing and noticed that we can reenter do_switch: i915_gem_do_execbuffer() i915_switch_context() do_switch() from = ring->last_context; i915_gem_object_pin() i915_gem_object_bind_to_gtt() ret = drm_mm_insert_node_in_range_generic(); // If the above call fails then it will try i915_gem_evict_something() // If that fails it will call i915_gem_evict_everything() ... i915_gem_evict_everything() i915_gpu_idle() i915_switch_context(DEFAULT_CONTEXT) Like with everything else where the shrinker or eviction code can invalidate pointers we need to reload relevant state. Note that there's no need to recheck whether a context switch is still required because: - Doing a switch to the same context is harmless (besides wasting a bit of energy). - This can only happen with the default context. But since that one's pinned we'll never call down into evict_everything under normal circumstances. Note that there's a little driver bringup fun involved namely that we could recourse into do_switch for the initial switch. Atm we're fine since we assign the context pointer only after the call to do_switch at driver load or resume time. And in the gpu reset case we skip the entire setup sequence (which might be a bug on its own, but definitely not this one here). Cc'ing stable since apparently ChromeOS guys are seeing this in the wild (and not just on artificial stress tests), see the reference. Note that in upstream code doesn't calle evict_everything directly from evict_something, that's an extension in this product branch. But we can still hit upon this bug (and apparently we do, see the linked backtraces). I've noticed this while trying to construct a testcase for this bug and utterly failed to provoke it. It looks like we need to driver the system squarly into the lowmem wall and provoke the shrinker to evict the context object by doing the last-ditch evict_everything call. Aside: There's currently no means to get a badly-fragmenting hw context object away from a bad spot in the upstream code. We should fix this by at least adding some code to evict_something to handle hw contexts. 
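The rule of thumb the fix applies is: any pointer cached before a call that can recurse into eviction or the shrinker must be treated as stale and re-read afterwards. Reduced to a schematic; the helper and types here are invented stand-ins, and the actual diff follows below.

struct context;

struct ring {
	struct context *last_context;
};

/* Stand-in for i915_gem_object_pin(): under memory pressure it may recurse
 * into eviction, which itself switches contexts and rewrites last_context.
 * Here we simulate that by clearing the pointer. */
static int pin_and_maybe_evict(struct ring *ring)
{
	ring->last_context = 0;
	return 0;
}

static int do_switch_shape(struct ring *ring, struct context *to)
{
	struct context *from;
	int ret;

	ret = pin_and_maybe_evict(ring);
	if (ret)
		return ret;

	/* Reload only after the call that can recurse; a copy taken before it
	 * would now dangle, exactly as described above. */
	from = ring->last_context;

	/* ... reference 'from', switch the hardware to 'to' ... */
	(void)from;
	(void)to;
	return 0;
}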
References: https://code.google.com/p/chromium/issues/detail?id=248191 Reported-by: Ian Lister Cc: Ian Lister Cc: stable@vger.kernel.org Cc: Ben Widawsky Cc: Stéphane Marchesin Cc: Bloomfield, Jon Tested-by: Rafael Barbalho Reviewed-by: Ian Lister Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 4a05956162c1..b0f42b9ca037 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -421,11 +421,21 @@ static int do_switch(struct i915_hw_context *to) if (ret) return ret; - /* Clear this page out of any CPU caches for coherent swap-in/out. Note + /* + * Pin can switch back to the default context if we end up calling into + * evict_everything - as a last ditch gtt defrag effort that also + * switches to the default context. Hence we need to reload from here. + */ + from = ring->last_context; + + /* + * Clear this page out of any CPU caches for coherent swap-in/out. Note * that thanks to write = false in this call and us not setting any gpu * write domains when putting a context object onto the active list * (when switching away from it), this won't block. - * XXX: We need a real interface to do this instead of trickery. */ + * + * XXX: We need a real interface to do this instead of trickery. + */ ret = i915_gem_object_set_to_gtt_domain(to->obj, false); if (ret) { i915_gem_object_unpin(to->obj); From 8515736604941334bd9e8fc01edea685a228acd5 Mon Sep 17 00:00:00 2001 From: Andrey Vagin Date: Fri, 6 Dec 2013 09:06:41 +0400 Subject: [PATCH 041/312] block: fix memory leaks on unplugging block device All objects, which are allocated in blk_mq_register_disk, must be released in blk_mq_unregister_disk. I use a KVM virtual machine and virtio disk to reproduce this issue. kmemleak: 18 new suspected memory leaks (see /sys/kernel/debug/kmemleak) $ cat /sys/kernel/debug/kmemleak | head -n 30 unreferenced object 0xffff8800b6636150 (size 8): comm "kworker/0:2", pid 65, jiffies 4294809903 (age 86.358s) hex dump (first 8 bytes): 76 69 72 74 69 6f 34 00 virtio4. 
backtrace: [] kmemleak_alloc+0x4e/0xb0 [] __kmalloc_track_caller+0xf5/0x260 [] kstrdup+0x31/0x60 [] sysfs_new_dirent+0x2e/0x140 [] create_dir+0x38/0xe0 [] sysfs_create_dir_ns+0x73/0xc0 [] kobject_add_internal+0xc9/0x340 [] kobject_add+0x65/0xb0 [] device_add+0x128/0x660 [] device_register+0x1a/0x20 [] register_virtio_device+0x98/0xe0 [] virtio_pci_probe+0x12e/0x1c0 [] local_pci_probe+0x45/0xa0 [] pci_device_probe+0x121/0x130 [] driver_probe_device+0x87/0x390 [] __device_attach+0x3b/0x40 unreferenced object 0xffff8800b65aa1d8 (size 144): Fixes: 320ae51feed5 (blk-mq: new multi-queue block IO queueing mechanism) Cc: Jens Axboe Signed-off-by: Andrey Vagin Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index ba6cf8e9aa0a..b91ce75bd35d 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = { void blk_mq_unregister_disk(struct gendisk *disk) { struct request_queue *q = disk->queue; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i, j; + + queue_for_each_hw_ctx(q, hctx, i) { + hctx_for_each_ctx(hctx, ctx, j) { + kobject_del(&ctx->kobj); + kobject_put(&ctx->kobj); + } + kobject_del(&hctx->kobj); + kobject_put(&hctx->kobj); + } kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); kobject_del(&q->mq_kobj); + kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); } From b6497b383f65dd1bc60ea1f5d70e157a268fbd15 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Fri, 6 Dec 2013 17:55:56 +0000 Subject: [PATCH 042/312] xen: privcmd: do not return pages which we have failed to unmap This failure represents a hypervisor issue, but if it does occur then nothing good can come of returning pages which still refer to a foreign owned page into the general allocation pool. Instead we are forced to leak them. Log that we have done so. The potential for failure only exists for autotranslated guest (e.g. ARM and x86 PVH). Signed-off-by: Ian Campbell Signed-off-by: Stefano Stabellini Reviewed-by: David Vrabel --- drivers/xen/privcmd.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8e74590fa1bb..569a13b9e856 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma) { struct page **pages = vma->vm_private_data; int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + int rc; if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) return; - xen_unmap_domain_mfn_range(vma, numpgs, pages); - free_xenballooned_pages(numpgs, pages); + rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); + if (rc == 0) + free_xenballooned_pages(numpgs, pages); + else + pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", + numpgs, rc); kfree(pages); } From 266ccd505e8acb98717819cef9d91d66c7b237cc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 6 Dec 2013 15:07:32 -0500 Subject: [PATCH 043/312] cgroup: fix cgroup_create() error handling path ae7f164a09 ("cgroup: move cgroup->subsys[] assignment to online_css()") moved cgroup->subsys[] assignements later in cgroup_create() but didn't update error handling path accordingly leading to the following oops and leaking later css's after an online_css() failure. The oops is from cgroup destruction path being invoked on the partially constructed cgroup which is not ready to handle empty slots in cgrp->subsys[] array. 
BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 IP: [] cgroup_destroy_locked+0x118/0x2f0 PGD a780a067 PUD aadbe067 PMD 0 Oops: 0000 [#1] SMP Modules linked in: CPU: 6 PID: 7360 Comm: mkdir Not tainted 3.13.0-rc2+ #69 Hardware name: task: ffff8800b9dbec00 ti: ffff8800a781a000 task.ti: ffff8800a781a000 RIP: 0010:[] [] cgroup_destroy_locked+0x118/0x2f0 RSP: 0018:ffff8800a781bd98 EFLAGS: 00010282 RAX: ffff880586903878 RBX: ffff880586903800 RCX: ffff880586903820 RDX: ffff880586903860 RSI: ffff8800a781bdb0 RDI: ffff880586903820 RBP: ffff8800a781bde8 R08: ffff88060e0b8048 R09: ffffffff811d7bc1 R10: 000000000000008c R11: 0000000000000001 R12: ffff8800a72286c0 R13: 0000000000000000 R14: ffffffff81cf7a40 R15: 0000000000000001 FS: 00007f60ecda57a0(0000) GS:ffff8806272c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000008 CR3: 00000000a7a03000 CR4: 00000000000007e0 Stack: ffff880586903860 ffff880586903910 ffff8800a72286c0 ffff880586903820 ffffffff81cf7a40 ffff880586903800 ffff88060e0b8018 ffffffff81cf7a40 ffff8800b9dbec00 ffff8800b9dbf098 ffff8800a781bec8 ffffffff810ef5bf Call Trace: [] cgroup_mkdir+0x55f/0x5f0 [] vfs_mkdir+0xee/0x140 [] SyS_mkdirat+0x6e/0xf0 [] SyS_mkdir+0x19/0x20 [] system_call_fastpath+0x16/0x1b This patch moves reference bumping inside online_css() loop, clears css_ar[] as css's are brought online successfully, and updates err_destroy path so that either a css is fully online and destroyed by cgroup_destroy_locked() or the error path frees it. This creates a duplicate css free logic in the error path but it will be cleaned up soon. v2: Li pointed out that cgroup_destroy_locked() would do NULL-deref if invoked with a cgroup which doesn't have all css's populated. Update cgroup_destroy_locked() so that it skips NULL css's. Signed-off-by: Tejun Heo Acked-by: Li Zefan Reported-by: Vladimir Davydov Cc: stable@vger.kernel.org # v3.12+ --- kernel/cgroup.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8b729c278b64..bcb1755f410a 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4426,14 +4426,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); root->number_of_cgroups++; - /* each css holds a ref to the cgroup's dentry and the parent css */ - for_each_root_subsys(root, ss) { - struct cgroup_subsys_state *css = css_ar[ss->subsys_id]; - - dget(dentry); - css_get(css->parent); - } - /* hold a ref to the parent's dentry */ dget(parent->dentry); @@ -4445,6 +4437,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (err) goto err_destroy; + /* each css holds a ref to the cgroup's dentry and parent css */ + dget(dentry); + css_get(css->parent); + + /* mark it consumed for error path */ + css_ar[ss->subsys_id] = NULL; + if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && parent->parent) { pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. 
Nested cgroups may change behavior in the future.\n", @@ -4491,6 +4490,14 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, return err; err_destroy: + for_each_root_subsys(root, ss) { + struct cgroup_subsys_state *css = css_ar[ss->subsys_id]; + + if (css) { + percpu_ref_cancel_init(&css->refcnt); + ss->css_free(css); + } + } cgroup_destroy_locked(cgrp); mutex_unlock(&cgroup_mutex); mutex_unlock(&dentry->d_inode->i_mutex); @@ -4652,8 +4659,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) * will be invoked to perform the rest of destruction once the * percpu refs of all css's are confirmed to be killed. */ - for_each_root_subsys(cgrp->root, ss) - kill_css(cgroup_css(cgrp, ss)); + for_each_root_subsys(cgrp->root, ss) { + struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); + + if (css) + kill_css(css); + } /* * Mark @cgrp dead. This prevents further task migration and child From 851dd02b4f1fdb437586190f87217e3382a736bd Mon Sep 17 00:00:00 2001 From: Chris Ruehl Date: Wed, 4 Dec 2013 10:02:44 +0800 Subject: [PATCH 044/312] usb: phy-tegra-usb.c: wrong pointer check for remap UTMI usb: phy-tegra-usb.c: wrong pointer check for remap UTMI A wrong pointer was used to test the result of devm_ioremap() Acked-by: Stephen Warren Reviewed-by: Thierry Reding Signed-off-by: Chris Ruehl Acked-by: Venu Byravarasu Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-tegra-usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 82232acf1ab6..bbe4f8e6e8d7 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!tegra_phy->regs) { + if (!tegra_phy->pad_regs) { dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); return -ENOMEM; } From a8b14744429f90763f258ad0f69601cdcad610aa Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 4 Dec 2013 09:20:40 -0500 Subject: [PATCH 045/312] sysfs: give different locking key to regular and bin files 027a485d12e0 ("sysfs: use a separate locking class for open files depending on mmap") assigned different lockdep key to sysfs_open_file->mutex depending on whether the file implements mmap or not in an attempt to avoid spurious lockdep warning caused by merging of regular and bin file paths. While this restored some of the original behavior of using different locks (at least lockdep is concerned) for the different clases of files. The restoration wasn't full because now the lockdep key assignment depends on whether the file has mmap or not instead of whether it's a regular file or not. This means that bin files which don't implement mmap will get assigned the same lockdep class as regular files. This is problematic because file_operations for bin files still implements the mmap file operation and checking whether the sysfs file actually implements mmap happens in the file operation after grabbing @sysfs_open_file->mutex. We still end up adding locking dependency from mmap locking to sysfs_open_file->mutex to the regular file mutex which triggers spurious circular locking warning. Fix it by restoring the original behavior fully by differentiating lockdep key by whether the file is regular or bin, instead of the existence of mmap. 
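The reason two textually identical mutex_init() calls are enough is that mutex_init() is a macro which plants a static lock_class_key at each call site, so each branch hands lockdep its own class. A condensed sketch of the idiom, with invented structure names:

#include <linux/mutex.h>
#include <linux/types.h>

struct open_file {
	struct mutex lock;
	bool is_bin;
};

static void open_file_init_lock(struct open_file *of, bool is_bin)
{
	of->is_bin = is_bin;
	if (is_bin)
		mutex_init(&of->lock);	/* lockdep class: bin files */
	else
		mutex_init(&of->lock);	/* lockdep class: regular files */
}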
Signed-off-by: Tejun Heo Reported-by: Dave Jones Link: http://lkml.kernel.org/g/20131203184324.GA11320@redhat.com Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/file.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b94f93685093..35e7d08fe629 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; struct sysfs_open_file *of; - bool has_read, has_write, has_mmap; + bool has_read, has_write; int error = -EACCES; /* need attr_sd for attr and ops, its parent for kobj */ @@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) has_read = battr->read || battr->mmap; has_write = battr->write || battr->mmap; - has_mmap = battr->mmap; } else { const struct sysfs_ops *ops = sysfs_file_ops(attr_sd); @@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file) has_read = ops->show; has_write = ops->store; - has_mmap = false; } /* check perms and supported operations */ @@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file) * open file has a separate mutex, it's okay as long as those don't * happen on the same file. At this point, we can't easily give * each file a separate locking class. Let's differentiate on - * whether the file has mmap or not for now. + * whether the file is bin or not for now. */ - if (has_mmap) + if (sysfs_is_bin(attr_sd)) mutex_init(&of->mutex); else mutex_init(&of->mutex); From 9105bb149bbbc555d2e11ba5166dfe7a24eae09e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 8 Dec 2013 20:52:31 -0500 Subject: [PATCH 046/312] ext4: fix del_timer() misuse for ->s_err_report That thing should be del_timer_sync(); consider what happens if ext4_put_super() call of del_timer() happens to come just as it's getting run on another CPU. Since that timer reschedules itself to run next day, you are pretty much guaranteed that you'll end up with kfree'd scheduled timer, with usual fun consequences. AFAICS, that's -stable fodder all way back to 2010... [the second del_timer_sync() is almost certainly not needed, but it doesn't hurt either] Signed-off-by: Al Viro Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c977f4e4e63b..9d70c0c177c7 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb) } ext4_es_unregister_shrinker(sbi); - del_timer(&sbi->s_err_report); + del_timer_sync(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); ext4_ext_release(sb); @@ -4184,7 +4184,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } failed_mount3: ext4_es_unregister_shrinker(sbi); - del_timer(&sbi->s_err_report); + del_timer_sync(&sbi->s_err_report); if (sbi->s_flex_groups) ext4_kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); From 30fac0f75da24dd5bb43c9e911d2039a984ac815 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 8 Dec 2013 21:11:59 -0500 Subject: [PATCH 047/312] ext4: Do not reserve clusters when fs doesn't support extents When the filesystem doesn't support extents (like in ext2/3 compatibility modes), there is no need to reserve any clusters. 
Space estimates for writing are exact, hole punching doesn't need new metadata, and there are no unwritten extents to convert. This fixes a problem when filesystem still having some free space when accessed with a native ext2/3 driver suddently reports ENOSPC when accessed with ext4 driver. Reported-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Reviewed-by: Lukas Czerner Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/super.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9d70c0c177c7..1f7784de05b6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3316,10 +3316,18 @@ int ext4_calculate_overhead(struct super_block *sb) } -static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) +static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb) { ext4_fsblk_t resv_clusters; + /* + * There's no need to reserve anything when we aren't using extents. + * The space estimates are exact, there are no unwritten extents, + * hole punching doesn't need new metadata... This is needed especially + * to keep ext2/3 backward compatibility. + */ + if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) + return 0; /* * By default we reserve 2% or 4096 clusters, whichever is smaller. * This should cover the situations where we can not afford to run @@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi) * allocation would require 1, or 2 blocks, higher numbers are * very rare. */ - resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; + resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >> + EXT4_SB(sb)->s_cluster_bits; do_div(resv_clusters, 50); resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); @@ -4071,10 +4080,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "available"); } - err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi)); + err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb)); if (err) { ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " - "reserved pool", ext4_calculate_resv_clusters(sbi)); + "reserved pool", ext4_calculate_resv_clusters(sb)); goto failed_mount4a; } From f6c07cad081ba222d63623d913aafba5586c1d2c Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sun, 8 Dec 2013 21:12:59 -0500 Subject: [PATCH 048/312] jbd2: don't BUG but return ENOSPC if a handle runs out of space If a handle runs out of space, we currently stop the kernel with a BUG in jbd2_journal_dirty_metadata(). This makes it hard to figure out what might be going on. So return an error of ENOSPC, so we can let the file system layer figure out what is going on, to make it more likely we can get useful debugging information). This should make it easier to debug problems such as the one which was reported by: https://bugzilla.kernel.org/show_bug.cgi?id=44731 The only two callers of this function are ext4_handle_dirty_metadata() and ocfs2_journal_dirty(). The ocfs2 function will trigger a BUG_ON(), which means there will be no change in behavior. The ext4 function will call ext4_error_inode() which will print the useful debugging information and then handle the situation using ext4's error handling mechanisms (i.e., which might mean halting the kernel or remounting the file system read-only). 
Also, since both file systems already call WARN_ON(), drop the WARN_ON from jbd2_journal_dirty_metadata() to avoid two stack traces from being displayed. Signed-off-by: "Theodore Ts'o" Cc: ocfs2-devel@oss.oracle.com Acked-by: Joel Becker --- fs/jbd2/transaction.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 7aa9a32573bb..b0b74e58697b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) * once a transaction -bzzz */ jh->b_modified = 1; - J_ASSERT_JH(jh, handle->h_buffer_credits > 0); + if (handle->h_buffer_credits <= 0) { + ret = -ENOSPC; + goto out_unlock_bh; + } handle->h_buffer_credits--; } @@ -1373,7 +1376,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) jbd2_journal_put_journal_head(jh); out: JBUFFER_TRACE(jh, "exit"); - WARN_ON(ret); /* All errors are bugs, so dump the stack */ return ret; } From 75685071cd5b26d64fd2577330387aeab019ac97 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 8 Dec 2013 21:13:59 -0500 Subject: [PATCH 049/312] jbd2: revise KERN_EMERG error messages Some of KERN_EMERG printk messages do not really deserve this log level and the one in log_wait_commit() is even rather useless (the journal has been previously aborted and *that* is where we should have been complaining). So make some messages just KERN_ERR and remove the useless message. Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/jbd2/journal.c | 8 +++----- fs/jbd2/transaction.c | 10 +++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 52032647dd4a..f66faedce6f0 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) read_lock(&journal->j_state_lock); #ifdef CONFIG_JBD2_DEBUG if (!tid_geq(journal->j_commit_request, tid)) { - printk(KERN_EMERG + printk(KERN_ERR "%s: error: j_commit_request=%d, tid=%d\n", __func__, journal->j_commit_request, tid); } @@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) } read_unlock(&journal->j_state_lock); - if (unlikely(is_journal_aborted(journal))) { - printk(KERN_EMERG "journal commit I/O error\n"); + if (unlikely(is_journal_aborted(journal))) err = -EIO; - } return err; } @@ -2645,7 +2643,7 @@ static void __exit journal_exit(void) #ifdef CONFIG_JBD2_DEBUG int n = atomic_read(&nr_journal_heads); if (n) - printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n); + printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n); #endif jbd2_remove_jbd_stats_proc_entry(); jbd2_journal_destroy_caches(); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index b0b74e58697b..80797cfa8c49 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -932,7 +932,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!frozen_buffer) { - printk(KERN_EMERG + printk(KERN_ERR "%s: OOM for frozen_buffer\n", __func__); JBUFFER_TRACE(jh, "oom!"); @@ -1166,7 +1166,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) if (!jh->b_committed_data) { committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!committed_data) { - printk(KERN_EMERG "%s: No memory for committed data\n", + printk(KERN_ERR "%s: No memory for committed data\n", __func__); err = -ENOMEM; goto out; @@ -1308,7 +1308,7 @@ int 
jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "fastpath"); if (unlikely(jh->b_transaction != journal->j_running_transaction)) { - printk(KERN_EMERG "JBD: %s: " + printk(KERN_ERR "JBD: %s: " "jh->b_transaction (%llu, %p, %u) != " "journal->j_running_transaction (%p, %u)", journal->j_devname, @@ -1335,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "already on other transaction"); if (unlikely(jh->b_transaction != journal->j_committing_transaction)) { - printk(KERN_EMERG "JBD: %s: " + printk(KERN_ERR "JBD: %s: " "jh->b_transaction (%llu, %p, %u) != " "journal->j_committing_transaction (%p, %u)", journal->j_devname, @@ -1348,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) ret = -EINVAL; } if (unlikely(jh->b_next_transaction != transaction)) { - printk(KERN_EMERG "JBD: %s: " + printk(KERN_ERR "JBD: %s: " "jh->b_next_transaction (%llu, %p, %u) != " "transaction (%p, %u)", journal->j_devname, From a67c848a8b9aa9e471f9eaadd2cb29cc527462cf Mon Sep 17 00:00:00 2001 From: Dmitry Monakhov Date: Sun, 8 Dec 2013 21:14:59 -0500 Subject: [PATCH 050/312] jbd2: rename obsoleted msg JBD->JBD2 Rename performed via: perl -pi -e 's/JBD:/JBD2:/g' fs/jbd2/*.c Signed-off-by: Dmitry Monakhov Signed-off-by: "Theodore Ts'o" Reviewed-by: Carlos Maiolino --- fs/jbd2/journal.c | 10 +++++----- fs/jbd2/recovery.c | 2 +- fs/jbd2/transaction.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index f66faedce6f0..5fa344afb49a 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1525,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal) if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { /* Can't have checksum v1 and v2 on at the same time! 
*/ - printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 " + printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " "at the same time!\n"); goto out; } if (!jbd2_verify_csum_type(journal, sb)) { - printk(KERN_ERR "JBD: Unknown checksum type\n"); + printk(KERN_ERR "JBD2: Unknown checksum type\n"); goto out; } @@ -1539,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal) if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(journal->j_chksum_driver)) { - printk(KERN_ERR "JBD: Cannot load crc32c driver.\n"); + printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); err = PTR_ERR(journal->j_chksum_driver); journal->j_chksum_driver = NULL; goto out; @@ -1548,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal) /* Check superblock checksum */ if (!jbd2_superblock_csum_verify(journal, sb)) { - printk(KERN_ERR "JBD: journal checksum error\n"); + printk(KERN_ERR "JBD2: journal checksum error\n"); goto out; } @@ -1834,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(journal->j_chksum_driver)) { - printk(KERN_ERR "JBD: Cannot load crc32c " + printk(KERN_ERR "JBD2: Cannot load crc32c " "driver.\n"); journal->j_chksum_driver = NULL; return 0; diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 3929c50428b1..3b6bb19d60b1 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal, be32_to_cpu(tmp->h_sequence))) { brelse(obh); success = -EIO; - printk(KERN_ERR "JBD: Invalid " + printk(KERN_ERR "JBD2: Invalid " "checksum recovering " "block %llu in log\n", blocknr); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 80797cfa8c49..8360674c85bc 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1308,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "fastpath"); if (unlikely(jh->b_transaction != journal->j_running_transaction)) { - printk(KERN_ERR "JBD: %s: " + printk(KERN_ERR "JBD2: %s: " "jh->b_transaction (%llu, %p, %u) != " "journal->j_running_transaction (%p, %u)", journal->j_devname, @@ -1335,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) JBUFFER_TRACE(jh, "already on other transaction"); if (unlikely(jh->b_transaction != journal->j_committing_transaction)) { - printk(KERN_ERR "JBD: %s: " + printk(KERN_ERR "JBD2: %s: " "jh->b_transaction (%llu, %p, %u) != " "journal->j_committing_transaction (%p, %u)", journal->j_devname, @@ -1348,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) ret = -EINVAL; } if (unlikely(jh->b_next_transaction != transaction)) { - printk(KERN_ERR "JBD: %s: " + printk(KERN_ERR "JBD2: %s: " "jh->b_next_transaction (%llu, %p, %u) != " "transaction (%p, %u)", journal->j_devname, From 11e6a09fba446ce799a9de51d2d99586024bbdaa Mon Sep 17 00:00:00 2001 From: Jakob Bornecrantz Date: Sun, 8 Dec 2013 23:39:04 -0800 Subject: [PATCH 051/312] drm/vmwgfx: Add max surface memory param Userspace uses this to workaround overcommit issues by flushing the command stream early. 
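For completeness, the consumer side of a parameter like this is a single getparam round trip. The sketch below is a hypothetical userspace caller: the libdrm helpers (drmOpen/drmCommandWriteRead/drmClose), the DRM_VMW_GET_PARAM command index and the header install path are quoted from memory and should be checked against libdrm and vmwgfx_drm.h; the struct field names match the kernel handler above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>	/* install path varies by distribution */

int main(void)
{
	struct drm_vmw_getparam_arg arg;
	int fd = drmOpen("vmwgfx", NULL);

	if (fd < 0)
		return 1;

	memset(&arg, 0, sizeof(arg));
	arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) == 0)
		printf("DRM_VMW_PARAM_MAX_SURF_MEMORY = %llu\n",
		       (unsigned long long)arg.value);

	drmClose(fd);
	return 0;
}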
Signed-off-by: Jakob Bornecrantz Reviewed-by: Thomas Hellstrom --- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3 +++ include/uapi/drm/vmwgfx_drm.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a51f48e3e917..45d5b5ab6ca9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, SVGA_FIFO_3D_HWVERSION)); break; } + case DRM_VMW_PARAM_MAX_SURF_MEMORY: + param->value = dev_priv->memory_size; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..f854ca4a1372 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -75,6 +75,7 @@ #define DRM_VMW_PARAM_FIFO_CAPS 4 #define DRM_VMW_PARAM_MAX_FB_SIZE 5 #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 +#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 /** * struct drm_vmw_getparam_arg From d825a04387ff4ce66117306f2862c7cedca5c597 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 29 Nov 2013 02:24:18 +0100 Subject: [PATCH 052/312] KVM: PPC: Book3S: PR: Don't clobber our exit handler id We call a C helper to save all svcpu fields into our vcpu. The C ABI states that r12 is considered volatile. However, we keep our exit handler id in r12 currently. So we need to save it away into a non-volatile register instead that definitely does get preserved across the C call. This bug usually didn't hit anyone yet since gcc is smart enough to generate code that doesn't even need r12 which means it stayed identical throughout the call by sheer luck. But we can't rely on that. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_interrupts.S | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041c14ea..5e7cb32ce4dc 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -132,9 +132,17 @@ kvm_start_lightweight: * */ + PPC_LL r3, GPR4(r1) /* vcpu pointer */ + + /* + * kvmppc_copy_from_svcpu can clobber volatile registers, save + * the exit handler id to the vcpu and restore it from there later. + */ + stw r12, VCPU_TRAP(r3) + /* Transfer reg values from shadow vcpu back to vcpu struct */ /* On 64-bit, interrupts are still off at this point */ - PPC_LL r3, GPR4(r1) /* vcpu pointer */ + GET_SHADOW_VCPU(r4) bl FUNC(kvmppc_copy_from_svcpu) nop @@ -151,7 +159,6 @@ kvm_start_lightweight: */ ld r3, PACA_SPRG3(r13) mtspr SPRN_SPRG3, r3 - #endif /* CONFIG_PPC_BOOK3S_64 */ /* R7 = vcpu */ @@ -177,7 +184,7 @@ kvm_start_lightweight: PPC_STL r31, VCPU_GPR(R31)(r7) /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ - mr r5, r12 + lwz r5, VCPU_TRAP(r7) /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) From c9dad7f9db4ed42de37d3f0ef2b2c0e10d5b6f92 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 29 Nov 2013 02:27:23 +0100 Subject: [PATCH 053/312] KVM: PPC: Book3S: PR: Export kvmppc_copy_to|from_svcpu The kvmppc_copy_{to,from}_svcpu functions are publically visible, so we should also export them in a header for others C files to consume. So far we didn't need this because we only called it from asm code. The next patch will introduce a C caller. 
Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b76674d..bc23b1ba7980 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); +extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, + struct kvm_vcpu *vcpu); +extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, + struct kvmppc_book3s_shadow_vcpu *svcpu); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { From 40fdd8c88c4a5e9b26bfbed2215ac661f24aef07 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 29 Nov 2013 02:29:00 +0100 Subject: [PATCH 054/312] KVM: PPC: Book3S: PR: Make svcpu -> vcpu store preempt savvy As soon as we get back to our "highmem" handler in virtual address space we may get preempted. Today the reason we can get preempted is that we replay interrupts and all the lazy logic thinks we have interrupts enabled. However, it's not hard to make the code interruptible and that way we can enable and handle interrupts even earlier. This fixes random guest crashes that happened with CONFIG_PREEMPT=y for me. Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + arch/powerpc/kvm/book3s_pr.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348a4db9..412b2f389474 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -106,6 +106,7 @@ struct kvmppc_host_state { }; struct kvmppc_book3s_shadow_vcpu { + bool in_use; ulong gpr[14]; u32 cr; u32 xer; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3dd171..5b9e9063cfaf 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; + svcpu->in_use = 0; svcpu_put(svcpu); #endif vcpu->cpu = smp_processor_id(); @@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + if (svcpu->in_use) { + kvmppc_copy_from_svcpu(vcpu, svcpu); + } memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; svcpu_put(svcpu); @@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, svcpu->ctr = vcpu->arch.ctr; svcpu->lr = vcpu->arch.lr; svcpu->pc = vcpu->arch.pc; + svcpu->in_use = true; } /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, struct kvmppc_book3s_shadow_vcpu *svcpu) { + /* + * vcpu_put would just call us again because in_use hasn't + * been updated yet. + */ + preempt_disable(); + + /* + * Maybe we were already preempted and synced the svcpu from + * our preempt notifiers. 
Don't bother touching this svcpu then. + */ + if (!svcpu->in_use) + goto out; + vcpu->arch.gpr[0] = svcpu->gpr[0]; vcpu->arch.gpr[1] = svcpu->gpr[1]; vcpu->arch.gpr[2] = svcpu->gpr[2]; @@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, vcpu->arch.fault_dar = svcpu->fault_dar; vcpu->arch.fault_dsisr = svcpu->fault_dsisr; vcpu->arch.last_inst = svcpu->last_inst; + svcpu->in_use = false; + +out: + preempt_enable(); } static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) From 3d3319b45eea26df56c53aae1a65adf74c8ab12a Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 29 Nov 2013 02:32:31 +0100 Subject: [PATCH 055/312] KVM: PPC: Book3S: PR: Enable interrupts earlier Now that the svcpu sync is interrupt aware we can enable interrupts earlier in the exit code path again, moving 32bit and 64bit closer together. While at it, document the fact that we're always executing the exit path with interrupts enabled so that the next person doesn't trip over this. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_interrupts.S | 6 +----- arch/powerpc/kvm/book3s_rmhandlers.S | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 5e7cb32ce4dc..f779450cb07c 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -129,6 +129,7 @@ kvm_start_lightweight: * R12 = exit handler id * R13 = PACA * SVCPU.* = guest * + * MSR.EE = 1 * */ @@ -148,11 +149,6 @@ kvm_start_lightweight: nop #ifdef CONFIG_PPC_BOOK3S_64 - /* Re-enable interrupts */ - ld r3, HSTATE_HOST_MSR(r13) - ori r3, r3, MSR_EE - MTMSR_EERI(r3) - /* * Reload kernel SPRG3 value. * No need to save guest value as usermode can't modify SPRG3. diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9edab8..c3c5231adade 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) li r6, MSR_IR | MSR_DR andc r6, r5, r6 /* Clear DR and IR in MSR value */ -#ifdef CONFIG_PPC_BOOK3S_32 /* * Set EE in HOST_MSR so that it's enabled when we get into our - * C exit handler function. On 64-bit we delay enabling - * interrupts until we have finished transferring stuff - * to or from the PACA. + * C exit handler function. */ ori r5, r5, MSR_EE -#endif mtsrr0 r7 mtsrr1 r6 RFI From 241bf43321a10815225f477bba96a42285a2da73 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Fri, 6 Dec 2013 13:34:50 -0700 Subject: [PATCH 056/312] ASoC: tegra: fix uninitialized variables in set_fmt In tegra*_i2s_set_fmt(), in the (fmt == SND_SOC_DAIFMT_CBM_CFM) case, "val" is never assigned to, but left uninitialized. The other case does initialize it. Fix this by initializing val at the start of the function, and only ever ORing into it. Update the handling of "mask" so it works the same way for consistency. Update tegra20_spdif.c to use the same code-style for consistency, even though it doesn't happen to suffer from the same problem at present.
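The pattern being fixed is easier to see in reduced form; the register and bit names below are placeholders, not the real Tegra definitions:

	unsigned int mask = 0, val = 0;		/* initialise both, then only OR in bits */

	mask |= FOO_CTRL_MASTER_ENABLE;
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		val |= FOO_CTRL_MASTER_ENABLE;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		break;				/* val legitimately stays 0 */
	default:
		return -EINVAL;
	}
	regmap_update_bits(i2s->regmap, FOO_CTRL_REG, mask, val);

Without the "= 0" initialisers, the CBM_CFM branch would hand whatever stack garbage happened to be in val to regmap_update_bits().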
Signed-off-by: Stephen Warren Reviewed-by: Thierry Reding Signed-off-by: Mark Brown Fixes: 0f163546a772 ("ASoC: tegra: use regmap more directly") Cc: --- sound/soc/tegra/tegra20_i2s.c | 6 +++--- sound/soc/tegra/tegra20_spdif.c | 10 +++++----- sound/soc/tegra/tegra30_i2s.c | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c index 364bf6a907e1..8c819f811470 100644 --- a/sound/soc/tegra/tegra20_i2s.c +++ b/sound/soc/tegra/tegra20_i2s.c @@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai); - unsigned int mask, val; + unsigned int mask = 0, val = 0; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: @@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai, return -EINVAL; } - mask = TEGRA20_I2S_CTRL_MASTER_ENABLE; + mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: - val = TEGRA20_I2S_CTRL_MASTER_ENABLE; + val |= TEGRA20_I2S_CTRL_MASTER_ENABLE; break; case SND_SOC_DAIFMT_CBM_CFM: break; diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c index 08bc6931c7c7..8c7c1028e579 100644 --- a/sound/soc/tegra/tegra20_spdif.c +++ b/sound/soc/tegra/tegra20_spdif.c @@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream, { struct device *dev = dai->dev; struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai); - unsigned int mask, val; + unsigned int mask = 0, val = 0; int ret, spdifclock; - mask = TEGRA20_SPDIF_CTRL_PACK | - TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; + mask |= TEGRA20_SPDIF_CTRL_PACK | + TEGRA20_SPDIF_CTRL_BIT_MODE_MASK; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: - val = TEGRA20_SPDIF_CTRL_PACK | - TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; + val |= TEGRA20_SPDIF_CTRL_PACK | + TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT; break; default: return -EINVAL; diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index 231a785b3921..02247fee1cf7 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai); - unsigned int mask, val; + unsigned int mask = 0, val = 0; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: @@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai, return -EINVAL; } - mask = TEGRA30_I2S_CTRL_MASTER_ENABLE; + mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: - val = TEGRA30_I2S_CTRL_MASTER_ENABLE; + val |= TEGRA30_I2S_CTRL_MASTER_ENABLE; break; case SND_SOC_DAIFMT_CBM_CFM: break; From 5dfc03f141993c101c626c84d639019e98a4f39c Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Fri, 6 Dec 2013 23:38:28 +0800 Subject: [PATCH 057/312] ASoC: fsl: imx-wm8962: Don't update bias_level in machine driver If we update it here, the set_bias_level() of Codec driver won't be normally called and we will then miss some essential procedures in set_bias_level() of the Codec driver. Thus drop it. 
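In other words, a machine-level set_bias_level() callback should only do its own clock/FLL work and leave the bias_level bookkeeping to the ASoC core, which updates it once the codec's own callback has run. A hedged sketch of the intended shape (clock handling omitted):

	static int machine_set_bias_level(struct snd_soc_card *card,
					  struct snd_soc_dapm_context *dapm,
					  enum snd_soc_bias_level level)
	{
		/* configure sysclk/FLL for the target level here if needed */

		/* do not write dapm->bias_level: the core owns that state */
		return 0;
	}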
Signed-off-by: Nicolin Chen Signed-off-by: Mark Brown --- sound/soc/fsl/imx-wm8962.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 72064e995687..72718e19a3c7 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c @@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card, break; } - dapm->bias_level = level; - return 0; } From 6b9f3e65282b3bd7ed77e7b2b1edfe7cfed48115 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Tue, 3 Dec 2013 14:26:33 -0700 Subject: [PATCH 058/312] ASoC: don't leak on error in snd_dmaengine_pcm_register If snd_dmaengine_pcm_register()'s call to snd_soc_add_platform() fails, all objects allocated during registration are leaked. Fix this by adding error-handling code. Signed-off-by: Stephen Warren Acked-by: Lars-Peter Clausen Signed-off-by: Mark Brown --- sound/soc/soc-generic-dmaengine-pcm.c | 38 +++++++++++++++++++-------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index cbc9c96ce1f4..41949af3baae 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm, } } +static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm) +{ + unsigned int i; + + for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; + i++) { + if (!pcm->chan[i]) + continue; + dma_release_channel(pcm->chan[i]); + if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) + break; + } +} + /** * snd_dmaengine_pcm_register - Register a dmaengine based PCM device * @dev: The parent device for the PCM device @@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev, const struct snd_dmaengine_pcm_config *config, unsigned int flags) { struct dmaengine_pcm *pcm; + int ret; pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); if (!pcm) @@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev, dmaengine_pcm_request_chan_of(pcm, dev); if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) - return snd_soc_add_platform(dev, &pcm->platform, + ret = snd_soc_add_platform(dev, &pcm->platform, &dmaengine_no_residue_pcm_platform); else - return snd_soc_add_platform(dev, &pcm->platform, + ret = snd_soc_add_platform(dev, &pcm->platform, &dmaengine_pcm_platform); + if (ret) + goto err_free_dma; + + return 0; + +err_free_dma: + dmaengine_pcm_release_chan(pcm); + kfree(pcm); + return ret; } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); @@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev) { struct snd_soc_platform *platform; struct dmaengine_pcm *pcm; - unsigned int i; platform = snd_soc_lookup_platform(dev); if (!platform) @@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev) pcm = soc_platform_to_pcm(platform); - for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) { - if (pcm->chan[i]) { - dma_release_channel(pcm->chan[i]); - if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) - break; - } - } - snd_soc_remove_platform(platform); + dmaengine_pcm_release_chan(pcm); kfree(pcm); } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); From 4144bc861ed7934d56f16d2acd808d44af0fcc90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 29 Nov 2013 20:17:45 +0100 Subject: [PATCH 059/312] usb: cdc-wdm: manage_power should always set needs_remote_wakeup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Cc: stable Reported-by: Oliver Neukum Signed-off-by: Bjørn Mork Acked-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 4d387596f3f0..0b23a8639311 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on) { /* need autopm_get/put here to ensure the usbcore sees the new value */ int rv = usb_autopm_get_interface(intf); - if (rv < 0) - goto err; intf->needs_remote_wakeup = on; - usb_autopm_put_interface(intf); -err: - return rv; + if (!rv) + usb_autopm_put_interface(intf); + return 0; } static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) From 52d0dc7597c89b2ab779f3dcb9b9bf0800dd9218 Mon Sep 17 00:00:00 2001 From: Dmitry Kunilov Date: Tue, 3 Dec 2013 12:11:30 -0800 Subject: [PATCH 060/312] usb: serial: zte_ev: move support for ZTE AC2726 from zte_ev back to option ZTE AC2726 EVDO modem drops ppp connection every minute when driven by zte_ev but works fine when driven by option. Move the support for AC2726 back to option driver. Signed-off-by: Dmitry Kunilov Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 2 ++ drivers/usb/serial/zte_ev.c | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 496b7e39d5be..cc7a24154490 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_MC2718 0xffe8 +#define ZTE_PRODUCT_AC2726 0xfff1 #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index fca4c752a4ed..eae2c873b39f 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c @@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x19d2, 0xfffd) }, { USB_DEVICE(0x19d2, 0xfffc) }, { USB_DEVICE(0x19d2, 0xfffb) }, - /* AC2726, AC8710_V3 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) }, + /* AC8710_V3 */ { USB_DEVICE(0x19d2, 0xfff6) }, { USB_DEVICE(0x19d2, 0xfff7) }, { USB_DEVICE(0x19d2, 0xfff8) }, From cc5c9eb67f912cb2c349b04063ff9f444affbc59 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Thu, 5 Dec 2013 15:20:49 +0800 Subject: [PATCH 061/312] usb: chipidea: host: Only disable the vbus regulator if it is not NULL Commit 40ed51a4b (usb: chipidea: host: add vbus regulator control) introduced a smatch complaint because regulator_disable() is called without checking whether ci->platdata->reg_vbus is not NULL. Fix this by adding the check. 
This patch is needed for 3.12 stable Cc: stable Reported-by: Dan Carpenter Signed-off-by: Fabio Estevam Signed-off-by: Peter Chen Signed-off-by: Greg Kroah-Hartman --- drivers/usb/chipidea/host.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 59e6020ea753..526cd77563d8 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci) return ret; disable_reg: - regulator_disable(ci->platdata->reg_vbus); + if (ci->platdata->reg_vbus) + regulator_disable(ci->platdata->reg_vbus); put_hcd: usb_put_hcd(hcd); From 5a1e1456fc633da5291285b1fff75d2a7507375b Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Thu, 5 Dec 2013 15:20:50 +0800 Subject: [PATCH 062/312] usb: chipidea: fix nobody cared IRQ when booting with host role If we connect a Male-A-to-Male-A cable between the otg-host and a host PC, ci->vbus_active is set wrongly, which causes the controller to run in peripheral mode when we load the gadget module (ci_udc_start will be run), even though the software runs in host mode due to id = 0. ehci_irq can't handle the suspend (USBi_SLI) interrupt that is enabled for peripheral mode, so nobody handles the interrupt and we get a "nobody cared" IRQ error. This patch is needed for 3.12 stable Cc: stable Acked-by: Michael Grzeschik Reported-by: Marc Kleine-Budde Tested-by: Marc Kleine-Budde Signed-off-by: Marc Kleine-Budde Signed-off-by: Peter Chen Signed-off-by: Greg Kroah-Hartman --- drivers/usb/chipidea/core.c | 4 ++++ drivers/usb/chipidea/udc.c | 3 --- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 5d8981c5235e..6e73f8cd60e5 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev) : CI_ROLE_GADGET; } + /* only update vbus status for peripheral */ + if (ci->role == CI_ROLE_GADGET) + ci_handle_vbus_change(ci); + ret = ci_role_start(ci, ci->role); if (ret) { dev_err(dev, "can't start %s role\n", ci_role(ci)->name); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index b34c81969cba..69d20fbb38a2 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci) pm_runtime_no_callbacks(&ci->gadget.dev); pm_runtime_enable(&ci->gadget.dev); - /* Update ci->vbus_active */ - ci_handle_vbus_change(ci); - return retval; destroy_eps: From 7122c3e9154b5d9a7422f68f02d8acf050fad2b0 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 10 Dec 2013 16:46:29 +1030 Subject: [PATCH 063/312] scripts/link-vmlinux.sh: only filter kernel symbols for arm Actually CONFIG_PAGE_OFFSET isn't the same as PAGE_OFFSET, so it isn't easy to figure out the PAGE_OFFSET defined in the header file from scripts. Because CONFIG_PAGE_OFFSET may not be defined on some ARCHs (64-bit ARCHs), or may be defined as a bogus value in the !MMU case, this patch only applies the filter on ARM when CONFIG_PAGE_OFFSET is defined, as the original problem is only on ARM.
Cc: Cc: Rusty Russell Fixes: f6537f2f0eba4eba3354e48dbe3047db6d8b6254 Singed-off-by: Ming Lei Signed-off-by: Rusty Russell --- scripts/link-vmlinux.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 32b10f53d0b4..2dcb37736d84 100644 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -82,7 +82,9 @@ kallsyms() kallsymopt="${kallsymopt} --all-symbols" fi - kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" + if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then + kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" + fi local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" From ad071acb53110c8efd26ff1e5b5d57449b43833b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 9 Dec 2013 10:37:24 +0000 Subject: [PATCH 064/312] drm/i915: Repeat eviction search after idling the GPU With the advent of hw context support, we gained some objects that are pinned for the duration of their request. That is we can make aperture space available by idling the GPU and in the process performing a context switch back to the always-pinned default context. As such, we should not conclude that there is no space in the aperture for the current object until we have unpinned any such context objects. Note that we also have the problem of outstanding pageflips preventing eviction of their framebuffer objects to resolve. Testcase: igt/gem_ctx_exec/eviction Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=72507 Signed-off-by: Chris Wilson Tested-by: lu hua Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_evict.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index b7376533633d..8f3adc7d0dc8 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); +search_again: /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(vma, &vm->inactive_list, mm_list) { if (mark_free(vma, &unwind_list)) @@ -115,10 +116,17 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, list_del_init(&vma->exec_list); } - /* We expect the caller to unpin, evict all and try again, or give up. - * So calling i915_gem_evict_vm() is unnecessary. + /* Can we unpin some objects such as idle hw contents, + * or pending flips? */ - return -ENOSPC; + ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev); + if (ret) + return ret; + + /* Only idle the GPU and repeat the search once */ + i915_gem_retire_requests(dev); + nonblocking = true; + goto search_again; found: /* drm_mm doesn't allow any other other operations while From f2fa75cdf8b1e1c44353747a156ec25c7478c07a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 10 Dec 2013 09:43:09 +0100 Subject: [PATCH 065/312] MAINTAINERS: update GPIO maintainers entry Add Alexandre Courbot as co-maintainer of the GPIO subsystem. Provide a pointer to the GPIO GIT tree. Update the documentation file path. Move around to put people and git tree on top. 
Acked-by: Alexandre Courbot Signed-off-by: Linus Walleij --- MAINTAINERS | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 13c15c83a46e..7e053e0fe38e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3765,9 +3765,11 @@ F: include/uapi/linux/gigaset_dev.h GPIO SUBSYSTEM M: Linus Walleij -S: Maintained +M: Alexandre Courbot L: linux-gpio@vger.kernel.org -F: Documentation/gpio.txt +T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git +S: Maintained +F: Documentation/gpio/ F: drivers/gpio/ F: include/linux/gpio* F: include/asm-generic/gpio.h From 8620f394c4f9abd13e4fdf927d9c2bbeda74cde7 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 26 Nov 2013 02:45:34 +0100 Subject: [PATCH 066/312] sh-pfc: Fix PINMUX_GPIO macro Commit 7cbb0e55e27e ("sh-pfc: Don't duplicate argument to PINMUX_GPIO macro") erroneously modified the PINMUX_GPIO macro in a way that resulted in all pins being named "name". Fix the macro to name the pins correctly. Cc: stable@vger.kernel.org Signed-off-by: Laurent Pinchart Signed-off-by: Linus Walleij --- drivers/pinctrl/sh-pfc/sh_pfc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 11bd0d970a52..e2142956a8e5 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -254,7 +254,7 @@ struct sh_pfc_soc_info { #define PINMUX_GPIO(_pin) \ [GPIO_##_pin] = { \ .pin = (u16)-1, \ - .name = __stringify(name), \ + .name = __stringify(GPIO_##_pin), \ .enum_id = _pin##_DATA, \ } From f5837ec11f8cfa6d53ebc5806582771b2c9988c6 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 5 Dec 2013 11:23:35 +0200 Subject: [PATCH 067/312] gpio: twl4030: Fix regression for twl gpio LED output Commit 0b2aa8be introduced a regression that causes failure in setting LED GPO direction to OUT. This causes USB host probe failures for Beagleboard C4. platform usb_phy_gen_xceiv.2: Driver usb_phy_gen_xceiv requests probe deferral hsusb2_vcc: Failed to request enable GPIO510: -22 reg-fixed-voltage reg-fixed-voltage.0.auto: Failed to register regulator: -22 reg-fixed-voltage: probe of reg-fixed-voltage.0.auto failed with error -22 direction_out/direction_in must return 0 if the operation succeeded.
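In gpiolib terms, a gpio_chip direction callback returns 0 on success or a negative errno, nothing else; a leftover -EINVAL on the success path is what broke the fixed-regulator probe above. An illustrative callback shape, with hw_set_output() standing in for the real register access:

	static int my_direction_out(struct gpio_chip *chip, unsigned offset, int value)
	{
		int ret = hw_set_output(offset, value);	/* hypothetical helper */

		if (ret)
			return ret;		/* propagate the negative errno */
		return 0;			/* success must be exactly 0 */
	}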
Also, don't update direction flag and output data if twl4030_set_gpio_direction() failed inside twl_direction_out(); Cc: stable@vger.kernel.org Signed-off-by: Roger Quadros Acked-by: Tony Lindgren Signed-off-by: Linus Walleij --- drivers/gpio/gpio-twl4030.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index b97d6a6577b9..f9996899c1f2 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset) if (offset < TWL4030_GPIO_MAX) ret = twl4030_set_gpio_direction(offset, 1); else - ret = -EINVAL; + ret = -EINVAL; /* LED outputs can't be set as input */ if (!ret) priv->direction &= ~BIT(offset); @@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value) static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); - int ret = -EINVAL; + int ret = 0; mutex_lock(&priv->mutex); - if (offset < TWL4030_GPIO_MAX) + if (offset < TWL4030_GPIO_MAX) { ret = twl4030_set_gpio_direction(offset, 0); + if (ret) { + mutex_unlock(&priv->mutex); + return ret; + } + } + + /* + * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output + */ priv->direction |= BIT(offset); mutex_unlock(&priv->mutex); From 09ca27579ee5a1b6f537b82165080b85c1febac8 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Wed, 20 Nov 2013 10:15:11 +0800 Subject: [PATCH 068/312] clocksource: time-efm32: Select CLKSRC_MMIO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The time-efm32 driver uses the clocksource MMIO functions. Thus it needs to select CLKSRC_MMIO in Kconfig. Signed-off-by: Axel Lin Signed-off-by: Daniel Lezcano Acked-by: Uwe Kleine-König --- drivers/clocksource/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5c07a56962db..634c4d6dd45a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK config CLKSRC_EFM32 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) + select CLKSRC_MMIO default ARCH_EFM32 help Support to use the timers of EFM32 SoCs as clock source and clock From c813eff078588733a3d1a46c033c2d59d66a263b Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Tue, 26 Nov 2013 18:20:14 -0300 Subject: [PATCH 069/312] clocksource: armada-370-xp: Register sched_clock after the counter reset This commit registers the sched_clock _after_ the counter reset (instead of before). This removes the timestamp 'jump' in kernel log messages. Before this change: [ 0.000000] sched_clock: 32 bits at 25MHz, resolution 40ns, wraps every 171798691800ns [ 0.000000] Initializing Coherency fabric [ 0.000000] Aurora cache controller enabled [ 0.000000] l2x0: 16 ways, CACHE_ID 0x00000100, AUX_CTRL 0x1a696b12, Cache size: 1024 kB [ 163.507447] Calibrating delay loop... 
1325.05 BogoMIPS (lpj=662528) [ 163.521419] pid_max: default: 32768 minimum: 301 [ 163.526185] Mount-cache hash table entries: 512 [ 163.531095] CPU: Testing write buffer coherency: ok After this change: [ 0.000000] sched_clock: 32 bits at 25MHz, resolution 40ns, wraps every 171798691800ns [ 0.000000] Initializing Coherency fabric [ 0.000000] Aurora cache controller enabled [ 0.000000] l2x0: 16 ways, CACHE_ID 0x00000100, AUX_CTRL 0x1a696b12, Cache size: 1024 kB [ 0.016849] Calibrating delay loop... 1325.05 BogoMIPS (lpj=662528) [ 0.030820] pid_max: default: 32768 minimum: 301 [ 0.035588] Mount-cache hash table entries: 512 [ 0.040500] CPU: Testing write buffer coherency: ok Signed-off-by: Ezequiel Garcia Signed-off-by: Daniel Lezcano Acked-by: Jason Cooper --- drivers/clocksource/time-armada-370-xp.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index d8e47e502785..4e7f6802e840 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -255,11 +255,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; - /* - * Set scale and timer for sched_clock. - */ - sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); - /* * Setup free-running clocksource timer (interrupts * disabled). @@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); + /* + * Set scale and timer for sched_clock. + */ + sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); + clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, "armada_370_xp_clocksource", timer_clk, 300, 32, clocksource_mmio_readl_down); From 4c4b053235fa73db1ea241aa5a6b021afb0ed8db Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Sat, 19 Oct 2013 00:49:48 +0200 Subject: [PATCH 070/312] clocksource: clksrc-of: Do not drop unheld reference on device node MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When booting a recent kernel on ARM with OF_DYNAMIC enabled, the kernel warns about the following: [ 0.000000] ERROR: Bad of_node_put() on /timer@50004600 [ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.12.0-rc5-next-20131017-00077-gedfd827-dirty #406 [ 0.000000] [] (unwind_backtrace+0x0/0xf4) from [] (show_stack+0x10/0x14) [ 0.000000] [] (show_stack+0x10/0x14) from [] (dump_stack+0x9c/0xc8) [ 0.000000] [] (dump_stack+0x9c/0xc8) from [] (of_node_release+0x90/0x9c) [ 0.000000] [] (of_node_release+0x90/0x9c) from [] (of_find_matching_node_and_match+0x78/0xb4) [ 0.000000] [] (of_find_matching_node_and_match+0x78/0xb4) from [] (clocksource_of_init+0x60/0x70) [ 0.000000] [] (clocksource_of_init+0x60/0x70) from [] (start_kernel+0x1f4/0x33c) [ 0.000000] [] (start_kernel+0x1f4/0x33c) from [<80008074>] (0x80008074) This is caused by clocksource_of_init() dropping a reference on the device node that it never took. The reference taken by the loop is implicitly dropped on subsequent iterations. See the implementation of and the comment on top of the of_find_matching_node_and_match() function for reference (no pun intended). 
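The iterator in question follows the sketch below (mirroring clocksource_of_init()): of_find_matching_node_and_match() drops the reference on the node passed in as "from" and returns the next node with a fresh reference held, so the loop body must not add its own of_node_put():

	struct device_node *np = NULL;
	const struct of_device_id *match;

	while ((np = of_find_matching_node_and_match(np, __clksrc_of_table, &match))) {
		clocksource_of_init_fn init_func = match->data;

		init_func(np);
		/* no of_node_put(np) here - the next iteration consumes it */
	}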
Signed-off-by: Thierry Reding Signed-off-by: Daniel Lezcano Cc: Uwe Kleine-König --- drivers/clocksource/clksrc-of.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 35639cf4e5a2..b9ddd9e3a2f5 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c @@ -35,6 +35,5 @@ void __init clocksource_of_init(void) init_func = match->data; init_func(np); - of_node_put(np); } } From 6db50bb67598668c525f12e2f7191f5d03ca46f2 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 2 Dec 2013 09:29:35 +0000 Subject: [PATCH 071/312] clocksource: sunxi: Stop timer from ticking before enabling interrupts The sun4i timer can still be ticking when we enable the interrupt. If another timer is actually used (A7 architected timer, for example), odds are that the interrupt will eventually fire with the event_handler pointer being NULL. The obvious fix is to stop the timer before registering the interrupt. Observed and tested on sun7i (cubietruck). Cc: Daniel Lezcano Acked-by: Maxime Ripard Signed-off-by: Marc Zyngier Signed-off-by: Daniel Lezcano --- drivers/clocksource/sun4i_timer.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 2fb4695a28d8..a4f6119aafd8 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node) writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), timer_base + TIMER_CTL_REG(0)); + /* Make sure timer is stopped before playing with interrupts */ + sun4i_clkevt_time_stop(0); + ret = setup_irq(irq, &sun4i_timer_irq); if (ret) pr_warn("failed to setup irq %d\n", irq); From 85dc6ee1237c8a4a7742e6abab96a20389b7d682 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 10 Dec 2013 19:49:18 +0100 Subject: [PATCH 072/312] clocksource: dw_apb_timer_of: Fix read_sched_clock read_sched_clock() should return the inverted (~) value because the clock is a countdown implementation. read_sched_clock() should be the same as __apbt_read_clocksource(). Signed-off-by: Dinh Nguyen Signed-off-by: Daniel Lezcano --- drivers/clocksource/dw_apb_timer_of.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 45ba8aecc729..b29d7cdadd82 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -108,7 +108,7 @@ static void __init add_clocksource(struct device_node *source_timer) static u64 read_sched_clock(void) { - return __raw_readl(sched_io_base); + return ~__raw_readl(sched_io_base); } static const struct of_device_id sptimer_ids[] __initconst = { From 9ab4727c1d41e50b67aecde4bf11879560a3ca78 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 10 Dec 2013 19:49:18 +0100 Subject: [PATCH 073/312] clocksource: dw_apb_timer_of: Fix support for dts binding "snps,dw-apb-timer" In commit 620f5e1cbf (dts: Rename DW APB timer compatible strings), both "snps,dw-apb-timer-sp" and "snps,dw-apb-timer-osc" were deprecated in place of "snps,dw-apb-timer". But the driver also needs to be updated in order to support this new binding "snps,dw-apb-timer".
Signed-off-by: Dinh Nguyen Signed-off-by: Daniel Lezcano --- drivers/clocksource/dw_apb_timer_of.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index b29d7cdadd82..2a2ea2717f3a 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -113,7 +113,6 @@ static u64 read_sched_clock(void) static const struct of_device_id sptimer_ids[] __initconst = { { .compatible = "picochip,pc3x2-rtc" }, - { .compatible = "snps,dw-apb-timer-sp" }, { /* Sentinel */ }, }; @@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer) num_called++; } CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); -CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init); From 6962d914f317b119e0db7189199b21ec77a4b3e0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 9 Dec 2013 14:53:36 +0100 Subject: [PATCH 074/312] xhci: Limit the spurious wakeup fix only to HP machines We've got regression reports that my previous fix for spurious wakeups after S5 on HP Haswell machines leads to the automatic reboot at shutdown on some machines. It turned out that the fix for one side triggers another BIOS bug in other side. So, it's exclusive. Since the original S5 wakeups have been confirmed only on HP machines, it'd be safer to apply it only to limited machines. As a wild guess, limiting to machines with HP PCI SSID should suffice. This patch should be backported to kernels as old as 3.12, that contain the commit 638298dc66ea36623dbc2757a24fc2c4ab41b016 "xhci: Fix spurious wakeups after S5 on Haswell". Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=66171 Cc: stable@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: Sarah Sharp Tested-by: Reported-by: Niklas Schnelle Reported-by: Giorgos Reported-by: --- drivers/usb/host/xhci-pci.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index b8dffd59eb25..73f5208714a4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) * any other sleep) on Haswell machines with LPT and LPT-LP * with the new Intel BIOS */ - xhci->quirks |= XHCI_SPURIOUS_WAKEUP; + /* Limit the quirk to only known vendors, as this triggers + * yet another BIOS bug on some other machines + * https://bugzilla.kernel.org/show_bug.cgi?id=66171 + */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) + xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { From 2f5ae4901acc6fdee939718658fa9f59c4f2217b Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Oct 2013 15:26:33 +0000 Subject: [PATCH 075/312] DRM: Armada: implement lastclose() for fbhelper Call drm_fb_helper_restore_fbdev_mode() upon last close so that in the event of the X server crashing, we have some kind of mode restored. 
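The hook slots into struct drm_driver as sketched below; drm_fb_helper_restore_fbdev_mode() wants the modeset locks held, which is what the new armada_fbdev_lastclose() in the diff takes care of. Names other than the DRM helpers are illustrative:

	static void my_lastclose(struct drm_device *dev)
	{
		struct my_private *priv = dev->dev_private;	/* hypothetical */

		drm_modeset_lock_all(dev);
		if (priv->fbdev)
			drm_fb_helper_restore_fbdev_mode(priv->fbdev);
		drm_modeset_unlock_all(dev);
	}

	static struct drm_driver my_driver = {
		.lastclose	= my_lastclose,
		/* ... */
	};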
Reviewed-by: Rob Clark Reviewed-by: Thierry Reding Signed-off-by: Russell King --- drivers/gpu/drm/armada/armada_drm.h | 1 + drivers/gpu/drm/armada/armada_drv.c | 7 ++++++- drivers/gpu/drm/armada/armada_fbdev.c | 10 ++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index eef09ec9a5ff..a72cae03b99b 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *, extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; int armada_fbdev_init(struct drm_device *); +void armada_fbdev_lastclose(struct drm_device *); void armada_fbdev_fini(struct drm_device *); int armada_overlay_plane_create(struct drm_device *, unsigned long); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 7aede900a221..15425d23c9cc 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = { DRM_UNLOCKED), }; +static void armada_drm_lastclose(struct drm_device *dev) +{ + armada_fbdev_lastclose(dev); +} + static const struct file_operations armada_drm_fops = { .owner = THIS_MODULE, .llseek = no_llseek, @@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = { .open = NULL, .preclose = NULL, .postclose = NULL, - .lastclose = NULL, + .lastclose = armada_drm_lastclose, .unload = armada_drm_unload, .get_vblank_counter = drm_vblank_count, .enable_vblank = armada_drm_enable_vblank, diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index dd5ea77dac96..743570e99f90 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev) return ret; } +void armada_fbdev_lastclose(struct drm_device *dev) +{ + struct armada_private *priv = dev->dev_private; + + drm_modeset_lock_all(dev); + if (priv->fbdev) + drm_fb_helper_restore_fbdev_mode(priv->fbdev); + drm_modeset_unlock_all(dev); +} + void armada_fbdev_fini(struct drm_device *dev) { struct armada_private *priv = dev->dev_private; From 077acbab67a785b5f5ae60e8932fcf6028072359 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Oct 2013 15:35:27 +0000 Subject: [PATCH 076/312] DRM: Armada: destroy framebuffer after helper Destroy the framebuffer only after the helper, since the helper may still be referencing the framebufer at this point. Reviewed-by: Rob Clark Reviewed-by: Thierry Reding Signed-off-by: Russell King --- drivers/gpu/drm/armada/armada_fbdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 743570e99f90..b348b16f81e2 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -202,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev) framebuffer_release(info); } + drm_fb_helper_fini(fbh); + if (fbh->fb) fbh->fb->funcs->destroy(fbh->fb); - drm_fb_helper_fini(fbh); - priv->fbdev = NULL; } } From 7513e09596374bb7fbbecfee945fecb5b357c0e9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 27 Nov 2013 15:46:55 +0000 Subject: [PATCH 077/312] DRM: Armada: fix printing of phys_addr_t/dma_addr_t These can be 64-bit quantities, so fix them up appropriately. 
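Two common ways to print such types safely, shown against fields like the obj->phys_addr/obj->dev_addr used below; the explicit cast is what this patch uses, while the %pa/%pad pointer extensions are an alternative where they are available:

	/* cast to a fixed-width type and use the matching format */
	pr_debug("obj phys %#llx dev %#llx\n",
		 (unsigned long long)obj->phys_addr,
		 (unsigned long long)obj->dev_addr);

	/* or print by reference via the printk pointer extensions */
	pr_debug("obj phys %pa dev %pad\n", &obj->phys_addr, &obj->dev_addr);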
Reviewed-by: Rob Clark Reviewed-by: Thierry Reding Signed-off-by: Russell King --- drivers/gpu/drm/armada/armada_fbdev.c | 6 +++--- drivers/gpu/drm/armada/armada_gem.c | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index b348b16f81e2..948cb14c561e 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh, drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); - DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n", - dfb->fb.width, dfb->fb.height, - dfb->fb.bits_per_pixel, obj->phys_addr); + DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n", + dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel, + (unsigned long long)obj->phys_addr); return 0; diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 9f2356bae7fd..adc7c2da75bb 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) obj->dev_addr = obj->linear->start; } - DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", - obj, obj->phys_addr, obj->dev_addr); + DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, + (unsigned long long)obj->phys_addr, + (unsigned long long)obj->dev_addr); return 0; } From 5cd5268806435b6476184430b2806db988c903f0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 7 Dec 2013 16:28:39 +0000 Subject: [PATCH 078/312] DRM: Armada: prime refcounting bug fix Commit 011c2282c74d changed the way refcounting on imported dma_bufs works, and this hadn't been spotted while forward-porting Armada. Reflect the changes in that commit into the Armada driver. Reviewed-by: Rob Clark Reviewed-by: Thierry Reding Signed-off-by: Russell King --- drivers/gpu/drm/armada/armada_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index adc7c2da75bb..887816f43476 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -558,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) * refcount on the gem object itself. */ drm_gem_object_reference(obj); - dma_buf_put(buf); return obj; } } @@ -574,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) } dobj->obj.import_attach = attach; + get_dma_buf(buf); /* * Don't call dma_buf_map_attachment() here - it maps the From c1b1731d2002b93857810aa5da856b43f46bb470 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Fri, 6 Dec 2013 17:51:18 +0530 Subject: [PATCH 079/312] drivers: phy: Fix memory leak 'phy' was not being freed upon error in one of the cases. Adjust the 'goto's to fix this. 
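The underlying idiom, with hypothetical helpers rather than the real phy-core calls: every step that can fail jumps to a label that unwinds exactly the steps already taken, in reverse order, so earlier allocations cannot leak:

	static struct thing *thing_create(struct device *dev)
	{
		struct thing *t;
		int ret;

		t = kzalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			return ERR_PTR(-ENOMEM);

		ret = thing_get_id(t);		/* hypothetical */
		if (ret < 0)
			goto free_thing;

		ret = thing_register(t);	/* hypothetical */
		if (ret)
			goto put_id;

		return t;

	put_id:
		thing_put_id(t);		/* hypothetical */
	free_thing:
		kfree(t);
		return ERR_PTR(ret);
	}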
Signed-off-by: Sachin Kamat Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Greg Kroah-Hartman --- drivers/phy/phy-core.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 03cf8fb81554..712b358a5756 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -453,7 +453,7 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, if (id < 0) { dev_err(dev, "unable to get id\n"); ret = id; - goto err0; + goto err1; } device_initialize(&phy->dev); @@ -468,11 +468,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); if (ret) - goto err1; + goto err2; ret = device_add(&phy->dev); if (ret) - goto err1; + goto err2; if (pm_runtime_enabled(dev)) { pm_runtime_enable(&phy->dev); @@ -481,11 +481,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, return phy; -err1: +err2: ida_remove(&phy_ida, phy->id); put_device(&phy->dev); +err1: kfree(phy); - err0: return ERR_PTR(ret); } From 52797d293287f5a98bd686616527b102b077ac39 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 6 Dec 2013 17:51:19 +0530 Subject: [PATCH 080/312] drivers: phy: tweaks to phy_create() If this was called with a NULL "dev" then it lead to a NULL dereference when we called dev_WARN(). I have changed it to WARN_ON() so that we get a stack dump and can fix the caller. The rest of this patch is just cleanup like returning directly instead of having do-nothing gotos. Using descriptive labels instead of GW-BASIC style "err0" and "err1". I also flipped the order of put_device() and ida_remove() so they are a mirror reflection of the order they were allocated. Signed-off-by: Dan Carpenter Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Greg Kroah-Hartman --- drivers/phy/phy-core.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 712b358a5756..58e0e9739028 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, int id; struct phy *phy; - if (!dev) { - dev_WARN(dev, "no device provided for PHY\n"); - ret = -EINVAL; - goto err0; - } + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); phy = kzalloc(sizeof(*phy), GFP_KERNEL); - if (!phy) { - ret = -ENOMEM; - goto err0; - } + if (!phy) + return ERR_PTR(-ENOMEM); id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); if (id < 0) { dev_err(dev, "unable to get id\n"); ret = id; - goto err1; + goto free_phy; } device_initialize(&phy->dev); @@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); if (ret) - goto err2; + goto put_dev; ret = device_add(&phy->dev); if (ret) - goto err2; + goto put_dev; if (pm_runtime_enabled(dev)) { pm_runtime_enable(&phy->dev); @@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, return phy; -err2: - ida_remove(&phy_ida, phy->id); +put_dev: put_device(&phy->dev); -err1: + ida_remove(&phy_ida, phy->id); +free_phy: kfree(phy); -err0: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(phy_create); From 8820784203ac24279be23a6e3d85d216c46d65ee Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Fri, 6 Dec 2013 17:51:20 +0530 Subject: [PATCH 081/312] phy: kconfig: add depends on "USB_PHY" to OMAP_USB2 and TWL4030_USB Fixes 
warning: (OMAP_USB2 && TWL4030_USB) selects USB_PHY which has unmet direct dependencies (USB_SUPPORT) that shows up while disabling USB_SUPPORT from menuconfig. Reported-by: Russell King Signed-off-by: Kishon Vijay Abraham I Acked-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/phy/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a344f3d52361..330ef2d06567 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO config OMAP_USB2 tristate "OMAP USB2 PHY Driver" depends on ARCH_OMAP2PLUS + depends on USB_PHY select GENERIC_PHY - select USB_PHY select OMAP_CONTROL_USB help Enable this to support the transceiver that is part of SOC. This @@ -36,8 +36,8 @@ config OMAP_USB2 config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS + depends on USB_PHY select GENERIC_PHY - select USB_PHY help Enable this to support the USB OTG transceiver on TWL4030 family chips (including the TWL5030 and TPS659x0 devices). From f5f972102d5c12729f0a35fce266b580aaa03f66 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 22 Nov 2013 15:52:29 -0600 Subject: [PATCH 082/312] powerpc/kvm/booke: Fix build break due to stack frame size warning Commit ce11e48b7fdd256ec68b932a89b397a790566031 ("KVM: PPC: E500: Add userspace debug stub support") added "struct thread_struct" to the stack of kvmppc_vcpu_run(). thread_struct is 1152 bytes on my build, compared to 48 bytes for the recently-introduced "struct debug_reg". Use the latter instead. This fixes the following error: cc1: warnings being treated as errors arch/powerpc/kvm/booke.c: In function 'kvmppc_vcpu_run': arch/powerpc/kvm/booke.c:760:1: error: the frame size of 1424 bytes is larger than 1024 bytes make[2]: *** [arch/powerpc/kvm/booke.o] Error 1 make[1]: *** [arch/powerpc/kvm] Error 2 make[1]: *** Waiting for unfinished jobs.... 
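Condensed to its core, the fix swaps a large on-stack local for the small sub-structure that is actually needed; the sizes are taken from the report above and the sketch is illustrative, not a drop-in:

	/* before: ~1152-byte local, trips the 1024-byte frame-size warning */
	struct thread_struct thread;
	thread.debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&thread);	/* old prototype took a thread_struct * */

	/* after: only the ~48-byte debug state lives on the stack */
	struct debug_reg debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&debug);	/* new prototype takes a debug_reg * */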
Signed-off-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/switch_to.h | 2 +- arch/powerpc/kernel/process.c | 32 ++++++++++++++-------------- arch/powerpc/kvm/booke.c | 12 +++++------ 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee12610af02..aace90547614 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); -extern void switch_booke_debug_regs(struct thread_struct *new_thread); +extern void switch_booke_debug_regs(struct debug_reg *new_debug); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 75c2d1009985..83530af028b8 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) #endif } -static void prime_debug_regs(struct thread_struct *thread) +static void prime_debug_regs(struct debug_reg *debug) { /* * We could have inherited MSR_DE from userspace, since @@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) */ mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_IAC1, thread->debug.iac1); - mtspr(SPRN_IAC2, thread->debug.iac2); + mtspr(SPRN_IAC1, debug->iac1); + mtspr(SPRN_IAC2, debug->iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 - mtspr(SPRN_IAC3, thread->debug.iac3); - mtspr(SPRN_IAC4, thread->debug.iac4); + mtspr(SPRN_IAC3, debug->iac3); + mtspr(SPRN_IAC4, debug->iac4); #endif - mtspr(SPRN_DAC1, thread->debug.dac1); - mtspr(SPRN_DAC2, thread->debug.dac2); + mtspr(SPRN_DAC1, debug->dac1); + mtspr(SPRN_DAC2, debug->dac2); #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - mtspr(SPRN_DVC1, thread->debug.dvc1); - mtspr(SPRN_DVC2, thread->debug.dvc2); + mtspr(SPRN_DVC1, debug->dvc1); + mtspr(SPRN_DVC2, debug->dvc2); #endif - mtspr(SPRN_DBCR0, thread->debug.dbcr0); - mtspr(SPRN_DBCR1, thread->debug.dbcr1); + mtspr(SPRN_DBCR0, debug->dbcr0); + mtspr(SPRN_DBCR1, debug->dbcr1); #ifdef CONFIG_BOOKE - mtspr(SPRN_DBCR2, thread->debug.dbcr2); + mtspr(SPRN_DBCR2, debug->dbcr2); #endif } /* @@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) * debug registers, set the debug registers from the values * stored in the new thread. 
*/ -void switch_booke_debug_regs(struct thread_struct *new_thread) +void switch_booke_debug_regs(struct debug_reg *new_debug) { if ((current->thread.debug.dbcr0 & DBCR0_IDM) - || (new_thread->debug.dbcr0 & DBCR0_IDM)) - prime_debug_regs(new_thread); + || (new_debug->dbcr0 & DBCR0_IDM)) + prime_debug_regs(new_debug); } EXPORT_SYMBOL_GPL(switch_booke_debug_regs); #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ @@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS - switch_booke_debug_regs(&new->thread); + switch_booke_debug_regs(&new->thread.debug); #else /* * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a210b9a..0591e05db74b 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret, s; - struct thread_struct thread; + struct debug_reg debug; #ifdef CONFIG_PPC_FPU struct thread_fp_state fp; int fpexc_mode; @@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif /* Switch to guest debug context */ - thread.debug = vcpu->arch.shadow_dbg_reg; - switch_booke_debug_regs(&thread); - thread.debug = current->thread.debug; + debug = vcpu->arch.shadow_dbg_reg; + switch_booke_debug_regs(&debug); + debug = current->thread.debug; current->thread.debug = vcpu->arch.shadow_dbg_reg; kvmppc_fix_ee_before_entry(); @@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) We also get here with interrupts enabled. */ /* Switch back to user space debug context */ - switch_booke_debug_regs(&thread); - current->thread.debug = thread.debug; + switch_booke_debug_regs(&debug); + current->thread.debug = debug; #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); From f01b3926ee645974f549f4a6921268142047717c Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sun, 8 Dec 2013 16:52:31 +0000 Subject: [PATCH 083/312] netfilter: SYNPROXY target: restrict to INPUT/FORWARD Fix a crash in synproxy_send_tcp() when using the SYNPROXY target in the PREROUTING chain caused by missing routing information. Reported-by: Nicki P. 
Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/ipt_SYNPROXY.c | 1 + net/ipv6/netfilter/ip6t_SYNPROXY.c | 1 + 2 files changed, 2 insertions(+) diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index f13bd91d9a56..a313c3fbeb46 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) static struct xt_target synproxy_tg4_reg __read_mostly = { .name = "SYNPROXY", .family = NFPROTO_IPV4, + .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .target = synproxy_tg4, .targetsize = sizeof(struct xt_synproxy_info), .checkentry = synproxy_tg4_check, diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index f78f41aca8e9..a0d17270117c 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) static struct xt_target synproxy_tg6_reg __read_mostly = { .name = "SYNPROXY", .family = NFPROTO_IPV6, + .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .target = synproxy_tg6, .targetsize = sizeof(struct xt_synproxy_info), .checkentry = synproxy_tg6_check, From 6c719faca2aceca72f1bf5b1645c1734ed3e9447 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 10 Dec 2013 13:20:59 +0100 Subject: [PATCH 084/312] drm/i915: don't update the dri1 breadcrumb with modesetting The update is horribly racy since it doesn't protect at all against concurrent closing of the master fd. And it can't really since that requires us to grab a mutex. Instead of jumping through hoops and offloading this to a worker thread just block this bit of code for the modesetting driver. Note that the race is fairly easy to hit since we call the breadcrumb function for any interrupt. So the vblank interrupt (which usually keeps going for a bit) is enough. But even if we'd block this and only update the breadcrumb for user interrupts from the CS we could hit this race with kms/gem userspace: If a non-master is waiting somewhere (and hence has interrupts enabled) and the master closes its fd (probably due to crashing). v2: Add a code comment to explain why fixing this for real isn't really worth it. Also improve the commit message a bit. v3: Fix the spelling in the comment. Reported-by: Eugene Shatokhin Cc: Eugene Shatokhin Cc: stable@vger.kernel.org Acked-by: Chris Wilson Tested-by: Eugene Shatokhin Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 197db0993a24..5c648425c1e0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; + /* + * The dri breadcrumb update races against the drm master disappearing. + * Instead of trying to fix this (this is by far not the only ums issue) + * just don't do the update in kms mode. 
+ */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + if (dev->primary->master) { master_priv = dev->primary->master->driver_priv; if (master_priv->sarea_priv) From 8e8339a3a1069141985daaa2521ba304509ddecd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 11 Dec 2013 11:09:53 +0100 Subject: [PATCH 085/312] sched: Initialize power_orig for overlapping groups Yinghai reported that he saw a /0 in sg_capacity on his EX parts. Make sure to always initialize power_orig now that we actually use it. Ideally build_sched_domains() -> init_sched_groups_power() would also initialize this; but for some yet unexplained reason some setups seem to miss updates there. Reported-by: Yinghai Lu Tested-by: Yinghai Lu Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-l8ng2m9uml6fhibln8wqpom7@git.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e85cda20ab2b..19af58f3a261 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5112,6 +5112,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) * die on a /0 trap. */ sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); + sg->sgp->power_orig = sg->sgp->power; /* * Make sure the first group of this domain contains the From ba1f14fbe70965ae0fb1655a5275a62723f65b77 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Nov 2013 14:26:41 +0100 Subject: [PATCH 086/312] sched: Remove PREEMPT_NEED_RESCHED from generic code While hunting a preemption issue with Alexander, Ben noticed that the currently generic PREEMPT_NEED_RESCHED stuff is horribly broken for load-store architectures. We currently rely on the IPI to fold TIF_NEED_RESCHED into PREEMPT_NEED_RESCHED, but when this IPI lands while we already have a load for the preempt-count but before the store, the store will erase the PREEMPT_NEED_RESCHED change. The current preempt-count only works on load-store archs because interrupts are assumed to be completely balanced wrt their preempt_count fiddling; the previous preempt_count load will match the preempt_count state after the interrupt and therefore nothing gets lost. This patch removes the PREEMPT_NEED_RESCHED usage from generic code and pushes it into x86 arch code; the generic code goes back to relying on TIF_NEED_RESCHED. Boot tested on x86_64 and compile tested on ppc64. Reported-by: Benjamin Herrenschmidt Reported-and-Tested-by: Alexander Graf Signed-off-by: Peter Zijlstra Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20131128132641.GP10022@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- arch/x86/include/asm/preempt.h | 11 +++++++++++ include/asm-generic/preempt.h | 35 +++++++++++----------------------- include/linux/sched.h | 2 -- 3 files changed, 22 insertions(+), 26 deletions(-) diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8729723636fd..c8b051933b1b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -7,6 +7,12 @@ DECLARE_PER_CPU(int, __preempt_count); +/* + * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such + * that a decrement hitting 0 means we can and should reschedule. + */ +#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) + /* * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users * that think a non-zero value indicates we cannot preempt. 
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) __this_cpu_add_4(__preempt_count, -val); } +/* + * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ static __always_inline bool __preempt_count_dec_and_test(void) { GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index ddf2b420ac8f..1cd3f5d767a8 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -3,13 +3,11 @@ #include -/* - * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users - * that think a non-zero value indicates we cannot preempt. - */ +#define PREEMPT_ENABLED (0) + static __always_inline int preempt_count(void) { - return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; + return current_thread_info()->preempt_count; } static __always_inline int *preempt_count_ptr(void) @@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void) return ¤t_thread_info()->preempt_count; } -/* - * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the - * alternative is loosing a reschedule. Better schedule too often -- also this - * should be a very rare operation. - */ static __always_inline void preempt_count_set(int pc) { *preempt_count_ptr() = pc; @@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc) task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ } while (0) -/* - * We fold the NEED_RESCHED bit into the preempt count such that - * preempt_enable() can decrement and test for needing to reschedule with a - * single instruction. - * - * We invert the actual bit, so that when the decrement hits 0 we know we both - * need to resched (the bit is cleared) and can resched (no preempt count). - */ - static __always_inline void set_preempt_need_resched(void) { - *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; } static __always_inline void clear_preempt_need_resched(void) { - *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; } static __always_inline bool test_preempt_need_resched(void) { - return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); + return false; } /* @@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val) static __always_inline bool __preempt_count_dec_and_test(void) { - return !--*preempt_count_ptr(); + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. 
+ */ + return !--*preempt_count_ptr() && tif_need_resched(); } /* @@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void) */ static __always_inline bool should_resched(void) { - return unlikely(!*preempt_count_ptr()); + return unlikely(!preempt_count() && tif_need_resched()); } #ifdef CONFIG_PREEMPT diff --git a/include/linux/sched.h b/include/linux/sched.h index 768b037dfacb..96d674ba3876 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -440,8 +440,6 @@ struct task_cputime { .sum_exec_runtime = 0, \ } -#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED) - #ifdef CONFIG_PREEMPT_COUNT #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) #else From be5e610c0fd6ef772cafb9e0bd4128134804aef3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 18 Nov 2013 18:27:06 +0100 Subject: [PATCH 087/312] math64: Add mul_u64_u32_shr() Introduce mul_u64_u32_shr() as proposed by Andy a while back; it allows using 64x64->128 muls on 64bit archs and recent GCC which defines __SIZEOF_INT128__ and __int128. (This new method will be used by the scheduler.) Signed-off-by: Peter Zijlstra Cc: fweisbec@gmail.com Cc: Andy Lutomirski Cc: Linus Torvalds Link: http://lkml.kernel.org/n/tip-hxjoeuzmrcaumR0uZwjpe2pv@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + include/linux/math64.h | 30 ++++++++++++++++++++++++++++++ init/Kconfig | 6 ++++++ 3 files changed, 37 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e903c71f7e69..0952ecd60eca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -26,6 +26,7 @@ config X86 select HAVE_AOUT if X86_32 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_INT128 if X86_64 select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_IDE select HAVE_OPROFILE diff --git a/include/linux/math64.h b/include/linux/math64.h index 69ed5f5e9f6e..c45c089bfdac 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return ret; } +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u32_shr */ + +#else + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + u32 ah, al; + u64 ret; + + al = a; + ah = a >> 32; + + ret = ((u64)al * mul) >> shift; + if (ah) + ret += ((u64)ah * mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_shr */ + +#endif + #endif /* _LINUX_MATH64_H */ diff --git a/init/Kconfig b/init/Kconfig index 79383d3aa5dc..4e5d96ab2034 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK config ARCH_SUPPORTS_NUMA_BALANCING bool +# +# For architectures that know their GCC __int128 support is sound +# +config ARCH_SUPPORTS_INT128 + bool + # For architectures that (ab)use NUMA to represent different memory regions # all cpu-local but of different latencies, such as SuperH. # From 9dbdb155532395ba000c5d5d187658b0e17e529f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 18 Nov 2013 18:27:06 +0100 Subject: [PATCH 088/312] sched/fair: Rework sched_fair time accounting Christian suffers from a bad BIOS that wrecks his i5's TSC sync. This results in him occasionally seeing time going backwards - which crashes the scheduler ... 
Most of our time accounting can actually handle that except the most common one; the tick time update of sched_fair. There is a further problem with that code; previously we assumed that because we get a tick every TICK_NSEC our time delta could never exceed 32bits and math was simpler. However, ever since Frederic managed to get NO_HZ_FULL merged; this is no longer the case since now a task can run for a long time indeed without getting a tick. It only takes about ~4.2 seconds to overflow our u32 in nanoseconds. This means we not only need to better deal with time going backwards; but also means we need to be able to deal with large deltas. This patch reworks the entire code and uses mul_u64_u32_shr() as proposed by Andy a long while ago. We express our virtual time scale factor in a u32 multiplier and shift right and the 32bit mul_u64_u32_shr() implementation reduces to a single 32x32->64 multiply if the time delta is still short (common case). For 64bit a 64x64->128 multiply can be used if ARCH_SUPPORTS_INT128. Reported-and-Tested-by: Christian Engelmayer Signed-off-by: Peter Zijlstra Cc: fweisbec@gmail.com Cc: Paul Turner Cc: Stanislaw Gruszka Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Andrew Morton Link: http://lkml.kernel.org/r/20131118172706.GI3866@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 +- kernel/sched/fair.c | 146 +++++++++++++++++++----------------------- 2 files changed, 67 insertions(+), 82 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 96d674ba3876..53f97eb8dbc7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -930,7 +930,8 @@ struct pipe_inode_info; struct uts_namespace; struct load_weight { - unsigned long weight, inv_weight; + unsigned long weight; + u32 inv_weight; }; struct sched_avg { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fd773ade1a31..9030da7bcb15 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -178,59 +178,61 @@ void sched_init_granularity(void) update_sysctl(); } -#if BITS_PER_LONG == 32 -# define WMULT_CONST (~0UL) -#else -# define WMULT_CONST (1UL << 32) -#endif - +#define WMULT_CONST (~0U) #define WMULT_SHIFT 32 -/* - * Shift right and round: - */ -#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) - -/* - * delta *= weight / lw - */ -static unsigned long -calc_delta_mine(unsigned long delta_exec, unsigned long weight, - struct load_weight *lw) +static void __update_inv_weight(struct load_weight *lw) { - u64 tmp; + unsigned long w; - /* - * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched - * entities since MIN_SHARES = 2. Treat weight as 1 if less than - * 2^SCHED_LOAD_RESOLUTION. - */ - if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) - tmp = (u64)delta_exec * scale_load_down(weight); + if (likely(lw->inv_weight)) + return; + + w = scale_load_down(lw->weight); + + if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) + lw->inv_weight = 1; + else if (unlikely(!w)) + lw->inv_weight = WMULT_CONST; else - tmp = (u64)delta_exec; + lw->inv_weight = WMULT_CONST / w; +} - if (!lw->inv_weight) { - unsigned long w = scale_load_down(lw->weight); +/* + * delta_exec * weight / lw.weight + * OR + * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT + * + * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case + * we're guaranteed shift stays positive because inv_weight is guaranteed to + * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. 
+ * + * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus + * weight/lw.weight <= 1, and therefore our shift will also be positive. + */ +static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) +{ + u64 fact = scale_load_down(weight); + int shift = WMULT_SHIFT; - if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) - lw->inv_weight = 1; - else if (unlikely(!w)) - lw->inv_weight = WMULT_CONST; - else - lw->inv_weight = WMULT_CONST / w; + __update_inv_weight(lw); + + if (unlikely(fact >> 32)) { + while (fact >> 32) { + fact >>= 1; + shift--; + } } - /* - * Check whether we'd overflow the 64-bit multiplication: - */ - if (unlikely(tmp > WMULT_CONST)) - tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, - WMULT_SHIFT/2); - else - tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); + /* hint to use a 32x32->64 mul */ + fact = (u64)(u32)fact * lw->inv_weight; - return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); + while (fact >> 32) { + fact >>= 1; + shift--; + } + + return mul_u64_u32_shr(delta_exec, fact, shift); } @@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) #endif /* CONFIG_FAIR_GROUP_SCHED */ static __always_inline -void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec); +void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); /************************************************************** * Scheduling class tree data structure manipulation methods: @@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write, /* * delta /= w */ -static inline unsigned long -calc_delta_fair(unsigned long delta, struct sched_entity *se) +static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) { if (unlikely(se->load.weight != NICE_0_LOAD)) - delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); + delta = __calc_delta(delta, NICE_0_LOAD, &se->load); return delta; } @@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) update_load_add(&lw, se->load.weight); load = &lw; } - slice = calc_delta_mine(slice, se->load.weight, load); + slice = __calc_delta(slice, se->load.weight, load); } return slice; } @@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p) #endif /* - * Update the current task's runtime statistics. Skip current tasks that - * are not in our scheduling class. + * Update the current task's runtime statistics. 
*/ -static inline void -__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, - unsigned long delta_exec) -{ - unsigned long delta_exec_weighted; - - schedstat_set(curr->statistics.exec_max, - max((u64)delta_exec, curr->statistics.exec_max)); - - curr->sum_exec_runtime += delta_exec; - schedstat_add(cfs_rq, exec_clock, delta_exec); - delta_exec_weighted = calc_delta_fair(delta_exec, curr); - - curr->vruntime += delta_exec_weighted; - update_min_vruntime(cfs_rq); -} - static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; u64 now = rq_clock_task(rq_of(cfs_rq)); - unsigned long delta_exec; + u64 delta_exec; if (unlikely(!curr)) return; - /* - * Get the amount of time the current task was running - * since the last time we changed load (this cannot - * overflow on 32 bits): - */ - delta_exec = (unsigned long)(now - curr->exec_start); - if (!delta_exec) + delta_exec = now - curr->exec_start; + if (unlikely((s64)delta_exec <= 0)) return; - __update_curr(cfs_rq, curr, delta_exec); curr->exec_start = now; + schedstat_set(curr->statistics.exec_max, + max(delta_exec, curr->statistics.exec_max)); + + curr->sum_exec_runtime += delta_exec; + schedstat_add(cfs_rq, exec_clock, delta_exec); + + curr->vruntime += calc_delta_fair(delta_exec, curr); + update_min_vruntime(cfs_rq); + if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); @@ -3015,8 +3001,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) } } -static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, - unsigned long delta_exec) +static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ cfs_rq->runtime_remaining -= delta_exec; @@ -3034,7 +3019,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, } static __always_inline -void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) +void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) return; @@ -3574,8 +3559,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) return rq_clock_task(rq_of(cfs_rq)); } -static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, - unsigned long delta_exec) {} +static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} From 02ab71cdae248533620abefa1d46097581457110 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Mon, 9 Dec 2013 15:55:11 +0000 Subject: [PATCH 089/312] xen/arm64: do not call the swiotlb functions twice On arm64 the dma_map_ops implementation is based on the swiotlb. swiotlb-xen, used by default in dom0 on Xen, is also based on the swiotlb. Avoid calling into the default arm64 dma_map_ops functions from xen_dma_map_page, xen_dma_unmap_page, xen_dma_sync_single_for_cpu, and xen_dma_sync_single_for_device otherwise we end up calling into the swiotlb twice. When arm64 gets a non-swiotlb based implementation of dma_map_ops, we'll probably have to reintroduce dma_map_ops calls in page-coherent.h. 
Signed-off-by: Stefano Stabellini CC: catalin.marinas@arm.com CC: Will.Deacon@arm.com CC: Ian.Campbell@citrix.com --- arch/arm64/include/asm/xen/page-coherent.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index 2820f1a6eebe..dde3fc9c49f0 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); } static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); } static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); } static inline void xen_dma_sync_single_for_device(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); } #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ From a7892f32cc3534d4cc0e64b245fbf47a8e364652 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 11 Dec 2013 17:02:27 +0000 Subject: [PATCH 090/312] arm: xen: foreign mapping PTEs are special. These mappings are in fact special and require special handling in privcmd, which already exists. Failure to mark the PTE as special on arm64 causes all sorts of bad PTE fun. e.g. e.g.: BUG: Bad page map in process xl pte:e0004077b33f53 pmd:4079575003 page:ffffffbce1a2f328 count:1 mapcount:-1 mapping: (null) index:0x0 page flags: 0x4000000000000014(referenced|dirty) addr:0000007fb5259000 vm_flags:040644fa anon_vma: (null) mapping:ffffffc03a6fda58 index:0 vma->vm_ops->fault: privcmd_fault+0x0/0x38 vma->vm_file->f_op->mmap: privcmd_mmap+0x0/0x2c CPU: 0 PID: 2657 Comm: xl Not tainted 3.12.0+ #102 Call trace: [] dump_backtrace+0x0/0x12c [] show_stack+0x14/0x1c [] dump_stack+0x70/0x90 [] print_bad_pte+0x12c/0x1bc [] unmap_single_vma+0x4cc/0x700 [] unmap_vmas+0x68/0xb4 [] unmap_region+0xcc/0x1d4 [] do_munmap+0x218/0x314 [] vm_munmap+0x44/0x64 [] SyS_munmap+0x24/0x34 Where unmap_single_vma contains inlined -> unmap_page_range -> zap_pud_range -> zap_pmd_range -> zap_pte_range -> print_bad_pte. Or: BUG: Bad page state in process xl pfn:4077b4d page:ffffffbce1a2f8d8 count:0 mapcount:-1 mapping: (null) index:0x0 page flags: 0x4000000000000014(referenced|dirty) Modules linked in: CPU: 0 PID: 2657 Comm: xl Tainted: G B 3.12.0+ #102 Call trace: [] dump_backtrace+0x0/0x12c [] show_stack+0x14/0x1c [] dump_stack+0x70/0x90 [] bad_page+0xc4/0x110 [] free_pages_prepare+0xd0/0xd8 [] free_hot_cold_page+0x28/0x178 [] free_hot_cold_page_list+0x38/0x60 [] release_pages+0x190/0x1dc [] unmap_region+0x15c/0x1d4 [] do_munmap+0x218/0x314 [] vm_munmap+0x44/0x64 [] SyS_munmap+0x24/0x34 x86 already gets this correct. 32-bit arm gets away with this because there is not PTE_SPECIAL bit in the PTE there and the vm_normal_page fallback path does the right thing. 
Signed-off-by: Ian Campbell Signed-off-by: Stefano Stabellini --- arch/arm/xen/enlighten.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 6a288c7be49f..85501238b425 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, struct remap_data *info = data; struct page *page = info->pages[info->index++]; unsigned long pfn = page_to_pfn(page); - pte_t pte = pfn_pte(pfn, info->prot); + pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); if (map_foreign_page(pfn, info->fgmfn, info->domid)) return -EFAULT; From 2306bfb208b9c403592a0513b08f2e6ad53056d5 Mon Sep 17 00:00:00 2001 From: Eric Seppanen Date: Thu, 21 Nov 2013 14:49:56 -0800 Subject: [PATCH 091/312] iscsi-target: return -EINVAL on oversized configfs parameter The iSCSI CHAP auth parameters are already copied with respect for the destination buffer size. Return -EINVAL instead of silently truncating the input. Signed-off-by: Eric Seppanen Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_configfs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index e3318edb233d..1c0088fe9e99 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \ \ if (!capable(CAP_SYS_ADMIN)) \ return -EPERM; \ - \ + if (count >= sizeof(auth->name)) \ + return -EINVAL; \ snprintf(auth->name, sizeof(auth->name), "%s", page); \ if (!strncmp("NULL", auth->name, 4)) \ auth->naf_flags &= ~flags; \ From a51d5229d10dd3a337b674ce8603437d2996c5c3 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Sat, 23 Nov 2013 10:35:58 -0800 Subject: [PATCH 092/312] target: Remove write-only stats fields and lock from struct se_node_acl Commit 04f3b31bff72 ("iscsi-target: Convert iscsi_session statistics to atomic_long_t") removed the updating of these fields in iscsi (the only fabric driver that ever touched these counters), and the core has no way to report or otherwise use the values. Remove the last remnants of these counters. 
Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tpg.c | 2 -- include/target/target_core_base.h | 4 ---- 2 files changed, 6 deletions(-) diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f697f8baec54..f755712a9a0d 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); - spin_lock_init(&acl->stats_lock); acl->dynamic_node_acl = 1; tpg->se_tpg_tfo->set_default_node_attributes(acl); @@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); - spin_lock_init(&acl->stats_lock); tpg->se_tpg_tfo->set_default_node_attributes(acl); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45412a6afa69..9f1dda659c5a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -517,10 +517,6 @@ struct se_node_acl { u32 acl_index; #define MAX_ACL_TAG_SIZE 64 char acl_tag[MAX_ACL_TAG_SIZE]; - u64 num_cmds; - u64 read_bytes; - u64 write_bytes; - spinlock_t stats_lock; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t acl_pr_ref_count; struct se_dev_entry **device_list; From 4454b66cb67f14c33cd70ddcf0ff4985b26324b7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 25 Nov 2013 14:53:57 -0800 Subject: [PATCH 093/312] iscsi-target: Fix-up all zero data-length CDBs with R/W_BIT set This patch changes special case handling for ISCSI_OP_SCSI_CMD where an initiator sends a zero length Expected Data Transfer Length (EDTL), but still sets the WRITE and/or READ flag bits when no payload transfer is requested. Many, many moons ago two special cases where added for an ancient version of ESX that has long since been fixed, so instead of adding a new special case for the reported bug with a Broadcom 57800 NIC, go ahead and always strip off the incorrect WRITE + READ flag bits. Also, avoid sending a reject here, as RFC-3720 does mandate this case be handled without protocol error. Reported-by: Witold Bazakbal <865perl@wp.pl> Tested-by: Witold Bazakbal <865perl@wp.pl> Cc: #3.1+ Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d70e9119e906..02182ab017b1 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -823,24 +823,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (((hdr->flags & ISCSI_FLAG_CMD_READ) || (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { /* - * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) - * that adds support for RESERVE/RELEASE. There is a bug - * add with this new functionality that sets R/W bits when - * neither CDB carries any READ or WRITE datapayloads. 
+ * From RFC-3720 Section 10.3.1: + * + * "Either or both of R and W MAY be 1 when either the + * Expected Data Transfer Length and/or Bidirectional Read + * Expected Data Transfer Length are 0" + * + * For this case, go ahead and clear the unnecssary bits + * to avoid any confusion with ->data_direction. */ - if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { - hdr->flags &= ~ISCSI_FLAG_CMD_READ; - hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; - goto done; - } + hdr->flags &= ~ISCSI_FLAG_CMD_READ; + hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; - pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" + pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" " set when Expected Data Transfer Length is 0 for" - " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); - return iscsit_add_reject_cmd(cmd, - ISCSI_REASON_BOOKMARK_INVALID, buf); + " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); } -done: if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { From 94a7111043d99819cd0a72d9b3174c7054adb2a0 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 29 Oct 2013 09:56:34 +0800 Subject: [PATCH 094/312] iser-target: fix error return code in isert_create_device_ib_res() Fix to return a negative error code from the error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Cc: #3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6be57c38638d..78f6e92c571f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -264,21 +264,29 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_event_callback, (void *)&cq_desc[i], ISER_MAX_RX_CQ_LEN, i); - if (IS_ERR(device->dev_rx_cq[i])) + if (IS_ERR(device->dev_rx_cq[i])) { + ret = PTR_ERR(device->dev_rx_cq[i]); + device->dev_rx_cq[i] = NULL; goto out_cq; + } device->dev_tx_cq[i] = ib_create_cq(device->ib_device, isert_cq_tx_callback, isert_cq_event_callback, (void *)&cq_desc[i], ISER_MAX_TX_CQ_LEN, i); - if (IS_ERR(device->dev_tx_cq[i])) + if (IS_ERR(device->dev_tx_cq[i])) { + ret = PTR_ERR(device->dev_tx_cq[i]); + device->dev_tx_cq[i] = NULL; + goto out_cq; + } + + ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); + if (ret) goto out_cq; - if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) - goto out_cq; - - if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) + ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); + if (ret) goto out_cq; } From 63832aabec12a28a41a221773ab3819d30ba0a67 Mon Sep 17 00:00:00 2001 From: Shivaram Upadhyayula Date: Tue, 10 Dec 2013 16:06:40 +0530 Subject: [PATCH 095/312] qla2xxx: Fix schedule_delayed_work() for target timeout calculations This patch fixes two cases in qla_target.c code where the schedule_delayed_work() value was being incorrectly calculated from sess->expires - jiffies. 
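For reference, the delay argument to schedule_delayed_work() is relative: the time remaining until the deadline (deadline minus now), never the reverse. A minimal sketch of the intended jiffies arithmetic; the function name and parameters are placeholders, not the qla2xxx code:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* re-arm @work so it fires once the absolute deadline @expires is reached */
static void requeue_for_deadline(struct delayed_work *work, unsigned long expires)
{
	if (time_after_eq(jiffies, expires))
		schedule_delayed_work(work, 0);			/* already due */
	else
		schedule_delayed_work(work, expires - jiffies);	/* time left, in jiffies */
}

With the old "jiffies - sess->expires" ordering (visible in the diff below), a not-yet-expired session produced a huge wrapped delay instead of the small remaining one.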
Signed-off-by: Shivaram U Cc: #3.6+ Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 596480022b0a..3bb0a1d1622a 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, schedule_delayed_work(&tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, - jiffies - sess->expires); + sess->expires - jiffies); } /* ha->hardware_lock supposed to be held on entry */ @@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess; - unsigned long flags; + unsigned long flags, elapsed; spin_lock_irqsave(&ha->hardware_lock, flags); while (!list_empty(&tgt->del_sess_list)) { sess = list_entry(tgt->del_sess_list.next, typeof(*sess), del_list_entry); - if (time_after_eq(jiffies, sess->expires)) { + elapsed = jiffies; + if (time_after_eq(elapsed, sess->expires)) { qlt_undelete_sess(sess); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, @@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) ha->tgt.tgt_ops->put_sess(sess); } else { schedule_delayed_work(&tgt->sess_del_work, - jiffies - sess->expires); + sess->expires - elapsed); break; } } From a3adadf3018102c24754e0b53a5515c40fbaff4a Mon Sep 17 00:00:00 2001 From: Eric Leblond Date: Thu, 12 Dec 2013 08:51:59 +0100 Subject: [PATCH 096/312] netfilter: nft_reject: fix endianness in dump function The dump function in nft_reject_ipv4 was not converting a u32 field to network order before sending it to userspace, this needs to happen for consistency with other nf_tables and nfnetlink subsystems. Signed-off-by: Eric Leblond Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/nft_reject_ipv4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index fff5ba1a33b7..4a5e94ac314a 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_reject *priv = nft_expr_priv(expr); - if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type)) + if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) goto nla_put_failure; switch (priv->type) { From 9ae9ab522094406915c27dc729d3ecef7b44af72 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 4 Dec 2013 09:52:58 +0000 Subject: [PATCH 097/312] drm/i915: Prevent double unref following alloc failure during execbuffer Whilst looking up the objects required for an execbuffer, an untimely allocation failure in creating the vma results in the object being unreferenced from two lists. The ownership during the lookup is meant to be moved from the list of objects being looked to the vma, and this double unreference upon error results in a use-after-free. Fixes regression from commit 27173f1f95db5e74ceb35fe9a2f2f348ea11bac9 Author: Ben Widawsky Date: Wed Aug 14 11:38:36 2013 +0200 drm/i915: Convert execbuf code to use vmas Based on the fix by Ben Widawsky. Signed-off-by: Chris Wilson Cc: Ben Widawsky Cc: stable@vger.kernel.org [danvet: Bikeshed the crucial comment above the ownership transfer as discussed on irc.] 
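The ownership rule the fix restores fits in a few lines of generic list code. This is an illustration only (struct item, adopt() and the injected failure are made up, not the i915 structures): each element's reference is handed to the new list one at a time, so the error path may drop only what is still on the temporary list.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	struct kref ref;
};

static void item_release(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

/* move everything from @pending to @owned; @fail_after injects an error for illustration */
static int adopt(struct list_head *pending, struct list_head *owned, int fail_after)
{
	int ret = 0;

	while (!list_empty(pending)) {
		struct item *it = list_first_entry(pending, struct item, link);

		if (fail_after-- == 0) {
			ret = -ENOENT;
			break;
		}
		/* transfer ownership: from here on @owned holds the reference */
		list_move_tail(&it->link, owned);
	}

	/* unwind: drop only the references never handed over */
	while (ret && !list_empty(pending)) {
		struct item *it = list_first_entry(pending, struct item, link);

		list_del_init(&it->link);
		kref_put(&it->ref, item_release);
	}
	return ret;
}

Unreferencing entries from both lists, as the old error path effectively did, drops the same reference twice and frees an object the other list still points at.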
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28 +++++++++++++++------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b7e787fb4649..a3ba9a8cd687 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb, { struct drm_i915_gem_object *obj; struct list_head objects; - int i, ret = 0; + int i, ret; INIT_LIST_HEAD(&objects); spin_lock(&file->table_lock); @@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb, DRM_DEBUG("Invalid object handle %d at index %d\n", exec[i].handle, i); ret = -ENOENT; - goto out; + goto err; } if (!list_empty(&obj->obj_exec_link)) { @@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb, DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", obj, exec[i].handle, i); ret = -EINVAL; - goto out; + goto err; } drm_gem_object_reference(&obj->base); @@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb, spin_unlock(&file->table_lock); i = 0; - list_for_each_entry(obj, &objects, obj_exec_link) { + while (!list_empty(&objects)) { struct i915_vma *vma; + obj = list_first_entry(&objects, + struct drm_i915_gem_object, + obj_exec_link); + /* * NOTE: We can leak any vmas created here when something fails * later on. But that's no issue since vma_unbind can deal with @@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb, if (IS_ERR(vma)) { DRM_DEBUG("Failed to lookup VMA\n"); ret = PTR_ERR(vma); - goto out; + goto err; } + /* Transfer ownership from the objects list to the vmas list. */ list_add_tail(&vma->exec_list, &eb->vmas); + list_del_init(&obj->obj_exec_link); vma->exec_entry = &exec[i]; if (eb->and < 0) { @@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb, ++i; } + return 0; -out: + +err: while (!list_empty(&objects)) { obj = list_first_entry(&objects, struct drm_i915_gem_object, obj_exec_link); list_del_init(&obj->obj_exec_link); - if (ret) - drm_gem_object_unreference(&obj->base); + drm_gem_object_unreference(&obj->base); } + /* + * Objects already transfered to the vmas list will be unreferenced by + * eb_destroy. + */ + return ret; } From 4db080f9e93411c3c41ec402244da28e2bbde835 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 4 Dec 2013 11:37:09 +0000 Subject: [PATCH 098/312] drm/i915: Fix erroneous dereference of batch_obj inside reset_status As the rings may be processed and their requests deallocated in a different order to the natural retirement during a reset, /* Whilst this request exists, batch_obj will be on the * active_list, and so will hold the active reference. Only when this * request is retired will the the batch_obj be moved onto the * inactive_list and lose its active reference. Hence we do not need * to explicitly hold another reference here. */ is violated, and the batch_obj may be dereferenced after it had been freed on another ring. This can be simply avoided by processing the status update prior to deallocating any requests. Fixes regression (a possible OOPS following a GPU hang) from commit aa60c664e6df502578454621c3a9b1f087ff8d25 Author: Mika Kuoppala Date: Wed Jun 12 15:13:20 2013 +0300 drm/i915: find guilty batch buffer on ring resets Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: stable@vger.kernel.org Reviewed-by: Mika Kuoppala [danvet: Add the code comment Chris supplied.] 
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 34 +++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 621c7c67a643..76d3d1ab73c6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) kfree(request); } -static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) +static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) { - u32 completed_seqno; - u32 acthd; + u32 completed_seqno = ring->get_seqno(ring, false); + u32 acthd = intel_ring_get_active_head(ring); + struct drm_i915_gem_request *request; - acthd = intel_ring_get_active_head(ring); - completed_seqno = ring->get_seqno(ring, false); + list_for_each_entry(request, &ring->request_list, list) { + if (i915_seqno_passed(completed_seqno, request->seqno)) + continue; + i915_set_reset_status(ring, request, acthd); + } +} + +static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, struct drm_i915_gem_request, list); - if (request->seqno > completed_seqno) - i915_set_reset_status(ring, request, acthd); - i915_gem_free_request(request); } @@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev) struct intel_ring_buffer *ring; int i; + /* + * Before we free the objects from the requests, we need to inspect + * them for finding the guilty party. As the requests only borrow + * their reference to the objects, the inspection must be done first. + */ for_each_ring(ring, dev_priv, i) - i915_gem_reset_ring_lists(dev_priv, ring); + i915_gem_reset_ring_status(dev_priv, ring); + + for_each_ring(ring, dev_priv, i) + i915_gem_reset_ring_cleanup(dev_priv, ring); i915_gem_cleanup_ringbuffer(dev); From 8333f0fe133be420ce3fcddfd568c3a559ab274e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 2 Dec 2013 18:15:51 -0500 Subject: [PATCH 099/312] drm/radeon: Fix sideport problems on certain RS690 boards Some RS690 boards with 64MB of sideport memory show up as having 128MB sideport + 256MB of UMA. In this case, just skip the sideport memory and use UMA. This fixes rendering corruption and should improve performance. bug: https://bugs.freedesktop.org/show_bug.cgi?id=35457 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/rs690.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 1c560629575a..e7dab069cccf 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev) base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + /* Some boards seem to be configured for 128MB of sideport memory, + * but really only have 64MB. Just skip the sideport and use + * UMA memory. 
+ */ + if (rdev->mc.igp_sideport_enabled && + (rdev->mc.real_vram_size == (384 * 1024 * 1024))) { + base += 128 * 1024 * 1024; + rdev->mc.real_vram_size -= 128 * 1024 * 1024; + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; + } /* Use K8 direct mapping for fast fb access. */ rdev->fastfb_working = false; From 693e0cb052c607e2d41edf9e9f1fa99ff8c266c1 Mon Sep 17 00:00:00 2001 From: David Henningsson Date: Thu, 12 Dec 2013 09:52:03 +0100 Subject: [PATCH 100/312] ALSA: hda - Add enable_msi=0 workaround for four HP machines While enabling these machines, we found we would sometimes lose an interrupt if we change hardware volume during playback, and that disabling msi fixed this issue. (Losing the interrupt caused underruns and crackling audio, as the one second timeout is usually bigger than the period size.) The machines were all machines from HP, running AMD Hudson controller, and Realtek ALC282 codec. Cc: stable@vger.kernel.org BugLink: https://bugs.launchpad.net/bugs/1260225 Signed-off-by: David Henningsson Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 27aa14007cbd..956871d8b3d2 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev) * white/black-list for enable_msi */ static struct snd_pci_quirk msi_black_list[] = { + SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ From 1522f9c79cd29c8f55120777ddc8ba63b9216509 Mon Sep 17 00:00:00 2001 From: Martin Andersson Date: Sat, 7 Dec 2013 23:22:10 +0100 Subject: [PATCH 101/312] drm/radeon/dpm: Fix hwmon crash Commit ec39f64bba3421c2060fcbd1aeb6eec81fe0a42d (drm/radeon/dpm: Convert to use devm_hwmon_register_with_groups) converted one usage of dev_get_drvdata, but there were two more. 
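For context, the convention behind the fix: the pointer passed as drvdata to devm_hwmon_register_with_groups() is exactly what dev_get_drvdata() hands back in the attribute callbacks on the hwmon device, so no detour through the drm_device is needed. A minimal self-contained sketch with made-up names (my_chip, "mychip"), not the radeon code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct my_chip {
	int temp;			/* hypothetical driver state */
};

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* dev is the hwmon device; drvdata is the pointer registered below */
	struct my_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", chip->temp);
}
static DEVICE_ATTR_RO(temp1_input);

static struct attribute *my_attrs[] = {
	&dev_attr_temp1_input.attr,
	NULL
};
ATTRIBUTE_GROUPS(my);

static int my_hwmon_register(struct device *parent, struct my_chip *chip)
{
	struct device *hwmon;

	hwmon = devm_hwmon_register_with_groups(parent, "mychip", chip,
						my_groups);
	return PTR_ERR_OR_ZERO(hwmon);
}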
bug: https://bugs.freedesktop.org/show_bug.cgi?id=72457 Signed-off-by: Martin Andersson Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_pm.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index dc75bb603ea5..984097b907ef 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -552,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); int hyst = to_sensor_dev_attr(attr)->index; int temp; @@ -580,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = container_of(kobj, struct device, kobj); - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); /* Skip limit attributes if DPM is not enabled */ if (rdev->pm.pm_method != PM_METHOD_DPM && From 7819678faec569f90c243d877c6f948dfb03f3a9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 9 Dec 2013 17:38:51 -0500 Subject: [PATCH 102/312] drm/radeon/cik: plug in missing blit callback I implemented support for this, but forget to hook up the callback so the driver can actually use it. On asics with a dedicated DMA engine, we use the DMA engine for buffer migration so this is just for testing purposes. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_asic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e354ce94cdd1..c0425bb6223a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = NULL, + .blit = &cik_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, @@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = { .hdmi_setmode = &evergreen_hdmi_setmode, }, .copy = { - .blit = NULL, + .blit = &cik_copy_cpdma, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, From 1b3abef830db98c11d7f916a483abaf2501f3323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 10 Dec 2013 17:57:37 +0100 Subject: [PATCH 103/312] drm/radeon: fix typo in cik_copy_dma MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we end up with a rather strange looking result. 
Signed-off-by: Christian König Tested-by: Tom Stellard Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik_sdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 0300727a4f70..d08b83c6267b 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev, radeon_ring_write(ring, 0); /* src/dst endian swap */ radeon_ring_write(ring, src_offset & 0xffffffff); radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); - radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, dst_offset & 0xffffffff); radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; From 227ae10f17a5f2fd1307b7e582b603ef7bbb7e97 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 11 Dec 2013 11:43:58 -0500 Subject: [PATCH 104/312] drm/radeon: add missing display tiling setup for oland Fixes improperly set up display params for 2D tiling on oland. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/atombios_crtc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 80a20120e625..b1970596a782 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, } else if ((rdev->family == CHIP_TAHITI) || (rdev->family == CHIP_PITCAIRN)) fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); - else if (rdev->family == CHIP_VERDE) + else if ((rdev->family == CHIP_VERDE) || + (rdev->family == CHIP_OLAND) || + (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); switch (radeon_crtc->crtc_id) { From 59aebe2b32466f7ed5b79f18646be6dddc926a6d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 12 Dec 2013 09:05:06 -0500 Subject: [PATCH 105/312] Revert "drm/radeon: Implement radeon_pci_shutdown" This causes a race condition between drm_dev_unregister() and pci_driver.shutdown at shutdown or driver unload time. We need to revisit how to properly support kexec within the drm. This reverts commit 846ae41ae99d314bf2a02784152208a6ddf7eddc. 
--- drivers/gpu/drm/radeon/radeon_drv.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 9f5ff28864f6..1958b36ad0e5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -508,15 +508,6 @@ static const struct file_operations radeon_driver_kms_fops = { #endif }; - -static void -radeon_pci_shutdown(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - radeon_driver_unload_kms(dev); -} - static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | @@ -586,7 +577,6 @@ static struct pci_driver radeon_kms_pci_driver = { .probe = radeon_pci_probe, .remove = radeon_pci_remove, .driver.pm = &radeon_pm_ops, - .shutdown = radeon_pci_shutdown, }; static int __init radeon_init(void) From 4cc629b7a20945ce35628179180329b6bc9e552b Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 10 Dec 2013 15:19:03 -0800 Subject: [PATCH 106/312] gpio: msm: Fix irq mask/unmask by writing bits instead of numbers We should be writing bits here but instead we're writing the numbers that correspond to the bits we want to write. Fix it by wrapping the numbers in the BIT() macro. This fixes gpios acting as interrupts. Cc: stable@vger.kernel.org Signed-off-by: Stephen Boyd Signed-off-by: Linus Walleij --- drivers/gpio/gpio-msm-v2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index 7b37300973db..2baf0ddf7e02 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) spin_lock_irqsave(&tlmm_lock, irq_flags); writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); - clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); + clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); __clear_bit(gpio, msm_gpio.enabled_irqs); spin_unlock_irqrestore(&tlmm_lock, irq_flags); } @@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) spin_lock_irqsave(&tlmm_lock, irq_flags); __set_bit(gpio, msm_gpio.enabled_irqs); - set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); + set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); spin_unlock_irqrestore(&tlmm_lock, irq_flags); } From 8808b64daac68a2c85366c767a3ef850824ede74 Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Fri, 29 Nov 2013 22:04:09 +0400 Subject: [PATCH 107/312] gpio: rcar: Fix level interrupt handling According to the manual, if a port is set for level detection using the corresponding bit in the edge/level select register and an external level interrupt signal is asserted, the corresponding bit in INTDT does not use the FF to hold the input. Thus, writing 1 to the corresponding bits in INTCLR cannot clear the corresponding bits in the INTDT register. Instead, when an external input signal is stopped, the corresponding bit in INTDT is cleared automatically. Since the INTDT bit cannot be cleared for the level interrupts until the interrupt signal is stopped, we end up with the infinite loop when using deferred (threaded) IRQ handling. 
Since a deferred interrupt is disabled by the low-level handler and re-enabled only when the deferred handler is completed, Fix the issue by dropping disabled interrupts from the pending mask as suggested by Laurent Pinchart Changes in V2: * Drop disabled interrupts from pending mask altogether instead of dropping level interrupts one by one once they get handled. Signed-off-by: Valentine Barshak Acked-by: Laurent Pinchart Acked-by: Magnus Damm Signed-off-by: Linus Walleij --- drivers/gpio/gpio-rcar.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index fe088a30567a..8b7e719a68c3 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) u32 pending; unsigned int offset, irqs_handled = 0; - while ((pending = gpio_rcar_read(p, INTDT))) { + while ((pending = gpio_rcar_read(p, INTDT) & + gpio_rcar_read(p, INTMSK))) { offset = __ffs(pending); gpio_rcar_write(p, INTCLR, BIT(offset)); generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); From 0f58411d4fd704d8958879fb08751eae3573271b Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Thu, 5 Dec 2013 09:42:49 -0500 Subject: [PATCH 108/312] drm: don't double-free on driver load error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All instances of drm_dev_register are followed by drm_dev_free on failure. Don't free dev->control/render/primary on failure, as they will be freed by drm_dev_free since commit 8f6599da8e (drm: delay minor destruction to drm_dev_free()). Instead unplug them. Signed-off-by: Ilia Mirkin Reported-and-tested-by: Bruno Prémont Reviewed-by: David Herrmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_stub.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index f53d5246979c..66dd3a001cf1 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -566,11 +566,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (dev->driver->unload) dev->driver->unload(dev); err_primary_node: - drm_put_minor(dev->primary); + drm_unplug_minor(dev->primary); err_render_node: - drm_put_minor(dev->render); + drm_unplug_minor(dev->render); err_control_node: - drm_put_minor(dev->control); + drm_unplug_minor(dev->control); err_agp: if (dev->driver->bus->agp_destroy) dev->driver->bus->agp_destroy(dev); From 36aa1b180e7398e2bd27642760bfaa4ad8c65ab6 Mon Sep 17 00:00:00 2001 From: Ulrich Weigand Date: Thu, 12 Dec 2013 15:59:34 +1100 Subject: [PATCH 109/312] powerpc: PTRACE_PEEKUSR always returns FPR0 There is a bug in using ptrace to access FPRs via PTRACE_PEEKUSR / PTRACE_POKEUSR. In effect, trying to access any of the FPRs always really accesses FPR0, which does seriously break debugging :-) The problem seems to have been introduced by commit 3ad26e5c4459d (Merge branch 'for-kvm' into next). 
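In plain C the slip is easy to see; an illustration only, with placeholder names rather than the ptrace code: taking the address of the whole array instead of the requested element always copies element 0.

#include <linux/string.h>
#include <linux/types.h>

static u64 fpr[32];			/* stand-in for the saved FP register state */

/* broken: &fpr is the start of the array, so fpidx is ignored */
static void peek_fpr_broken(unsigned long *out, int fpidx)
{
	memcpy(out, &fpr, sizeof(long));
}

/* fixed: index the array, as TS_FPR(fpidx) does in the patch below */
static void peek_fpr_fixed(unsigned long *out, int fpidx)
{
	memcpy(out, &fpr[fpidx], sizeof(long));
}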
[ It is indeed a merge conflict between Paul's FPU/VSX state rework and my LE patches - Anton ] Signed-off-by: Ulrich Weigand Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/ptrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 75fb40498b41..2e3d2bf536c5 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.fp_state.fpr, + memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long)); else tmp = child->thread.fp_state.fpscr; @@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.fp_state.fpr, &data, + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); else child->thread.fp_state.fpscr = data; From 01666c8ee2b6afcd31de2064fbb7c097a75e5089 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:35 +1100 Subject: [PATCH 110/312] powerpc: Fix endian issue in setup-common.c During on LE boot we see: Partition configured for 1073741824 cpus, operating system maximum is 2048. Clearly missing a byteswap here. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/setup-common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index febc80445d25..bc76cc6b419c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && (dn = of_find_node_by_path("/rtas"))) { int num_addr_cell, num_size_cell, maxcpus; - const unsigned int *ireg; + const __be32 *ireg; num_addr_cell = of_n_addr_cells(dn); num_size_cell = of_n_size_cells(dn); @@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) if (!ireg) goto out; - maxcpus = ireg[num_addr_cell + num_size_cell]; + maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); /* Double maxcpus for processors which have SMT capability */ if (cpu_has_feature(CPU_FTR_SMT)) From f8a1883a833bbad8e6b5ec4f0918b7797e652d65 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:36 +1100 Subject: [PATCH 111/312] powerpc: Fix topology core_id endian issue on LE builds cpu_to_core_id() is missing a byteswap: cat /sys/devices/system/cpu/cpu63/topology/core_id 201326592 Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/smp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a3b64f3bf9a2..c1cf4a1522d9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) int cpu_to_core_id(int cpu) { struct device_node *np; - const int *reg; + const __be32 *reg; int id = -1; np = of_get_cpu_node(cpu, NULL); @@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) if (!reg) goto out; - id = *reg; + id = be32_to_cpup(reg); out: of_node_put(np); return id; From ca5de4e652ea09c0c18ebbd12dd4c2149271245a Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:37 +1100 Subject: [PATCH 112/312] powerpc/pseries: Fix endian 
issues in /proc/ppc64/lparcfg Some obvious issues: cat /proc/ppc64/lparcfg ... partition_id=16777216 ... partition_potential_processors=268435456 Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/pseries/lparcfg.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007eae64..c9fecf09b8fa 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) { struct hvcall_ppp_data ppp_data; struct device_node *root; - const int *perf_level; + const __be32 *perf_level; int rc; rc = h_get_ppp(&ppp_data); @@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) perf_level = of_get_property(root, "ibm,partition-performance-parameters-level", NULL); - if (perf_level && (*perf_level >= 1)) { + if (perf_level && (be32_to_cpup(perf_level) >= 1)) { seq_printf(m, "physical_procs_allocated_to_virtualization=%d\n", ppp_data.phys_platform_procs); @@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) int partition_potential_processors; int partition_active_processors; struct device_node *rtas_node; - const int *lrdrp = NULL; + const __be32 *lrdrp = NULL; rtas_node = of_find_node_by_path("/rtas"); if (rtas_node) @@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; } else { - partition_potential_processors = *(lrdrp + 4); + partition_potential_processors = be32_to_cpup(lrdrp + 4); } of_node_put(rtas_node); @@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *model = ""; const char *system_id = ""; const char *tmp; - const unsigned int *lp_index_ptr; + const __be32 *lp_index_ptr; unsigned int lp_index = 0; seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); @@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); if (lp_index_ptr) - lp_index = *lp_index_ptr; + lp_index = be32_to_cpup(lp_index_ptr); of_node_put(rootdn); } seq_printf(m, "serial_number=%s\n", system_id); From 9fa2984d1b5d9736a88c813ae89c160a2f9d5308 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:38 +1100 Subject: [PATCH 113/312] powerpc/pseries: Fix endian issues in nvram code The NVRAM code has a number of endian issues. I noticed a very confused error log count: RTAS: 100663330 -------- RTAS event begin -------- 100663330 == 0x06000022. 0x6 LE error logs and 0x22 BE error logs. The pstore code has similar issues - if we write an oops in one endian and attempt to read it in another we get junk. Make both of these formats big endian, and byteswap as required. 
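The fix boils down to declaring the on-NVRAM fields __be32/__be16/__be64 and converting at every access. A minimal sketch of the idiom using the error-log header from this patch; the bump_seq() helper is illustrative only:

#include <linux/types.h>
#include <asm/byteorder.h>

struct err_log_info {
	__be32 error_type;
	__be32 seq_num;
};

/* decode from the fixed NVRAM byte order, update, re-encode before writing back */
static void bump_seq(struct err_log_info *info)
{
	u32 seq = be32_to_cpu(info->seq_num);

	info->seq_num = cpu_to_be32(seq + 1);
}

Stored this way, LE and BE kernels both see the same counter, instead of LE and BE increments landing in different bytes of the same word, which is how 100663330 (0x06000022) came about.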
Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/pseries/nvram.c | 46 +++++++++++++------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58d4664..d7096f2f7751 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ static DEFINE_SPINLOCK(nvram_lock); struct err_log_info { - int error_type; - unsigned int seq_num; + __be32 error_type; + __be32 seq_num; }; struct nvram_os_partition { @@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { }; struct oops_log_info { - u16 version; - u16 report_length; - u64 timestamp; + __be16 version; + __be16 report_length; + __be64 timestamp; } __attribute__((packed)); static void oops_to_nvram(struct kmsg_dumper *dumper, @@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, length = part->size; } - info.error_type = err_type; - info.seq_num = error_log_cnt; + info.error_type = cpu_to_be32(err_type); + info.seq_num = cpu_to_be32(error_log_cnt); tmp_index = part->index; @@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, } if (part->os_partition) { - *error_log_cnt = info.seq_num; - *err_type = info.error_type; + *error_log_cnt = be32_to_cpu(info.seq_num); + *err_type = be32_to_cpu(info.error_type); } return 0; @@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) pr_err("nvram: logging uncompressed oops/panic report\n"); return -1; } - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) zipped_len; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(zipped_len); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); return 0; } @@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, clobbering_unread_rtas_event()) return -1; - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) size; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(size); + oops_hdr->timestamp = cpu_to_be64(get_seconds()); if (compressed) err_type = ERR_TYPE_KERNEL_PANIC_GZ; @@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, size_t length, hdr_size; oops_hdr = (struct oops_log_info *)buff; - if (oops_hdr->version < OOPS_HDR_VERSION) { + if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { /* Old format oops header had 2-byte record size */ hdr_size = sizeof(u16); - length = oops_hdr->version; + length = be16_to_cpu(oops_hdr->version); time->tv_sec = 0; time->tv_nsec = 0; } else { hdr_size = sizeof(*oops_hdr); - length = oops_hdr->report_length; - time->tv_sec = oops_hdr->timestamp; + length = be16_to_cpu(oops_hdr->report_length); + time->tv_sec = be64_to_cpu(oops_hdr->timestamp); time->tv_nsec = 0; } *buf = kmalloc(length, GFP_KERNEL); @@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, kmsg_dump_get_buffer(dumper, false, oops_data, oops_data_sz, &text_len); err_type = ERR_TYPE_KERNEL_PANIC; - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) text_len; - oops_hdr->timestamp = get_seconds(); + oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); + oops_hdr->report_length = cpu_to_be16(text_len); + 
oops_hdr->timestamp = cpu_to_be64(get_seconds()); } (void) nvram_write_os_partition(&oops_log_partition, oops_buf, - (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, + (int) (sizeof(*oops_hdr) + text_len), err_type, ++oops_count); spin_unlock_irqrestore(&lock, flags); From 5091f0c969d2d0639db2efddce7352e7c48069ba Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:39 +1100 Subject: [PATCH 114/312] powerpc/pseries: Fix PCIE link speed endian issue We need to byteswap ibm,pcie-link-speed-stats. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/pseries/pci.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856cdf47..70670a2d9cf2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) { struct device_node *dn, *pdn; struct pci_bus *bus; - const uint32_t *pcie_link_speed_stats; + const __be32 *pcie_link_speed_stats; bus = bridge->bus; @@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { - pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, + pcie_link_speed_stats = of_get_property(pdn, "ibm,pcie-link-speed-stats", NULL); if (pcie_link_speed_stats) break; @@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) return 0; } - switch (pcie_link_speed_stats[0]) { + switch (be32_to_cpup(pcie_link_speed_stats)) { case 0x01: bus->max_bus_speed = PCIE_SPEED_2_5GT; break; @@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) break; } - switch (pcie_link_speed_stats[1]) { + switch (be32_to_cpup(pcie_link_speed_stats)) { case 0x01: bus->cur_bus_speed = PCIE_SPEED_2_5GT; break; From 8d15315537578a9ef5d35912d06127205dd82be8 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:40 +1100 Subject: [PATCH 115/312] powerpc/pseries: Fix endian issues in MSI code The MSI code is miscalculating quotas in little endian mode. Add required byteswaps to fix this. Before we claimed a quota of 65536, after the patch we see the correct value of 256. 
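The quoted numbers fall straight out of the byte order: if the firmware property holds 256, the cell is stored big-endian as the bytes 00 00 01 00; a native little-endian load reads that as 0x00010000 = 65536, while be32_to_cpup() recovers 0x100 = 256 on either endianness. A minimal sketch of the corrected read (kernel context assumed; the function name is illustrative and mirrors the patched check_req()):

  #include <linux/of.h>           /* of_get_property(), struct device_node */
  #include <asm/byteorder.h>      /* be32_to_cpup() */

  static u32 read_req_msi(struct device_node *dn, const char *prop_name)
  {
          /* e.g. "ibm,req#msi" = 256 is stored as bytes 00 00 01 00 */
          const __be32 *p = of_get_property(dn, prop_name, NULL);

          if (!p)
                  return 0;

          /* a native LE load would yield 0x00010000, i.e. 65536 */
          return be32_to_cpup(p);
  }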
Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/pseries/msi.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0abce6fa..0c882e83c4ce 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) { struct device_node *dn; struct pci_dn *pdn; - const u32 *req_msi; + const __be32 *p; + u32 req_msi; pdn = pci_get_pdn(pdev); if (!pdn) @@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) dn = pdn->node; - req_msi = of_get_property(dn, prop_name, NULL); - if (!req_msi) { + p = of_get_property(dn, prop_name, NULL); + if (!p) { pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); return -ENOENT; } - if (*req_msi < nvec) { + req_msi = be32_to_cpup(p); + if (req_msi < nvec) { pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); - if (*req_msi == 0) /* Be paranoid */ + if (req_msi == 0) /* Be paranoid */ return -ENOSPC; - return *req_msi; + return req_msi; } return 0; @@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) { struct device_node *dn; - const u32 *p; + const __be32 *p; dn = of_node_get(pci_device_to_OF_node(dev)); while (dn) { @@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) if (p) { pr_debug("rtas_msi: found prop on dn %s\n", dn->full_name); - *total = *p; + *total = be32_to_cpup(p); return dn; } @@ -232,13 +234,13 @@ struct msi_counts { static void *count_non_bridge_devices(struct device_node *dn, void *data) { struct msi_counts *counts = data; - const u32 *p; + const __be32 *p; u32 class; pr_debug("rtas_msi: counting %s\n", dn->full_name); p = of_get_property(dn, "class-code", NULL); - class = p ? *p : 0; + class = p ? be32_to_cpup(p) : 0; if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) counts->num_devices++; @@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) static void *count_spare_msis(struct device_node *dn, void *data) { struct msi_counts *counts = data; - const u32 *p; + const __be32 *p; int req; if (dn == counts->requestor) @@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) req = 0; p = of_get_property(dn, "ibm,req#msi", NULL); if (p) - req = *p; + req = be32_to_cpup(p); p = of_get_property(dn, "ibm,req#msi-x", NULL); if (p) - req = max(req, (int)*p); + req = max(req, (int)be32_to_cpup(p)); } if (req < counts->quota) From a29e30efa3a1b253fd1252731aa774a3544c5c3b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Dec 2013 15:59:41 +1100 Subject: [PATCH 116/312] powerpc: Fix endian issues in crash dump code A couple more device tree properties that need byte swapping. 
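The pattern is the same, worked out here for the RTAS reservation: the properties are big-endian cells, so they are read through be32_to_cpup() before any arithmetic is done on them. A short sketch with hypothetical property values (kernel context assumed; basep and sizep are the pointers from the patch):

  const __be32 *basep, *sizep;
  unsigned int rtas_start = 0, rtas_end = 0;

  basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
  sizep = of_get_property(rtas.dev, "rtas-size", NULL);

  if (basep && sizep) {
          /*
           * e.g. a base of 0x2000000 is stored as the bytes 02 00 00 00;
           * an unswapped load on LE would yield 2 and a bogus range.
           */
          rtas_start = be32_to_cpup(basep);
          rtas_end   = rtas_start + be32_to_cpup(sizep);
  }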
Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/crash_dump.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 779a78c26435..11c1d069d920 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) { unsigned long addr; - const u32 *basep, *sizep; + const __be32 *basep, *sizep; unsigned int rtas_start = 0, rtas_end = 0; basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); sizep = of_get_property(rtas.dev, "rtas-size", NULL); if (basep && sizep) { - rtas_start = *basep; - rtas_end = *basep + *sizep; + rtas_start = be32_to_cpup(basep); + rtas_end = rtas_start + be32_to_cpup(sizep); } for (addr = begin; addr < end; addr += PAGE_SIZE) { From 01a9dbccbdfc7a93950b791405e476c510f73ab0 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 13 Dec 2013 15:53:43 +1100 Subject: [PATCH 117/312] powerpc/powernv: Fix endian issue in opal_xscom_read opal_xscom_read uses a pointer to return the data so we need to byteswap it on LE builds. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/opal.h | 2 +- arch/powerpc/platforms/powernv/opal-xscom.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06be1d84..522f50c6c146 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -720,7 +720,7 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); +int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8fd55ac..4fbf276ac99e 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) { struct opal_scom_map *m = map; int64_t rc; + __be64 v; reg = opal_scom_unmangle(reg); - rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); + rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); + *value = be64_to_cpu(v); return opal_xscom_err_xlate(rc); } From 803c2d2f84da9dc2619449994af34d27148ab20d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 13 Dec 2013 15:56:06 +1100 Subject: [PATCH 118/312] powerpc/powernv: Fix OPAL LPC access in Little Endian We are passing pointers to the firmware for reads, we need to properly convert the result as OPAL is always BE. 
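A sketch of why the conversion matters even for a single-byte read: OPAL fills the 32-bit result buffer big-endian, so on a little-endian kernel a native load puts the byte at the wrong end of the word. (Kernel context assumed; the function name is illustrative and mirrors the patched opal_lpc_inb().)

  static u8 example_lpc_inb(unsigned long port)
  {
          __be32 data;            /* OPAL always returns big-endian data */
          int64_t rc;

          rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
          /*
           * If the port byte is 0xab, memory at &data holds 00 00 00 ab.
           * A native LE load would see 0xab000000 and truncate to 0x00;
           * be32_to_cpu() yields 0xab on both endiannesses.
           */
          return rc ? 0xff : be32_to_cpu(data);
  }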
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/opal.h | 2 +- arch/powerpc/platforms/powernv/opal-lpc.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 522f50c6c146..7bdcf340016c 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -726,7 +726,7 @@ int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, - uint32_t addr, uint32_t *data, uint32_t sz); + uint32_t addr, __be32 *data, uint32_t sz); int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); int64_t opal_manage_flash(uint8_t op); int64_t opal_update_flash(uint64_t blk_list); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4f9892..79d83cad3d67 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; static u8 opal_lpc_inb(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xffff) return 0xff; rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); - return rc ? 0xff : data; + return rc ? 0xff : be32_to_cpu(data); } static __le16 __opal_lpc_inw(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffe) return 0xffff; if (port & 1) return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); - return rc ? 0xffff : data; + return rc ? 0xffff : be32_to_cpu(data); } static u16 opal_lpc_inw(unsigned long port) { @@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) static __le32 __opal_lpc_inl(unsigned long port) { int64_t rc; - uint32_t data; + __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffc) return 0xffffffff; @@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) (__le32)opal_lpc_inb(port + 2) << 8 | opal_lpc_inb(port + 3); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); - return rc ? 0xffffffff : data; + return rc ? 0xffffffff : be32_to_cpu(data); } static u32 opal_lpc_inl(unsigned long port) From f984841bc0c4d596c4e95d1798f318291c4f78f5 Mon Sep 17 00:00:00 2001 From: Jason Cooper Date: Mon, 9 Dec 2013 11:15:56 -0800 Subject: [PATCH 119/312] dma: mv_xor: remove mv_desc_get_dest_addr() The following commit: 54f8d501e842 dmaengine: remove DMA unmap from drivers removed the last caller to mv_desc_get_dest_addr(), creating the warning: drivers/dma/mv_xor.c:57:12: warning: mv_desc_get_dest_addr defined but not used [-Wunused-function] Remove it. 
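For context, the unmap rework cited above moved DMA unmapping into the dmaengine core, which is why per-driver accessors like mv_desc_get_dest_addr() have no remaining users. The replacement pattern, as used by the self-test fixes later in this series, records every mapping in a struct dmaengine_unmap_data and lets the core undo them. A trimmed outline (error handling and descriptor submission omitted; src and dest are assumed to be page-sized kernel buffers and dma_chan an already acquired channel):

  struct dmaengine_unmap_data *unmap;
  dma_addr_t src_dma, dest_dma;

  unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
  if (!unmap)
          return -ENOMEM;

  src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
                         PAGE_SIZE, DMA_TO_DEVICE);
  unmap->addr[0] = src_dma;
  unmap->to_cnt = 1;

  dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
                          PAGE_SIZE, DMA_FROM_DEVICE);
  unmap->addr[1] = dest_dma;
  unmap->from_cnt = 1;
  unmap->len = PAGE_SIZE;

  /* ... build and submit the descriptor, wait for completion ... */

  dmaengine_unmap_put(unmap);     /* drop the ref; the core unmaps all recorded addresses */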
Signed-off-by: Jason Cooper Acked-by: Vinod Koul Signed-off-by: Dan Williams --- drivers/dma/mv_xor.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7807f0ef4e20..23bcc9158cbc 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) hw_desc->desc_command = (1 << 31); } -static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_dest_addr; -} - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { From d7fb0300fe663a5e338e3adeb7f811a2f9727e6c Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Mon, 9 Dec 2013 11:15:57 -0800 Subject: [PATCH 120/312] dmaengine: at_hdmac: remove unused function commit 54f8d501e8428 ('dmaengine: remove DMA unmap from drivers') refactored some code which resulted in an unused function in the at_hdmac driver: drivers/dma/at_hdmac_regs.h:350:23: warning: 'chan2parent' defined but not used [-Wunused-function] Fixes: 54f8d501e8428 ('dmaengine: remove DMA unmap from drivers') Signed-off-by: Olof Johansson Cc: Bartlomiej Zolnierkiewicz Acked-by: Nicolas Ferre Acked-by: Bartlomiej Zolnierkiewicz Acked-by: Vinod Koul Signed-off-by: Dan Williams --- drivers/dma/at_hdmac_regs.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index f31d647acdfa..2787aba60c6b 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} #if defined(VERBOSE_DEBUG) static void vdbg_dump_regs(struct at_dma_chan *atchan) From 6aa2731ce2c7bd1305b553b5fc14ae4856d36569 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Dec 2013 11:15:59 -0800 Subject: [PATCH 121/312] dma: fix build warnings in ppc4xx drivers/dma/ppc4xx/adma.c:1507:6: warning: unused variable 'i' [-Wunused-variable] - due to unmap reworks drivers/dma/ppc4xx/adma.c:3900:2: warning: format '%s' expects a matching 'char *' argument [-Wformat] - due to memset removal drivers/dma/ppc4xx/adma.c:538:13: warning: 'ppc440spe_desc_init_memset' defined but not used [-Wunused-function] - due to memset removal Cc: Bartlomiej Zolnierkiewicz Signed-off-by: Dan Williams --- drivers/dma/ppc4xx/adma.c | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 8da48c6b2a38..8bba298535b0 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -532,29 +532,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc, hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; } -/** - * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation - */ -static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc, - int value, unsigned long flags) -{ - struct dma_cdb *hw_desc = desc->hw_desc; - - memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); - desc->hw_next = NULL; - desc->src_cnt = 1; - desc->dst_cnt = 1; - - if (flags & DMA_PREP_INTERRUPT) - set_bit(PPC440SPE_DESC_INT, &desc->flags); - else - clear_bit(PPC440SPE_DESC_INT, &desc->flags); - - hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); - hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); - 
hw_desc->opc = DMA_CDB_OPC_DFILL128; -} - /** * ppc440spe_desc_set_src_addr - set source address into the descriptor */ @@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( struct ppc440spe_adma_chan *chan, dma_cookie_t cookie) { - int i; - BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; @@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) ppc440spe_adma_prep_dma_interrupt; } pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " - "( %s%s%s%s%s%s%s)\n", + "( %s%s%s%s%s%s)\n", dev_name(adev->dev), dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", From bbc76560d488c437dbddff72242b0a07e42a0fd0 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Dec 2013 11:16:00 -0800 Subject: [PATCH 122/312] dma: fix fsldma build warnings drivers/dma/fsldma.c: In function 'fsldma_cleanup_descriptor': drivers/dma/fsldma.c:860:6: warning: unused variable 'len' [-Wunused-variable] drivers/dma/fsldma.c:859:13: warning: unused variable 'dst' [-Wunused-variable] drivers/dma/fsldma.c:858:13: warning: unused variable 'src' [-Wunused-variable] drivers/dma/fsldma.c:857:17: warning: unused variable 'dev' [-Wunused-variable] - due to unmap changes drivers/dma/fsldma.c: In function 'fsl_dma_tx_submit': drivers/dma/fsldma.c:428:2: warning: 'cookie' may be used uninitialized in this function [-Wuninitialized] - long standing warning Cc: Bartlomiej Zolnierkiewicz Cc: Li Yang Cc: Zhang Wei Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 31 +------------------------------ 1 file changed, 1 insertion(+), 30 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7086a16a55f2..f157c6f76b32 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan, hw->count = CPU_TO_DMA(chan, count, 32); } -static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) -{ - return DMA_TO_CPU(chan, desc->hw.count, 32); -} - static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { @@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan, hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } -static dma_addr_t get_desc_src(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) - ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; - return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; -} - static void set_desc_dst(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t dst) { @@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan, hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } -static dma_addr_t get_desc_dst(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) - ? 
((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; -} - static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { @@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; - dma_cookie_t cookie; + dma_cookie_t cookie = -EINVAL; spin_lock_irqsave(&chan->desc_lock, flags); @@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { struct dma_async_tx_descriptor *txd = &desc->async_tx; - struct device *dev = chan->common.device->dev; - dma_addr_t src = get_desc_src(chan, desc); - dma_addr_t dst = get_desc_dst(chan, desc); - u32 len = get_desc_cnt(chan, desc); /* Run the link descriptor callback function */ if (txd->callback) { From 745c00daf9a75bacb53d0fe8635a132673ab0b46 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Dec 2013 11:16:01 -0800 Subject: [PATCH 123/312] dmatest: fix build warning on mips drivers/dma/dmatest.c:543:11: warning: passing argument 1 of 'virt_to_phys' makes pointer from integer without a cast [enabled by default] mips expects virt_to_phys() to take a pointer. Fix up the types accordingly. Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 20f9a3aaf926..9dfcaf5c1288 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -539,9 +539,9 @@ static int dmatest_func(void *data) um->len = params->buf_size; for (i = 0; i < src_cnt; i++) { - unsigned long buf = (unsigned long) thread->srcs[i]; + void *buf = thread->srcs[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = buf & ~PAGE_MASK; + unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; um->addr[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_TO_DEVICE); @@ -559,9 +559,9 @@ static int dmatest_func(void *data) /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - unsigned long buf = (unsigned long) thread->dsts[i]; + void *buf = thread->dsts[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = buf & ~PAGE_MASK; + unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_BIDIRECTIONAL); From 8e5ee258d98a6643227d958361aec2a62559b804 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Dec 2013 11:16:03 -0800 Subject: [PATCH 124/312] dma: fix build warnings in txx9 The unmap rework missed this: drivers/dma/txx9dmac.c:409:25: warning: unused variable 'ds' [-Wunused-variable] Cc: Bartlomiej Zolnierkiewicz Signed-off-by: Dan Williams --- drivers/dma/txx9dmac.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index bae6c29f5502..17686caf64d5 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; - struct txx9dmac_slave *ds = dc->chan.private; dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", txd->cookie, desc); From 3cc377b9ae4bd3133bf8ba388d2b2b66b2b973c1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Dec 2013 10:33:16 -0800 Subject: [PATCH 125/312] dmaengine: fix enable for high order unmap pools The higher 
order mempools support raid operations, and we want to disable them when raid support is not enabled. Making them conditional on ASYNC_TX_DMA is not sufficient as other users (specifically dmatest) will also issue raid operations. Make raid drivers explicitly request that the core carry the higher order pools. Reported-by: Ezequiel Garcia Tested-by: Ezequiel Garcia Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 6 ++++++ drivers/dma/dmaengine.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 446687cc2334..132a4fd375b2 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -62,6 +62,7 @@ config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86 select DMA_ENGINE + select DMA_ENGINE_RAID select DCA help Enable support for the Intel(R) I/OAT DMA engine present @@ -112,6 +113,7 @@ config MV_XOR bool "Marvell XOR engine support" depends on PLAT_ORION select DMA_ENGINE + select DMA_ENGINE_RAID select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- Enable support for the Marvell XOR engine. @@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA tristate "AMCC PPC440SPe ADMA support" depends on 440SPe || 440SP select DMA_ENGINE + select DMA_ENGINE_RAID select ARCH_HAS_ASYNC_TX_FIND_CHANNEL select ASYNC_TX_ENABLE_CHANNEL_SWITCH help @@ -377,4 +380,7 @@ config DMATEST Simple DMA test client. Say N unless you're debugging a DMA Device driver. +config DMA_ENGINE_RAID + bool + endif diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ea806bdc12ef..b601024f518c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -912,7 +912,7 @@ struct dmaengine_unmap_pool { #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } static struct dmaengine_unmap_pool unmap_pool[] = { __UNMAP_POOL(2), - #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) __UNMAP_POOL(16), __UNMAP_POOL(128), __UNMAP_POOL(256), From d16695a75019ac4baad7a117dc86d1d292e09115 Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Tue, 10 Dec 2013 09:32:36 -0300 Subject: [PATCH 126/312] dma: mv_xor: Use dmaengine_unmap_data for the self-tests The driver-specific unmap code was removed in: commit 54f8d501e842879143e867e70996574a54d1e130 Author: Bartlomiej Zolnierkiewicz Date: Fri Oct 18 19:35:32 2013 +0200 dmaengine: remove DMA unmap from drivers which had the side-effect of not unmapping the self-test mappings. Fix this by using dmaengine_unmap_data in the self-test routines. In addition, since dmaengine_unmap() assumes that all mappings were created with dma_map_page, this commit changes the single mapping to a page mapping to avoid an incorrect unmapping of the memcpy self-test. The allocation could be changed to be alloc_page(), but sticking to kmalloc results in a less intrusive patch. The size of the test buffer is increased, since dma_map_page() seem to fail when the source and destination pages are the same page. Signed-off-by: Ezequiel Garcia Signed-off-by: Dan Williams --- drivers/dma/mv_xor.c | 73 +++++++++++++++++++++++++++++++------------- 1 file changed, 51 insertions(+), 22 deletions(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 23bcc9158cbc..79620887d637 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -781,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan) /* * Perform a transaction to verify the HW works. 
*/ -#define MV_XOR_TEST_SIZE 2000 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) { @@ -791,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; + struct dmaengine_unmap_data *unmap; int err = 0; - src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; - dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ - for (i = 0; i < MV_XOR_TEST_SIZE; i++) + for (i = 0; i < PAGE_SIZE; i++) ((u8 *) src)[i] = (u8)i; dma_chan = &mv_chan->dmachan; @@ -813,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) goto out; } - dest_dma = dma_map_single(dma_chan->device->dev, dest, - MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); + unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); + if (!unmap) { + err = -ENOMEM; + goto free_resources; + } - src_dma = dma_map_single(dma_chan->device->dev, src, - MV_XOR_TEST_SIZE, DMA_TO_DEVICE); + src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, + PAGE_SIZE, DMA_TO_DEVICE); + unmap->to_cnt = 1; + unmap->addr[0] = src_dma; + + dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, + PAGE_SIZE, DMA_FROM_DEVICE); + unmap->from_cnt = 1; + unmap->addr[1] = dest_dma; + + unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, - MV_XOR_TEST_SIZE, 0); + PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); @@ -835,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) } dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, - MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); - if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { + PAGE_SIZE, DMA_FROM_DEVICE); + if (memcmp(src, dest, PAGE_SIZE)) { dev_err(dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; @@ -844,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) } free_resources: + dmaengine_unmap_put(unmap); mv_xor_free_chan_resources(dma_chan); out: kfree(src); @@ -861,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct dma_async_tx_descriptor *tx; + struct dmaengine_unmap_data *unmap; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; + int src_count = MV_XOR_NUM_SRC_TEST; - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + for (src_idx = 0; src_idx < src_count; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) @@ -884,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) } /* Fill in src buffers */ - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + for (src_idx = 0; src_idx < src_count; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) + for (src_idx = 0; src_idx < src_count; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | @@ -904,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) goto out; } - /* test xor */ - dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, - DMA_FROM_DEVICE); + unmap = 
dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, + GFP_KERNEL); + if (!unmap) { + err = -ENOMEM; + goto free_resources; + } - for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) - dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], - 0, PAGE_SIZE, DMA_TO_DEVICE); + /* test xor */ + for (i = 0; i < src_count; i++) { + unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], + 0, PAGE_SIZE, DMA_TO_DEVICE); + dma_srcs[i] = unmap->addr[i]; + unmap->to_cnt++; + } + + unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + dest_dma = unmap->addr[src_count]; + unmap->from_cnt = 1; + unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, - MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); + src_count, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); @@ -942,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) } free_resources: + dmaengine_unmap_put(unmap); mv_xor_free_chan_resources(dma_chan); out: - src_idx = MV_XOR_NUM_SRC_TEST; + src_idx = src_count; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); From 0be8253fa2b4385e6246387db1d6067366e987ba Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Dec 2013 23:59:08 +0000 Subject: [PATCH 127/312] dmaengine: mv_xor: fix oops when channels fail to initialise When a channel fails to initialise, we error out and clean up any previously unregistered channels by walking the entire xordev->channels array. Unfortunately, there are paths which end up storing an error pointer in this array, which we then try and dereference in the cleanup code, which causes an oops. Fix this by avoiding writing invalid pointers to this array in the first place. Tested-by: Aaro Koskinen Signed-off-by: Russell King Signed-off-by: Dan Williams --- drivers/dma/mv_xor.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 79620887d637..53fb0c8365b0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1199,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev) int i = 0; for_each_child_of_node(pdev->dev.of_node, np) { + struct mv_xor_chan *chan; dma_cap_mask_t cap_mask; int irq; @@ -1216,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev) goto err_channel_add; } - xordev->channels[i] = - mv_xor_channel_add(xordev, pdev, i, - cap_mask, irq); - if (IS_ERR(xordev->channels[i])) { - ret = PTR_ERR(xordev->channels[i]); - xordev->channels[i] = NULL; + chan = mv_xor_channel_add(xordev, pdev, i, + cap_mask, irq); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); irq_dispose_mapping(irq); goto err_channel_add; } + xordev->channels[i] = chan; i++; } } else if (pdata && pdata->channels) { for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { struct mv_xor_channel_data *cd; + struct mv_xor_chan *chan; int irq; cd = &pdata->channels[i]; @@ -1245,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev) goto err_channel_add; } - xordev->channels[i] = - mv_xor_channel_add(xordev, pdev, i, - cd->cap_mask, irq); - if (IS_ERR(xordev->channels[i])) { - ret = PTR_ERR(xordev->channels[i]); + chan = mv_xor_channel_add(xordev, pdev, i, + cd->cap_mask, irq); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); goto err_channel_add; } + + xordev->channels[i] = chan; } } From c29cb5eb8157a0049c882672a7f941261f23ea34 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Fri, 13 Dec 2013 11:57:05 +0800 Subject: [PATCH 128/312] ALSA: hda - Add Dell headset 
detection quirk for three laptop models On the Dell machines with codec whose Subsystem Id is 0x10280610, 0x10280629 or 0x1028063e, no external microphone can be detected when plugging a 3-ring headset. If we add "model=dell-headset-multi" for the snd-hda-intel.ko, the problem will disappear. The codecs on these machines belong to alc_269 family. BugLink: https://bugs.launchpad.net/bugs/1260303 Cc: David Henningsson Cc: stable@vger.kernel.org Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 34de5dc2fe9b..5ab8e1631190 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4247,11 +4247,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), + SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), From 8194ee27764b1a86fa7a6b0d411f0a225a6abd5f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 13 Dec 2013 00:57:03 -0800 Subject: [PATCH 129/312] dmaengine: fix sleep in atomic BUG: sleeping function called from invalid context at mm/mempool.c:203 in_atomic(): 1, irqs_disabled(): 0, pid: 43502, name: linbug no locks held by linbug/43502. CPU: 7 PID: 43502 Comm: linbug Not tainted 3.13.0-rc1+ #15 Hardware name: 0000000000000010 ffff88005ebd1878 ffffffff8172d512 ffff8801752bc1c0 ffff8801752bc1c0 ffff88005ebd1898 ffffffff8109d1f6 ffff88005f9a3c58 ffff880177f0f080 ffff88005ebd1918 ffffffff81161f43 ffff88005ebd18f8 Call Trace: [] dump_stack+0x4e/0x68 [] __might_sleep+0xe6/0x120 [] mempool_alloc+0x93/0x170 [] ? mark_held_locks+0x74/0x140 [] ? 
follow_page_mask+0x556/0x600 [] dmaengine_get_unmap_data+0x2e/0x60 [] dma_async_memcpy_pg_to_pg+0x41/0x1c0 [] dma_async_memcpy_buf_to_pg+0x50/0x60 [] dma_memcpy_to_iovec+0xfc/0x190 [] dma_skb_copy_datagram_iovec+0x6f/0x2b0 Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b601024f518c..ef63b9058f3c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, dma_cookie_t cookie; unsigned long flags; - unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); if (!unmap) return -ENOMEM; From 380108d891acf8db5cf0d477176c7ed2b62b7928 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 3 Dec 2013 15:40:37 +0000 Subject: [PATCH 130/312] xen/block: Correctly define structures in public headers on ARM32 and ARM64 On ARM (32 bits and 64 bits), the double-word is 8-bytes aligned. This will result on different structure from Xen and Linux repositories. As Linux is using __packed__ attribute, it must have a 4-bytes padding before each "id" field. This change breaks guest block support with older kernel. IMHO, it's acceptable because Xen on ARM is still on Tech Preview and the hypercall ABI is not yet freezed. Only one architecture (x86_32) doesn't have 64-bit ABI for the block interface. Don't add padding if Linux is compiled for this architecture. Signed-off-by: Julien Grall Acked-by: Roger Pau Monne Acked-by: David Vrabel Cc: Boris Ostrovsky Cc: Ian Campbell Acked-by: Stefano Stabellini [I had asked for confirmation that it did not break x86 and Ian went beyound the call of duty to confirm it. Also a internal regression bucket with 32/64 dom0 with 32/64 domU (PV and HVM) confirmed no regressions. ABI changes are a drag..] Signed-off-by: Konrad Rzeszutek Wilk --- include/xen/interface/io/blkif.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 65e12099ef89..ae665ac59c36 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -146,7 +146,7 @@ struct blkif_request_segment_aligned { struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -163,7 +163,7 @@ struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. 
*/ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -175,7 +175,7 @@ struct blkif_request_discard { struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ @@ -184,7 +184,7 @@ struct blkif_request_other { struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; @@ -192,7 +192,7 @@ struct blkif_request_indirect { blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; -#ifdef CONFIG_X86_64 +#ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ From d7ec435fdd03cfee70dba934ee384acc87bd6d00 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 13 Dec 2013 15:20:19 +0000 Subject: [PATCH 131/312] X.509: Fix certificate gathering Fix the gathering of certificates from both the source tree and the build tree to correctly calculate the pathnames of all the certificates. The problem was that if the default generated cert, signing_key.x509, didn't exist then it would not have a path attached and if it did, it would have a path attached. This means that the contents of kernel/.x509.list would change between the first compilation in a directory and the second. After the second it would remain stable because the signing_key.x509 file exists. The consequence was that the kernel would get relinked unconditionally on the second recompilation. The second recompilation would also show something like this: X.509 certificate list changed CERTS kernel/x509_certificate_list - Including cert /home/torvalds/v2.6/linux/signing_key.x509 AS kernel/system_certificates.o LD kernel/built-in.o which is why the relink would happen. Unfortunately, it isn't a simple matter of just sticking a path on the front of the filename of the certificate in the build directory as make can't then work out how to build it. So the path has to be prepended to the name for sorting and duplicate elimination and then removed for the make rule if it is in the build tree. 
Reported-by: Linus Torvalds Signed-off-by: David Howells --- kernel/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/Makefile b/kernel/Makefile index bbaf7d59c1bb..c23bb0b30293 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE ############################################################################### ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) -X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 -X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ +X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509 +X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ $(or $(realpath $(CERT)),$(CERT)))) +X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw)) ifeq ($(X509_CERTIFICATES),) $(warning *** No X.509 certificates found ***) From f46a3cbbebdaa5ca7b3ab23d7b81925dbe152bcb Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Tue, 10 Dec 2013 22:39:57 +0400 Subject: [PATCH 132/312] KEYS: Remove files generated when SYSTEM_TRUSTED_KEYRING=y Always remove generated SYSTEM_TRUSTED_KEYRING files while doing make mrproper. Signed-off-by: Kirill Tkhai Signed-off-by: David Howells --- kernel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/Makefile b/kernel/Makefile index c23bb0b30293..bc010ee272b6 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -165,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list targets += $(obj)/.x509.list $(obj)/.x509.list: @echo $(X509_CERTIFICATES) >$@ +endif clean-files := x509_certificate_list .x509.list -endif ifeq ($(CONFIG_MODULE_SIG),y) ############################################################################### From 6bd364d82920be726c2d678e7ba9e27112686e11 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Fri, 13 Dec 2013 15:00:32 +0800 Subject: [PATCH 133/312] KEYS: fix uninitialized persistent_keyring_register_sem We run into this bug: [ 2736.063245] Unable to handle kernel paging request for data at address 0x00000000 [ 2736.063293] Faulting instruction address: 0xc00000000037efb0 [ 2736.063300] Oops: Kernel access of bad area, sig: 11 [#1] [ 2736.063303] SMP NR_CPUS=2048 NUMA pSeries [ 2736.063310] Modules linked in: sg nfsv3 rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache nf_conntrack_netbios_ns nf_conntrack_broadcast ipt_MASQUERADE ip6table_mangle ip6table_security ip6table_raw ip6t_REJECT iptable_nat nf_nat_ipv4 iptable_mangle iptable_security iptable_raw ipt_REJECT nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack ebtable_filter ebtables ip6table_filter iptable_filter ip_tables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 nf_nat nf_conntrack ip6_tables ibmveth pseries_rng nx_crypto nfsd auth_rpcgss nfs_acl lockd sunrpc binfmt_misc xfs libcrc32c dm_service_time sd_mod crc_t10dif crct10dif_common ibmvfc scsi_transport_fc scsi_tgt dm_mirror dm_region_hash dm_log dm_multipath dm_mod [ 2736.063383] CPU: 1 PID: 7128 Comm: ssh Not tainted 3.10.0-48.el7.ppc64 #1 [ 2736.063389] task: c000000131930120 ti: c0000001319a0000 task.ti: c0000001319a0000 [ 2736.063394] NIP: c00000000037efb0 LR: c0000000006c40f8 CTR: 0000000000000000 [ 2736.063399] REGS: c0000001319a3870 TRAP: 0300 Not tainted (3.10.0-48.el7.ppc64) [ 2736.063403] MSR: 8000000000009032 CR: 28824242 XER: 20000000 [ 2736.063415] SOFTE: 0 [ 2736.063418] CFAR: 
c00000000000908c [ 2736.063421] DAR: 0000000000000000, DSISR: 40000000 [ 2736.063425] GPR00: c0000000006c40f8 c0000001319a3af0 c000000001074788 c0000001319a3bf0 GPR04: 0000000000000000 0000000000000000 0000000000000020 000000000000000a GPR08: fffffffe00000002 00000000ffff0000 0000000080000001 c000000000924888 GPR12: 0000000028824248 c000000007e00400 00001fffffa0f998 0000000000000000 GPR16: 0000000000000022 00001fffffa0f998 0000010022e92470 0000000000000000 GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR24: 0000000000000000 c000000000f4a828 00003ffffe527108 0000000000000000 GPR28: c000000000f4a730 c000000000f4a828 0000000000000000 c0000001319a3bf0 [ 2736.063498] NIP [c00000000037efb0] .__list_add+0x30/0x110 [ 2736.063504] LR [c0000000006c40f8] .rwsem_down_write_failed+0x78/0x264 [ 2736.063508] PACATMSCRATCH [800000000280f032] [ 2736.063511] Call Trace: [ 2736.063516] [c0000001319a3af0] [c0000001319a3b80] 0xc0000001319a3b80 (unreliable) [ 2736.063523] [c0000001319a3b80] [c0000000006c40f8] .rwsem_down_write_failed+0x78/0x264 [ 2736.063530] [c0000001319a3c50] [c0000000006c1bb0] .down_write+0x70/0x78 [ 2736.063536] [c0000001319a3cd0] [c0000000002e5ffc] .keyctl_get_persistent+0x20c/0x320 [ 2736.063542] [c0000001319a3dc0] [c0000000002e2388] .SyS_keyctl+0x238/0x260 [ 2736.063548] [c0000001319a3e30] [c000000000009e7c] syscall_exit+0x0/0x7c [ 2736.063553] Instruction dump: [ 2736.063556] 7c0802a6 fba1ffe8 fbc1fff0 fbe1fff8 7cbd2b78 7c9e2378 7c7f1b78 f8010010 [ 2736.063566] f821ff71 e8a50008 7fa52040 40de00c0 7fbd2840 40de0094 7fbff040 [ 2736.063579] ---[ end trace 2708241785538296 ]--- It's caused by uninitialized persistent_keyring_register_sem. The bug was introduced by commit f36f8c75, two typos are in that commit: CONFIG_KEYS_KERBEROS_CACHE should be CONFIG_PERSISTENT_KEYRINGS and krb_cache_register_sem should be persistent_keyring_register_sem. Signed-off-by: Xiao Guangrong Signed-off-by: David Howells --- kernel/user.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/user.c b/kernel/user.c index a3a0dbfda329..c006131beb77 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -51,9 +51,9 @@ struct user_namespace init_user_ns = { .owner = GLOBAL_ROOT_UID, .group = GLOBAL_ROOT_GID, .proc_inum = PROC_USER_INIT_INO, -#ifdef CONFIG_KEYS_KERBEROS_CACHE - .krb_cache_register_sem = - __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), +#ifdef CONFIG_PERSISTENT_KEYRINGS + .persistent_keyring_register_sem = + __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem), #endif }; EXPORT_SYMBOL_GPL(init_user_ns); From 3cafea3076423987726023235e548af1d534ff1a Mon Sep 17 00:00:00 2001 From: James Solner Date: Wed, 6 Nov 2013 12:53:36 -0600 Subject: [PATCH 134/312] Add Documentation/module-signing.txt file This patch adds the Documentation/module-signing.txt file that is currently missing from the Documentation directory. The init/Kconfig file references the Documentation/module-signing.txt file to explain how kernel module signing works. This patch supplies this documentation. 
Signed-off-by: James Solner Signed-off-by: David Howells --- Documentation/module-signing.txt | 240 +++++++++++++++++++++++++++++++ 1 file changed, 240 insertions(+) create mode 100644 Documentation/module-signing.txt diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt new file mode 100644 index 000000000000..2b40e04d3c49 --- /dev/null +++ b/Documentation/module-signing.txt @@ -0,0 +1,240 @@ + ============================== + KERNEL MODULE SIGNING FACILITY + ============================== + +CONTENTS + + - Overview. + - Configuring module signing. + - Generating signing keys. + - Public keys in the kernel. + - Manually signing modules. + - Signed modules and stripping. + - Loading signed modules. + - Non-valid signatures and unsigned modules. + - Administering/protecting the private key. + + +======== +OVERVIEW +======== + +The kernel module signing facility cryptographically signs modules during +installation and then checks the signature upon loading the module. This +allows increased kernel security by disallowing the loading of unsigned modules +or modules signed with an invalid key. Module signing increases security by +making it harder to load a malicious module into the kernel. The module +signature checking is done by the kernel so that it is not necessary to have +trusted userspace bits. + +This facility uses X.509 ITU-T standard certificates to encode the public keys +involved. The signatures are not themselves encoded in any industrial standard +type. The facility currently only supports the RSA public key encryption +standard (though it is pluggable and permits others to be used). The possible +hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and +SHA-512 (the algorithm is selected by data in the signature). + + +========================== +CONFIGURING MODULE SIGNING +========================== + +The module signing facility is enabled by going to the "Enable Loadable Module +Support" section of the kernel configuration and turning on + + CONFIG_MODULE_SIG "Module signature verification" + +This has a number of options available: + + (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE) + + This specifies how the kernel should deal with a module that has a + signature for which the key is not known or a module that is unsigned. + + If this is off (ie. "permissive"), then modules for which the key is not + available and modules that are unsigned are permitted, but the kernel will + be marked as being tainted. + + If this is on (ie. "restrictive"), only modules that have a valid + signature that can be verified by a public key in the kernel's possession + will be loaded. All other modules will generate an error. + + Irrespective of the setting here, if the module has a signature block that + cannot be parsed, it will be rejected out of hand. + + + (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL) + + If this is on then modules will be automatically signed during the + modules_install phase of a build. If this is off, then the modules must + be signed manually using: + + scripts/sign-file + + + (3) "Which hash algorithm should modules be signed with?" 
+ + This presents a choice of which hash algorithm the installation phase will + sign the modules with: + + CONFIG_SIG_SHA1 "Sign modules with SHA-1" + CONFIG_SIG_SHA224 "Sign modules with SHA-224" + CONFIG_SIG_SHA256 "Sign modules with SHA-256" + CONFIG_SIG_SHA384 "Sign modules with SHA-384" + CONFIG_SIG_SHA512 "Sign modules with SHA-512" + + The algorithm selected here will also be built into the kernel (rather + than being a module) so that modules signed with that algorithm can have + their signatures checked without causing a dependency loop. + + +======================= +GENERATING SIGNING KEYS +======================= + +Cryptographic keypairs are required to generate and check signatures. A +private key is used to generate a signature and the corresponding public key is +used to check it. The private key is only needed during the build, after which +it can be deleted or stored securely. The public key gets built into the +kernel so that it can be used to check the signatures as the modules are +loaded. + +Under normal conditions, the kernel build will automatically generate a new +keypair using openssl if one does not exist in the files: + + signing_key.priv + signing_key.x509 + +during the building of vmlinux (the public part of the key needs to be built +into vmlinux) using parameters in the: + + x509.genkey + +file (which is also generated if it does not already exist). + +It is strongly recommended that you provide your own x509.genkey file. + +Most notably, in the x509.genkey file, the req_distinguished_name section +should be altered from the default: + + [ req_distinguished_name ] + O = Magrathea + CN = Glacier signing key + emailAddress = slartibartfast@magrathea.h2g2 + +The generated RSA key size can also be set with: + + [ req ] + default_bits = 4096 + + +It is also possible to manually generate the key private/public files using the +x509.genkey key generation configuration file in the root node of the Linux +kernel sources tree and the openssl command. The following is an example to +generate the public/private key files: + + openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \ + -config x509.genkey -outform DER -out signing_key.x509 \ + -keyout signing_key.priv + + +========================= +PUBLIC KEYS IN THE KERNEL +========================= + +The kernel contains a ring of public keys that can be viewed by root. They're +in a keyring called ".system_keyring" that can be seen by: + + [root@deneb ~]# cat /proc/keys + ... + 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1 + 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 [] + ... + +Beyond the public key generated specifically for module signing, any file +placed in the kernel source root directory or the kernel build root directory +whose name is suffixed with ".x509" will be assumed to be an X.509 public key +and will be added to the keyring. + +Further, the architecture code may take public keys from a hardware store and +add those in also (e.g. from the UEFI key database). 
+ +Finally, it is possible to add additional public keys by doing: + + keyctl padd asymmetric "" [.system_keyring-ID] <[key-file] + +e.g.: + + keyctl padd asymmetric "" 0x223c7853 Date: Wed, 11 Dec 2013 16:58:42 +0000 Subject: [PATCH 135/312] xen/balloon: Seperate the auto-translate logic properly (v2) The auto-xlat logic vs the non-xlat means that we don't need to for auto-xlat guests (like PVH, HVM or ARM): - use P2M - use scratch page. However the code in increase_reservation does modify the p2m for auto_translate guests, but not in decrease_reservation. Fix that by avoiding any p2m modifications in both increase_reservation and decrease_reservation for auto_translated guests. And also avoid allocating or using scratch pages for auto_translated guests. Lastly, since !auto-xlat is really another way of saying 'xen_pv' remove the redundant 'xen_pv_domain' check. Signed-off-by: Stefano Stabellini [v2: Updated the description] Signed-off-by: Konrad Rzeszutek Wilk --- drivers/xen/balloon.c | 63 +++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 55ea73f7c70b..4c02e2b94103 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages) pfn = page_to_pfn(page); - set_phys_to_machine(pfn, frame_list[i]); - #ifdef CONFIG_XEN_HAVE_PVMMU - /* Link back into the page tables if not highmem. */ - if (xen_pv_domain() && !PageHighMem(page)) { - int ret; - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - mfn_pte(frame_list[i], PAGE_KERNEL), - 0); - BUG_ON(ret); + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + set_phys_to_machine(pfn, frame_list[i]); + + /* Link back into the page tables if not highmem. */ + if (!PageHighMem(page)) { + int ret; + ret = HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + mfn_pte(frame_list[i], PAGE_KERNEL), + 0); + BUG_ON(ret); + } } #endif @@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) enum bp_state state = BP_DONE; unsigned long pfn, i; struct page *page; - struct page *scratch_page; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, @@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) scrub_page(page); +#ifdef CONFIG_XEN_HAVE_PVMMU /* * Ballooned out frames are effectively replaced with * a scratch frame. Ensure direct mappings and the * p2m are consistent. 
*/ - scratch_page = get_balloon_scratch_page(); -#ifdef CONFIG_XEN_HAVE_PVMMU - if (xen_pv_domain() && !PageHighMem(page)) { - ret = HYPERVISOR_update_va_mapping( - (unsigned long)__va(pfn << PAGE_SHIFT), - pfn_pte(page_to_pfn(scratch_page), - PAGE_KERNEL_RO), 0); - BUG_ON(ret); - } -#endif if (!xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long p; + struct page *scratch_page = get_balloon_scratch_page(); + + if (!PageHighMem(page)) { + ret = HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + pfn_pte(page_to_pfn(scratch_page), + PAGE_KERNEL_RO), 0); + BUG_ON(ret); + } p = page_to_pfn(scratch_page); __set_phys_to_machine(pfn, pfn_to_mfn(p)); + + put_balloon_scratch_page(); } - put_balloon_scratch_page(); +#endif balloon_append(pfn_to_page(pfn)); } @@ -627,15 +630,17 @@ static int __init balloon_init(void) if (!xen_domain()) return -ENODEV; - for_each_online_cpu(cpu) - { - per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); - if (per_cpu(balloon_scratch_page, cpu) == NULL) { - pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); - return -ENOMEM; + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + for_each_online_cpu(cpu) + { + per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); + if (per_cpu(balloon_scratch_page, cpu) == NULL) { + pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); + return -ENOMEM; + } } + register_cpu_notifier(&balloon_cpu_notifier); } - register_cpu_notifier(&balloon_cpu_notifier); pr_info("Initialising balloon driver\n"); From 86b58d13134ef14f09f8c8f37797ccc37cf823a3 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 5 Dec 2013 12:38:59 +0800 Subject: [PATCH 136/312] ceph: initialize inode before instantiating dentry commit b18825a7c8 (Put a small type field into struct dentry::d_flags) put a type field into struct dentry::d_flags. __d_instantiate() set the field by checking inode->i_mode. So we should initialize inode before instantiating dentry when handling mds reply. Fixes: http://tracker.ceph.com/issues/6930 Signed-off-by: Yan, Zheng Reviewed-by: Sage Weil --- fs/ceph/inode.c | 136 +++++++++++++++++++++--------------------------- 1 file changed, 58 insertions(+), 78 deletions(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9a8e396aed89..278fd2891288 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, struct ceph_mds_reply_inode *ininfo; struct ceph_vino vino; struct ceph_fs_client *fsc = ceph_sb_to_client(sb); - int i = 0; int err = 0; dout("fill_trace %p is_dentry %d is_target %d\n", req, @@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, } } + if (rinfo->head->is_target) { + vino.ino = le64_to_cpu(rinfo->targeti.in->ino); + vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + + in = ceph_get_inode(sb, vino); + if (IS_ERR(in)) { + err = PTR_ERR(in); + goto done; + } + req->r_target_inode = in; + + err = fill_inode(in, &rinfo->targeti, NULL, + session, req->r_request_started, + (le32_to_cpu(rinfo->head->result) == 0) ? 
+ req->r_fmode : -1, + &req->r_caps_reservation); + if (err < 0) { + pr_err("fill_inode badness %p %llx.%llx\n", + in, ceph_vinop(in)); + goto done; + } + } + /* * ignore null lease/binding on snapdir ENOENT, or else we * will have trouble splicing in the virtual snapdir later @@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, ceph_dentry(req->r_old_dentry)->offset); dn = req->r_old_dentry; /* use old_dentry */ - in = dn->d_inode; } /* null dentry? */ @@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, } /* attach proper inode */ - ininfo = rinfo->targeti.in; - vino.ino = le64_to_cpu(ininfo->ino); - vino.snap = le64_to_cpu(ininfo->snapid); - in = dn->d_inode; - if (!in) { - in = ceph_get_inode(sb, vino); - if (IS_ERR(in)) { - pr_err("fill_trace bad get_inode " - "%llx.%llx\n", vino.ino, vino.snap); - err = PTR_ERR(in); - d_drop(dn); - goto done; - } + if (!dn->d_inode) { + ihold(in); dn = splice_dentry(dn, in, &have_lease, true); if (IS_ERR(dn)) { err = PTR_ERR(dn); goto done; } req->r_dentry = dn; /* may have spliced */ - ihold(in); - } else if (ceph_ino(in) == vino.ino && - ceph_snap(in) == vino.snap) { - ihold(in); - } else { + } else if (dn->d_inode && dn->d_inode != in) { dout(" %p links to %p %llx.%llx, not %llx.%llx\n", - dn, in, ceph_ino(in), ceph_snap(in), - vino.ino, vino.snap); + dn, dn->d_inode, ceph_vinop(dn->d_inode), + ceph_vinop(in)); have_lease = false; - in = NULL; } if (have_lease) update_dentry_lease(dn, rinfo->dlease, session, req->r_request_started); dout(" final dn %p\n", dn); - i++; - } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || - req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) { + } else if (!req->r_aborted && + (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || + req->r_op == CEPH_MDS_OP_MKSNAP)) { struct dentry *dn = req->r_dentry; /* fill out a snapdir LOOKUPSNAP dentry */ @@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, ininfo = rinfo->targeti.in; vino.ino = le64_to_cpu(ininfo->ino); vino.snap = le64_to_cpu(ininfo->snapid); - in = ceph_get_inode(sb, vino); - if (IS_ERR(in)) { - pr_err("fill_inode get_inode badness %llx.%llx\n", - vino.ino, vino.snap); - err = PTR_ERR(in); - d_delete(dn); - goto done; - } dout(" linking snapped dir %p to dn %p\n", in, dn); + ihold(in); dn = splice_dentry(dn, in, NULL, true); if (IS_ERR(dn)) { err = PTR_ERR(dn); goto done; } req->r_dentry = dn; /* may have spliced */ - ihold(in); - rinfo->head->is_dentry = 1; /* fool notrace handlers */ } - - if (rinfo->head->is_target) { - vino.ino = le64_to_cpu(rinfo->targeti.in->ino); - vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); - - if (in == NULL || ceph_ino(in) != vino.ino || - ceph_snap(in) != vino.snap) { - in = ceph_get_inode(sb, vino); - if (IS_ERR(in)) { - err = PTR_ERR(in); - goto done; - } - } - req->r_target_inode = in; - - err = fill_inode(in, - &rinfo->targeti, NULL, - session, req->r_request_started, - (le32_to_cpu(rinfo->head->result) == 0) ? 
- req->r_fmode : -1, - &req->r_caps_reservation); - if (err < 0) { - pr_err("fill_inode badness %p %llx.%llx\n", - in, ceph_vinop(in)); - goto done; - } - } - done: dout("fill_trace done err=%d\n", err); return err; @@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct qstr dname; struct dentry *dn; struct inode *in; - int err = 0, i; + int err = 0, ret, i; struct inode *snapdir = NULL; struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; struct ceph_dentry_info *di; @@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); } + /* FIXME: release caps/leases if error occurs */ for (i = 0; i < rinfo->dir_nr; i++) { struct ceph_vino vino; @@ -1329,9 +1298,10 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, err = -ENOMEM; goto out; } - err = ceph_init_dentry(dn); - if (err < 0) { + ret = ceph_init_dentry(dn); + if (ret < 0) { dput(dn); + err = ret; goto out; } } else if (dn->d_inode && @@ -1351,9 +1321,6 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, spin_unlock(&parent->d_lock); } - di = dn->d_fsdata; - di->offset = ceph_make_fpos(frag, i + r_readdir_offset); - /* inode */ if (dn->d_inode) { in = dn->d_inode; @@ -1366,26 +1333,39 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, err = PTR_ERR(in); goto out; } - dn = splice_dentry(dn, in, NULL, false); - if (IS_ERR(dn)) - dn = NULL; } if (fill_inode(in, &rinfo->dir_in[i], NULL, session, req->r_request_started, -1, &req->r_caps_reservation) < 0) { pr_err("fill_inode badness on %p\n", in); + if (!dn->d_inode) + iput(in); + d_drop(dn); goto next_item; } - if (dn) - update_dentry_lease(dn, rinfo->dir_dlease[i], - req->r_session, - req->r_request_started); + + if (!dn->d_inode) { + dn = splice_dentry(dn, in, NULL, false); + if (IS_ERR(dn)) { + err = PTR_ERR(dn); + dn = NULL; + goto next_item; + } + } + + di = dn->d_fsdata; + di->offset = ceph_make_fpos(frag, i + r_readdir_offset); + + update_dentry_lease(dn, rinfo->dir_dlease[i], + req->r_session, + req->r_request_started); next_item: if (dn) dput(dn); } - req->r_did_prepopulate = true; + if (err == 0) + req->r_did_prepopulate = true; out: if (snapdir) { From 56f91aad69444d650237295f68c195b74d888d95 Mon Sep 17 00:00:00 2001 From: Li Wang Date: Wed, 13 Nov 2013 15:22:14 +0800 Subject: [PATCH 137/312] ceph: Avoid data inconsistency due to d-cache aliasing in readpage() If the length of data to be read in readpage() is exactly PAGE_CACHE_SIZE, the original code does not flush d-cache for data consistency after finishing reading. This patches fixes this. 
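
For readability, the tail of the read path after this change boils down to the
sketch below (demo_readpage_tail() is a hypothetical helper for illustration
only; the hunk further down is the authoritative change, and err is assumed to
hold the number of bytes actually read into the page):

    static int demo_readpage_tail(struct page *page, int err)
    {
            if (err < 0) {
                    SetPageError(page);
                    return err;
            }
            if (err < PAGE_CACHE_SIZE)
                    /* short read: zero the rest (this also flushes the page) */
                    zero_user_segment(page, err, PAGE_CACHE_SIZE);
            else
                    /* full-page read: flush explicitly, or another mapping of
                     * the page may keep seeing stale data on aliasing d-caches */
                    flush_dcache_page(page);
            SetPageUptodate(page);
            return 0;
    }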
Signed-off-by: Li Wang Signed-off-by: Sage Weil --- fs/ceph/addr.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 1e561c059539..ec3ba43b9faa 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page) if (err < 0) { SetPageError(page); goto out; - } else if (err < PAGE_CACHE_SIZE) { + } else { + if (err < PAGE_CACHE_SIZE) { /* zero fill remainder of page */ - zero_user_segment(page, err, PAGE_CACHE_SIZE); + zero_user_segment(page, err, PAGE_CACHE_SIZE); + } else { + flush_dcache_page(page); + } } SetPageUptodate(page); From 96b4026878d9dac71bd4c3d6e05c7fbb16a3e0aa Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 6 Dec 2013 20:29:01 -0200 Subject: [PATCH 138/312] drm/i915: change CRTC assertion on LCPLL disable Currently, PC8 is enabled at modeset_global_resources, which is called after intel_modeset_update_state. Due to this, there's a small race condition on the case where we start enabling PC8, then do a modeset while PC8 is still being enabled. The racing condition triggers a WARN because intel_modeset_update_state will mark the CRTC as enabled, then the thread that's still enabling PC8 might look at the data structure and think that PC8 is being enabled while a pipe is enabled. Despite the WARN, this is not really a bug since we'll wait for the PC8-enabling thread to finish when we call modeset_global_resources. The spec says the CRTC cannot be enabled when we disable LCPLL, so we had a check for crtc->base.enabled. If we change to crtc->active we will still prevent disabling LCPLL while the CRTC is enabled, and we will also prevent the WARN above. This is a replacement for the previous patch named "drm/i915: get/put PC8 when we get/put a CRTC" Testcase: igt/pm_pc8/modeset-lpsp-stress-no-wait Signed-off-by: Paulo Zanoni (cherry picked from commit 798183c54799fbe1e5a5bfabb3a8c0505ffd2149 from -next due to Dave's report.) Reported-by: Dave Jones Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8b8bde7dce53..4049a65a9f8c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) uint32_t val; list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) - WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", + WARN(crtc->active, "CRTC for pipe %c enabled\n", pipe_name(crtc->pipe)); WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); From be3d26b0588ca5f4db6b50d13e92afb0ac57d6ab Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 13 Dec 2013 17:46:38 -0200 Subject: [PATCH 139/312] drm/i915: get a PC8 reference when enabling the power well In the current code, at haswell_modeset_global_resources, first we decide if we want to enable/disable the power well, then we decide if we want to enable/disable PC8. On the case where we're enabling PC8 this works fine, but on the case where we disable PC8 due to a non-eDP monitor being enabled, we first enable the power well and then disable PC8. Although wrong, this doesn't seem to be causing any problems now, and we don't even see anything in dmesg. But the patches for runtime D3 turn this problem into a real bug, so we need to fix it. 
This fixes the "modeset-non-lpsp" subtest from the "pm_pc8" test from intel-gpu-tools. v2: - Rebase (i915_disable_power_well). v3: - More reabase. v4: - Rebase on top of -fixes instead of -nightly. This is commit d62292c8f778772d1b6ec125d461c8c16fdc0417 in -next, but we need it in -fixes to address Dave's report. Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Reported-by: Dave Jones Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3657ab43c8fd..26c29c173221 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) unsigned long irqflags; uint32_t tmp; + WARN_ON(dev_priv->pc8.enabled); + tmp = I915_READ(HSW_PWR_WELL_DRIVER); is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; @@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) static void __intel_power_well_get(struct drm_device *dev, struct i915_power_well *power_well) { - if (!power_well->count++) + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!power_well->count++) { + hsw_disable_package_c8(dev_priv); __intel_set_power_well(dev, true); + } } static void __intel_power_well_put(struct drm_device *dev, struct i915_power_well *power_well) { + struct drm_i915_private *dev_priv = dev->dev_private; + WARN_ON(!power_well->count); - if (!--power_well->count && i915_disable_power_well) + if (!--power_well->count && i915_disable_power_well) { __intel_set_power_well(dev, false); + hsw_enable_package_c8(dev_priv); + } } void intel_display_power_get(struct drm_device *dev, From cb12057256ec58b6f798855b27e120579d6c9aee Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Fri, 13 Dec 2013 20:59:39 +0100 Subject: [PATCH 140/312] ARM: s3c64xx: dt: Fix boot failure due to double clock initialization Commit 4178bac ARM: call of_clk_init from default time_init handler added implicit call to of_clk_init() from default time_init callback, but it did not change platforms calling it from other callbacks, despite of not having custom time_init callbacks. This caused double clock initialization on such platforms, leading to boot failures. An example of such platform is mach-s3c64xx. This patch fixes boot failure on s3c64xx by dropping custom init_irq callback, which had a call to of_clk_init() and moving system reset initialization to init_machine callback. This allows us to have clocks initialized properly without a need to have custom init_time or init_irq callbacks. Signed-off-by: Tomasz Figa Signed-off-by: Olof Johansson --- arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c index 7eb9a10fc1af..2fddf38192df 100644 --- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c +++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c @@ -8,8 +8,6 @@ * published by the Free Software Foundation. 
*/ -#include -#include #include #include @@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void) panic("SoC is not S3C64xx!"); } -static void __init s3c64xx_dt_init_irq(void) -{ - of_clk_init(NULL); - samsung_wdt_reset_of_init(); - irqchip_init(); -}; - static void __init s3c64xx_dt_init_machine(void) { + samsung_wdt_reset_of_init(); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } @@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)") /* Maintainer: Tomasz Figa */ .dt_compat = s3c64xx_dt_compat, .map_io = s3c64xx_dt_map_io, - .init_irq = s3c64xx_dt_init_irq, .init_machine = s3c64xx_dt_init_machine, .restart = s3c64xx_dt_restart, MACHINE_END From ce027ed98fd176710fb14be9d6015697b62436f0 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 15 Dec 2013 16:18:01 +0100 Subject: [PATCH 141/312] firewire: sbp2: bring back WRITE SAME support Commit 54b2b50c20a6 "[SCSI] Disable WRITE SAME for RAID and virtual host adapter drivers" disabled WRITE SAME support for all SBP-2 attached targets. But as described in the changelog of commit b0ea5f19d3d8 "firewire: sbp2: allow WRITE SAME and REPORT SUPPORTED OPERATION CODES", it is not required to blacklist WRITE SAME. Bring the feature back by reverting the sbp2.c hunk of commit 54b2b50c20a6. Signed-off-by: Stefan Richter Cc: stable@kernel.org --- drivers/firewire/sbp2.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index b0bb056458a3..281029daf98c 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = { .cmd_per_lun = 1, .can_queue = 1, .sdev_attrs = sbp2_scsi_sysfs_attrs, - .no_write_same = 1, }; MODULE_AUTHOR("Kristian Hoegsberg "); From c00850dd6c517169734ec60eed99502c0912a7d3 Mon Sep 17 00:00:00 2001 From: Rashika Date: Sat, 14 Dec 2013 18:42:14 +0530 Subject: [PATCH 142/312] RDMA/cxgb4: Make _c4iw_write_mem_dma() static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch marks the function _c4iw_write_mem_dma() as static because it is not used outside this file, which fixes the warning: drivers/infiniband/hw/cxgb4/mem.c:176:5: warning: no previous prototype for ‘_c4iw_write_mem_dma’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Acked-by: Steve Wise Reviewed-by: Josh Triplett Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb4/mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 4cb8eb24497c..84e45006451c 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, return ret; } -int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) +static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) { u32 remain = len; u32 dmalen; From 128d6637cce0747e21b8936e449b1c78a497b189 Mon Sep 17 00:00:00 2001 From: Beomho Seo Date: Mon, 9 Dec 2013 02:17:00 +0000 Subject: [PATCH 143/312] iio: cm36651: Changed return value of read function A return value of callback have been changed to IIO_VAL_INT. If not IIO_VAL_INT, driver will print wrong value(*_read_int_time). A follow up patch will deal with a related bug in the new event handling code. 
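
The IIO core decides how to format the (*val, *val2) pair from the callback's
return code, so the code returned must match what the driver actually fills
in. A minimal sketch of the convention (demo_read_value() is a hypothetical
helper, not the cm36651 code; IIO_VAL_* come from <linux/iio/iio.h>):

    static int demo_read_value(bool fractional, int *val, int *val2)
    {
            if (fractional) {
                    int raw_us = 80000;             /* e.g. 80 ms from hardware */

                    *val  = raw_us / 1000000;       /* integer part */
                    *val2 = raw_us % 1000000;       /* microsecond part */
                    return IIO_VAL_INT_PLUS_MICRO;  /* shown as "0.080000" */
            }

            *val = 16;                              /* plain integer reading */
            return IIO_VAL_INT;                     /* shown as "16" */
    }

Returning IIO_VAL_INT_PLUS_MICRO while only *val is filled in is what produced
the wrong sysfs output here.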
Signed-off-by: Beomho Seo Signed-off-by: Jonathan Cameron --- drivers/iio/light/cm36651.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c index 21df57130018..0922e39b0ea9 100644 --- a/drivers/iio/light/cm36651.c +++ b/drivers/iio/light/cm36651.c @@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651, return -EINVAL; } - return IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT; } static int cm36651_write_int_time(struct cm36651_data *cm36651, From 6b59ba609bb61e4fa2ecca7827f170ac07842d64 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 21 Nov 2013 15:40:14 -0600 Subject: [PATCH 144/312] RDMA/iwcm: Don't touch cm_id after deref in rem_ref rem_ref() calls iwcm_deref_id(), which will wake up any blockers on cm_id_priv->destroy_comp if the refcnt hits 0. That will unblock someone in iw_destroy_cm_id() which will free the cmid. If that happens before rem_ref() calls test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags), then the test_bit() will touch freed memory. The fix is to read the bit first, then deref. We should never be in iw_destroy_cm_id() with IWCM_F_CALLBACK_DESTROY set, and there is a BUG_ON() to make sure of that. Signed-off-by: Steve Wise Signed-off-by: Roland Dreier --- drivers/infiniband/core/iwcm.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index c47c2034ca71..0717940ec3b5 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id) static void rem_ref(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; + int cb_destroy; + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); - if (iwcm_deref_id(cm_id_priv) && - test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { + + /* + * Test bit before deref in case the cm_id gets freed on another + * thread. + */ + cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); + if (iwcm_deref_id(cm_id_priv) && cb_destroy) { BUG_ON(!list_empty(&cm_id_priv->work_list)); free_cm_id(cm_id_priv); } From b25b4427e9dfba073cf9bc86603956ed395eb6e3 Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Wed, 11 Dec 2013 22:19:01 -0500 Subject: [PATCH 145/312] drm/nouveau: only runtime suspend by default in optimus configuration The intent was to only enable it by default for optimus, e.g. see the runtime_idle callback. The suspend callback may be called directly, e.g. as a result of nouveau_crtc_set_config. Reported-by: Stefan Lippers-Hollmann Signed-off-by: Ilia Mirkin Tested-by: Stefan Lippers-Hollmann Cc: stable@vger.kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_drm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 7a3759f1c41a..98a22e6e27a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev) if (nouveau_runtime_pm == 0) return -EINVAL; + /* are we optimus enabled? 
*/ + if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { + DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); + return -EINVAL; + } + nv_debug_level(SILENT); drm_kms_helper_poll_disable(drm_dev); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); From 1b1ccee1e821e59c2a45c95b007aeb2c9dafd9be Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 11 Dec 2013 15:07:43 +0100 Subject: [PATCH 146/312] mfd: s2mps11: Fix build after regmap field rename in sec-core.c Fix building of s2mps11 regulator and clock drivers after renaming regmap field in struct sec_pmic_dev in commit: - "mfd/rtc: s5m: Fix register updating by adding regmap for RTC" Signed-off-by: Krzysztof Kozlowski Cc: Kyungmin Park Cc: Marek Szyprowski Signed-off-by: Mark Brown --- drivers/clk/clk-s2mps11.c | 6 +++--- drivers/regulator/s2mps11.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 7be41e676a64..00a3abe103a5 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw) struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap, + ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, s2mps11->mask, s2mps11->mask); if (!ret) @@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw) struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL, + ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, s2mps11->mask, ~s2mps11->mask); if (!ret) @@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) s2mps11_clk->hw.init = &s2mps11_clks_init[i]; s2mps11_clk->mask = 1 << i; - ret = regmap_read(s2mps11_clk->iodev->regmap, + ret = regmap_read(s2mps11_clk->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, &val); if (ret < 0) goto err_reg; diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 333677d68d0e..9e61922d8230 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -438,7 +438,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, s2mps11); config.dev = &pdev->dev; - config.regmap = iodev->regmap; + config.regmap = iodev->regmap_pmic; config.driver_data = s2mps11; for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { if (!reg_np) { From 6fec88712cea016b1fc929fee53f67e3993194a6 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Mon, 16 Dec 2013 11:34:21 +0100 Subject: [PATCH 147/312] ahci: bail out on ICH6 before using AHCI BAR The check for "combined mode" (which disables ahci support) on ICH6 is done after the first use of AHCI BAR. But if ahci is not enabled AHCI BAR is initialized to 0x00000000. (At least it is on the ICH6-M I tested this on. If I understand the datasheet correctly it should also be on ICH6R.) This apparently makes the call of pcim_iomap_regions_request_all() return -EINVAL. And we end up with ahci: probe of 0000:00:1f.2 failed with error -22 (at warning level) in the logs. So check for "combined mode" before calling pcim_iomap_regions_request_all(). 
Signed-off-by: Paul Bolle Signed-off-by: Tejun Heo --- drivers/ata/ahci.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 14f1e9506338..c0ed4f273cf2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1238,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - /* AHCI controllers often implement SFF compatible interface. - * Grab all PCI BARs just in case. - */ - rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); - if (rc == -EBUSY) - pcim_pin_device(pdev); - if (rc) - return rc; - if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == 0x2652 || pdev->device == 0x2653)) { u8 map; @@ -1263,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } + /* AHCI controllers often implement SFF compatible interface. + * Grab all PCI BARs just in case. + */ + rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; From c4602c1c818bd6626178d6d3fcc152d9f2f48ac0 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Mon, 16 Dec 2013 15:20:01 +0800 Subject: [PATCH 148/312] ftrace: Initialize the ftrace profiler for each possible cpu Ftrace currently initializes only the online CPUs. This implementation has two problems: - If we online a CPU after we enable the function profile, and then run the test, we will lose the trace information on that CPU. Steps to reproduce: # echo 0 > /sys/devices/system/cpu/cpu1/online # cd /tracing/ # echo >> set_ftrace_filter # echo 1 > function_profile_enabled # echo 1 > /sys/devices/system/cpu/cpu1/online # run test - If we offline a CPU before we enable the function profile, we will not clear the trace information when we enable the function profile. It will trouble the users. Steps to reproduce: # cd /tracing/ # echo >> set_ftrace_filter # echo 1 > function_profile_enabled # run test # cat trace_stat/function* # echo 0 > /sys/devices/system/cpu/cpu1/online # echo 0 > function_profile_enabled # echo 1 > function_profile_enabled # cat trace_stat/function* # run test # cat trace_stat/function* So it is better that we initialize the ftrace profiler for each possible cpu every time we enable the function profile instead of just the online ones. Link: http://lkml.kernel.org/r/1387178401-10619-1-git-send-email-miaox@cn.fujitsu.com Cc: stable@vger.kernel.org # 2.6.31+ Signed-off-by: Miao Xie Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0e9f9eaade2f..72a0f81dc5a8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -775,7 +775,7 @@ static int ftrace_profile_init(void) int cpu; int ret = 0; - for_each_online_cpu(cpu) { + for_each_possible_cpu(cpu) { ret = ftrace_profile_init_cpu(cpu); if (ret) break; From b8bd6dc36186fe99afa7b73e9e2d9a98ad5c4865 Mon Sep 17 00:00:00 2001 From: "Robin H. Johnson" Date: Mon, 16 Dec 2013 09:31:19 -0800 Subject: [PATCH 149/312] libata: disable a disk via libata.force params A user on StackExchange had a failing SSD that's soldered directly onto the motherboard of his system. The BIOS does not give any option to disable it at all, so he can't just hide it from the OS via the BIOS. 
The old IDE layer had hdX=noprobe override for situations like this, but that was never ported to the libata layer. This patch implements a disable flag for libata.force. Example use: libata.force=2.0:disable [v2 of the patch, removed the nodisable flag per Tejun Heo] Signed-off-by: Robin H. Johnson Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org Link: http://unix.stackexchange.com/questions/102648/how-to-tell-linux-kernel-3-0-to-completely-ignore-a-failing-disk Link: http://askubuntu.com/questions/352836/how-can-i-tell-linux-kernel-to-completely-ignore-a-disk-as-if-it-was-not-even-co Link: http://superuser.com/questions/599333/how-to-disable-kernel-probing-for-drive --- Documentation/kernel-parameters.txt | 2 ++ drivers/ata/libata-core.c | 1 + 2 files changed, 3 insertions(+) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 50680a59a2ff..b9e9bd854298 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. * atapi_dmadir: Enable ATAPI DMADIR bridge support + * disable: Disable this device. + If there are multiple matching configurations changing the same attribute, the last one is used. diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index dae73efe5dbf..ff0158481d53 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6522,6 +6522,7 @@ static int __init ata_parse_force_one(char **cur, { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, + { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, }; char *start = *cur, *p = *cur; char *id, *val, *endp; From d386735588c3e22129c2bc6eb64fc1d37a8f805c Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 8 Dec 2013 23:23:57 -0800 Subject: [PATCH 150/312] drm/ttm: Fix accesses through vmas with only partial coverage VMAs covering a bo but that didn't start at the same address space offset as the bo they were mapping were incorrectly generating SEGFAULT errors in the fault handler. Reported-by: Joseph Dolinak Signed-off-by: Thomas Hellstrom Reviewed-by: Jakob Bornecrantz Cc: stable@vger.kernel.org --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b249ab9b1eb2..6440eeac22d2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + - drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; - page_last = vma_pages(vma) + - drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; + vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); + page_last = vma_pages(vma) + vma->vm_pgoff - + drm_vma_node_start(&bo->vma_node); if (unlikely(page_offset >= bo->num_pages)) { retval = VM_FAULT_SIGBUS; From 309243ec14fde1149e1c66f19746e239e86caf39 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:44 +0100 Subject: [PATCH 151/312] IB/core: const'ify inbuf in struct ib_udata Userspace input buffer is not modified by kernel, so it can be 'const'. This is also a prerequisite to remove the implicit cast from INIT_UDATA(). 
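
In-kernel users only ever read from inbuf, typically via copy_from_user(),
whose source argument is already 'const void __user *', so no const needs to
be cast away. A minimal sketch of that access pattern, modelled on the
existing ib_copy_from_udata() (the name demo_copy_from_udata is illustrative
only):

    #include <linux/uaccess.h>

    /* inbuf is used purely as a copy source here */
    static inline int demo_copy_from_udata(void *dest, struct ib_udata *udata,
                                           size_t len)
    {
            return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
    }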
Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs.h | 2 +- include/rdma/ib_verbs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index bdc842e9faef..9879568aed8c 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -49,7 +49,7 @@ #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ do { \ - (udata)->inbuf = (void __user *) (ibuf); \ + (udata)->inbuf = (const void __user *) (ibuf); \ (udata)->outbuf = (void __user *) (obuf); \ (udata)->inlen = (ilen); \ (udata)->outlen = (olen); \ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 979874c627ee..61e1935c91b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -978,7 +978,7 @@ struct ib_uobject { }; struct ib_udata { - void __user *inbuf; + const void __user *inbuf; void __user *outbuf; size_t inlen; size_t outlen; From 317929cd8ef32121fb6574f12d02621f78f24732 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sat, 23 Nov 2013 17:31:27 -0500 Subject: [PATCH 152/312] MAINTAINERS: Add keystone git tree information Update the Keystone entry to add git tree information. Signed-off-by: Santosh Shilimkar --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 8285ed4676b6..25b945b9c71f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1013,6 +1013,7 @@ M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-keystone/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git ARM/LOGICPD PXA270 MACHINE SUPPORT M: Lennert Buytenhek From cffa8e3b5c7dcf7617268561397eb236651446f8 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sat, 23 Nov 2013 17:35:23 -0500 Subject: [PATCH 153/312] MAINTAINERS: Add keystone clock drivers Cc: Mike Turquette Signed-off-by: Santosh Shilimkar --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 25b945b9c71f..249651f422e8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1013,6 +1013,7 @@ M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-keystone/ +F: drivers/clk/keystone/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git ARM/LOGICPD PXA270 MACHINE SUPPORT From f665c0f852316ff99e9eb7f71f34d43003f8e139 Mon Sep 17 00:00:00 2001 From: Stefan Priebe Date: Sat, 16 Nov 2013 21:26:52 +0100 Subject: [PATCH 154/312] bcache: kthread don't set writeback task to INTERUPTIBLE at the beginning (schedule_timout_interuptible) and others do his on their own This prevents wrong load average calculation (load of 1 per thread) Signed-off-by: Kent Overstreet --- drivers/md/bcache/writeback.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251be..484e57d7012c 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -500,8 +500,6 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) if (IS_ERR(dc->writeback_thread)) return PTR_ERR(dc->writeback_thread); - set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE); - INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); From 
ce2b3f595e1c56639085645e0130426e443008c0 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 28 Nov 2013 17:28:37 -0800 Subject: [PATCH 155/312] bcache: Use uninterruptible sleep in writeback We're just waiting on kthread_should_stop(), nothing else, so interruptible sleep was wrong here. Signed-off-by: Kent Overstreet --- drivers/md/bcache/writeback.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 484e57d7012c..3cd931d3f26c 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -241,7 +241,7 @@ static void read_dirty(struct cached_dev *dc) if (KEY_START(&w->key) != dc->last_read || jiffies_to_msecs(delay) > 50) while (!kthread_should_stop() && delay) - delay = schedule_timeout_interruptible(delay); + delay = schedule_timeout_uninterruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -438,7 +438,7 @@ static int bch_writeback_thread(void *arg) while (delay && !kthread_should_stop() && !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) - delay = schedule_timeout_interruptible(delay); + delay = schedule_timeout_uninterruptible(delay); } } From d24a6e1087030b6da286df9433add5fa2f21b83b Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 10 Nov 2013 21:55:27 -0800 Subject: [PATCH 156/312] bcache: Fix dirty_data accounting Dirty data accounting wasn't quite right - firstly, we were adding the key we're inserting after it could have merged with another dirty key already in the btree, and secondly we could sometimes pass the wrong offset to bcache_dev_sectors_dirty_add() for dirty data we were overwriting - which is important when tracking dirty data by stripe. NOTE FOR BACKPORTERS: For 3.10 (and 3.11?) there's other accounting fixes necessary that got squashed in with other patches; the full patch against 3.10 is 408cc2f47eeac93a, available at: git://evilpiepirate.org/~kent/linux-bcache.git bcache-3.10-writeback-fixes Signed-off-by: Kent Overstreet Cc: linux-stable # >= v3.10 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 2a46036..4a12b2f 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1817,7 +1817,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, if (KEY_START(k) > KEY_START(insert) + sectors_found) goto check_failed; - if (KEY_PTRS(replace_key) != KEY_PTRS(k)) + if (KEY_PTRS(k) != KEY_PTRS(replace_key) || + KEY_DIRTY(k) != KEY_DIRTY(replace_key)) goto check_failed; /* skip past gen */ --- drivers/md/bcache/btree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index dc6e2be265b7..c36745e5986a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1817,7 +1817,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert, if (KEY_START(k) > KEY_START(insert) + sectors_found) goto check_failed; - if (KEY_PTRS(replace_key) != KEY_PTRS(k)) + if (KEY_PTRS(k) != KEY_PTRS(replace_key) || + KEY_DIRTY(k) != KEY_DIRTY(replace_key)) goto check_failed; /* skip past gen */ From 9eb8ebeb2471b8cbaa078066aeb85fe5d46f5304 Mon Sep 17 00:00:00 2001 From: Nicholas Swenson Date: Tue, 22 Oct 2013 13:19:23 -0700 Subject: [PATCH 157/312] bcache: Fix for can_attach_cache() Signed-off-by: Nicholas Swenson Signed-off-by: Kent Overstreet --- drivers/md/bcache/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 
dec15cd2d797..c57bfa071a57 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1676,7 +1676,7 @@ static void run_cache_set(struct cache_set *c) static bool can_attach_cache(struct cache *ca, struct cache_set *c) { return ca->sb.block_size == c->sb.block_size && - ca->sb.bucket_size == c->sb.block_size && + ca->sb.bucket_size == c->sb.bucket_size && ca->sb.nr_in_set == c->sb.nr_in_set; } From 97d11a660fd906dbea3dccd2638495d8497c3c81 Mon Sep 17 00:00:00 2001 From: Nicholas Swenson Date: Wed, 23 Oct 2013 17:35:26 -0700 Subject: [PATCH 158/312] bcache: Fix heap_peek() macro Signed-off-by: Nicholas Swenson Signed-off-by: Kent Overstreet --- drivers/md/bcache/util.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 362c4b3f8b4a..1030c6020e98 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -110,7 +110,7 @@ do { \ _r; \ }) -#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) +#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL) #define heap_full(h) ((h)->used == (h)->size) From bee63f40cb5f5e8ab2abfbc85acde99cc0acd4b5 Mon Sep 17 00:00:00 2001 From: Nicholas Swenson Date: Thu, 31 Oct 2013 19:25:18 -0700 Subject: [PATCH 159/312] bcache: fix for gc crashing when no sectors are used Signed-off-by: Nicholas Swenson Signed-off-by: Kent Overstreet --- drivers/md/bcache/movinggc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025..46c952379fab 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -184,7 +184,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) static unsigned bucket_heap_top(struct cache *ca) { - return GC_SECTORS_USED(heap_peek(&ca->heap)); + struct bucket *b; + return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; } void bch_moving_gc(struct cache_set *c) From 981aa8c091e164ea51dd1e81b71a1f3852bbcceb Mon Sep 17 00:00:00 2001 From: Nicholas Swenson Date: Thu, 7 Nov 2013 17:53:19 -0800 Subject: [PATCH 160/312] bcache: bugfix - moving_gc now moves only correct buckets Removed gc_move_threshold because picking buckets only by threshold could lead moving extra buckets (ei. if there are buckets at the threshold that aren't supposed to be moved do to space considerations). This is replaced by a GC_MOVE bit in the gc_mark bitmask. Now only marked buckets get moved. Signed-off-by: Nicholas Swenson Signed-off-by: Kent Overstreet --- drivers/md/bcache/alloc.c | 2 ++ drivers/md/bcache/bcache.h | 6 +++--- drivers/md/bcache/movinggc.c | 8 +++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 2b46bf1d7e40..4c9852d92b0a 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -421,9 +421,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait) if (watermark <= WATERMARK_METADATA) { SET_GC_MARK(b, GC_MARK_METADATA); + SET_GC_MOVE(b, 0); b->prio = BTREE_PRIO; } else { SET_GC_MARK(b, GC_MARK_RECLAIMABLE); + SET_GC_MOVE(b, 0); b->prio = INITIAL_PRIO; } diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 4beb55a0ff30..a7b1a7631ed2 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -197,7 +197,7 @@ struct bucket { uint8_t disk_gen; uint8_t last_gc; /* Most out of date gen in the btree */ uint8_t gc_gen; - uint16_t gc_mark; + uint16_t gc_mark; /* Bitfield used by GC. 
See below for field */ }; /* @@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); #define GC_MARK_RECLAIMABLE 0 #define GC_MARK_DIRTY 1 #define GC_MARK_METADATA 2 -BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); +BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); +BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); #include "journal.h" #include "stats.h" @@ -445,7 +446,6 @@ struct cache { * call prio_write() to keep gens from wrapping. */ uint8_t need_save_prio; - unsigned gc_move_threshold; /* * If nonzero, we know we aren't going to find any buckets to invalidate diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 46c952379fab..30f347d4e609 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) unsigned i; for (i = 0; i < KEY_PTRS(k); i++) { - struct cache *ca = PTR_CACHE(c, k, i); struct bucket *g = PTR_BUCKET(c, k, i); - if (GC_SECTORS_USED(g) < ca->gc_move_threshold) + if (GC_MOVE(g)) return true; } @@ -227,9 +226,8 @@ void bch_moving_gc(struct cache_set *c) sectors_to_move -= GC_SECTORS_USED(b); } - ca->gc_move_threshold = bucket_heap_top(ca); - - pr_debug("threshold %u", ca->gc_move_threshold); + while (heap_pop(&ca->heap, b, bucket_cmp)) + SET_GC_MOVE(b, 1); } mutex_unlock(&c->bucket_lock); From bf0a628a95dba7f983b6047cea695fb066fb2512 Mon Sep 17 00:00:00 2001 From: Nicholas Swenson Date: Tue, 26 Nov 2013 19:14:23 -0800 Subject: [PATCH 161/312] bcache: fix for gc and writeback race Garbage collector needs to check keys in the writeback keybuf to make sure it's not invalidating buckets to which the writeback keys point to. Signed-off-by: Nicholas Swenson Signed-off-by: Kent Overstreet --- drivers/md/bcache/btree.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index c36745e5986a..31bb53fcc67a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c) SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), GC_MARK_METADATA); + /* don't reclaim buckets to which writeback keys point */ + rcu_read_lock(); + for (i = 0; i < c->nr_uuids; i++) { + struct bcache_device *d = c->devices[i]; + struct cached_dev *dc; + struct keybuf_key *w, *n; + unsigned j; + + if (!d || UUID_FLASH_ONLY(&c->uuids[i])) + continue; + dc = container_of(d, struct cached_dev, disk); + + spin_lock(&dc->writeback_keys.lock); + rbtree_postorder_for_each_entry_safe(w, n, + &dc->writeback_keys.keys, node) + for (j = 0; j < KEY_PTRS(&w->key); j++) + SET_GC_MARK(PTR_BUCKET(c, &w->key, j), + GC_MARK_DIRTY); + spin_unlock(&dc->writeback_keys.lock); + } + rcu_read_unlock(); + for_each_cache(ca, c, i) { uint64_t *i; From 6d3d1a9c542b19dff1c7d7c8354d0869e4655287 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 16 Dec 2013 14:12:09 -0800 Subject: [PATCH 162/312] bcache: bugfix for race between moving_gc and bucket_invalidate There is a possibility for a bucket to be invalidated by the allocator while moving_gc was copying it's contents to another bucket, if the bucket only held cached data. To prevent this moving checks for a stale ptr (to an invalidated bucket), before and after reads. It it finds one, it simply ignores moving that data. This only affects bcache if the moving_gc was turned on, note that it's off by default. 
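
For background: a pointer is "stale" when the bucket it references has been
invalidated (and possibly reused) since the key was written. bcache detects
this by comparing the bucket's current generation with the generation recorded
in the pointer, which is what the ptr_stale() calls in the hunk below check.
Roughly, ignoring the generation wrap-around handling of the real helper:

    /* simplified sketch only, not the driver's actual helper */
    static inline bool demo_ptr_stale(uint8_t bucket_gen, uint8_t ptr_gen)
    {
            /* the bucket's gen is bumped on invalidation, so a mismatch means
             * the data the pointer refers to may already be gone */
            return bucket_gen != ptr_gen;
    }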
Signed-off-by: Nicholas Swenson Signed-off-by: Kent Overstreet --- drivers/md/bcache/movinggc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 30f347d4e609..f2f0998c4a91 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -64,11 +64,16 @@ static void write_moving_finish(struct closure *cl) static void read_moving_endio(struct bio *bio, int error) { + struct bbio *b = container_of(bio, struct bbio, bio); struct moving_io *io = container_of(bio->bi_private, struct moving_io, cl); if (error) io->op.error = error; + else if (!KEY_DIRTY(&b->key) && + ptr_stale(io->op.c, &b->key, 0)) { + io->op.error = -EINTR; + } bch_bbio_endio(io->op.c, bio, error, "reading data to move"); } @@ -140,6 +145,11 @@ static void read_moving(struct cache_set *c) if (!w) break; + if (ptr_stale(c, &w->key, 0)) { + bch_keybuf_del(&c->moving_gc_keys, w); + continue; + } + io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), GFP_KERNEL); From 16749c23c00c686ed168471963e3ddb0f3fcd855 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 11 Nov 2013 13:58:34 -0800 Subject: [PATCH 163/312] bcache: New writeback PD controller The old writeback PD controller could get into states where it had throttled all the way down and take way too long to recover - it was too complicated to really understand what it was doing. This rewrites a good chunk of it to hopefully be simpler and make more sense, and it also pays more attention to units which should make the behaviour a bit easier to understand. Signed-off-by: Kent Overstreet --- drivers/md/bcache/bcache.h | 6 ++--- drivers/md/bcache/sysfs.c | 50 ++++++++++++++++++++--------------- drivers/md/bcache/util.c | 8 +++++- drivers/md/bcache/writeback.c | 47 ++++++++++++++++---------------- 4 files changed, 62 insertions(+), 49 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index a7b1a7631ed2..754f43177483 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -373,14 +373,14 @@ struct cached_dev { unsigned char writeback_percent; unsigned writeback_delay; - int writeback_rate_change; - int64_t writeback_rate_derivative; uint64_t writeback_rate_target; + int64_t writeback_rate_proportional; + int64_t writeback_rate_derivative; + int64_t writeback_rate_change; unsigned writeback_rate_update_seconds; unsigned writeback_rate_d_term; unsigned writeback_rate_p_term_inverse; - unsigned writeback_rate_d_smooth; }; enum alloc_watermarks { diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 80d4c2bee18a..a1f85612f0b3 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -83,7 +83,6 @@ rw_attribute(writeback_rate); rw_attribute(writeback_rate_update_seconds); rw_attribute(writeback_rate_d_term); rw_attribute(writeback_rate_p_term_inverse); -rw_attribute(writeback_rate_d_smooth); read_attribute(writeback_rate_debug); read_attribute(stripe_size); @@ -129,31 +128,41 @@ SHOW(__bch_cached_dev) var_printf(writeback_running, "%i"); var_print(writeback_delay); var_print(writeback_percent); - sysfs_print(writeback_rate, dc->writeback_rate.rate); + sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); var_print(writeback_rate_update_seconds); var_print(writeback_rate_d_term); var_print(writeback_rate_p_term_inverse); - var_print(writeback_rate_d_smooth); if (attr == &sysfs_writeback_rate_debug) { + char rate[20]; char dirty[20]; - char 
derivative[20]; char target[20]; - bch_hprint(dirty, - bcache_dev_sectors_dirty(&dc->disk) << 9); - bch_hprint(derivative, dc->writeback_rate_derivative << 9); + char proportional[20]; + char derivative[20]; + char change[20]; + s64 next_io; + + bch_hprint(rate, dc->writeback_rate.rate << 9); + bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); bch_hprint(target, dc->writeback_rate_target << 9); + bch_hprint(proportional,dc->writeback_rate_proportional << 9); + bch_hprint(derivative, dc->writeback_rate_derivative << 9); + bch_hprint(change, dc->writeback_rate_change << 9); + + next_io = div64_s64(dc->writeback_rate.next - local_clock(), + NSEC_PER_MSEC); return sprintf(buf, - "rate:\t\t%u\n" - "change:\t\t%i\n" + "rate:\t\t%s/sec\n" "dirty:\t\t%s\n" + "target:\t\t%s\n" + "proportional:\t%s\n" "derivative:\t%s\n" - "target:\t\t%s\n", - dc->writeback_rate.rate, - dc->writeback_rate_change, - dirty, derivative, target); + "change:\t\t%s/sec\n" + "next io:\t%llims\n", + rate, dirty, target, proportional, + derivative, change, next_io); } sysfs_hprint(dirty_data, @@ -189,6 +198,7 @@ STORE(__cached_dev) struct kobj_uevent_env *env; #define d_strtoul(var) sysfs_strtoul(var, dc->var) +#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) sysfs_strtoul(data_csum, dc->disk.data_csum); @@ -197,16 +207,15 @@ STORE(__cached_dev) d_strtoul(writeback_metadata); d_strtoul(writeback_running); d_strtoul(writeback_delay); - sysfs_strtoul_clamp(writeback_rate, - dc->writeback_rate.rate, 1, 1000000); + sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); - d_strtoul(writeback_rate_update_seconds); + sysfs_strtoul_clamp(writeback_rate, + dc->writeback_rate.rate, 1, INT_MAX); + + d_strtoul_nonzero(writeback_rate_update_seconds); d_strtoul(writeback_rate_d_term); - d_strtoul(writeback_rate_p_term_inverse); - sysfs_strtoul_clamp(writeback_rate_p_term_inverse, - dc->writeback_rate_p_term_inverse, 1, INT_MAX); - d_strtoul(writeback_rate_d_smooth); + d_strtoul_nonzero(writeback_rate_p_term_inverse); d_strtoi_h(sequential_cutoff); d_strtoi_h(readahead); @@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_d_term, &sysfs_writeback_rate_p_term_inverse, - &sysfs_writeback_rate_d_smooth, &sysfs_writeback_rate_debug, &sysfs_dirty_data, &sysfs_stripe_size, diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbe..bb37618e7664 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) { uint64_t now = local_clock(); - d->next += div_u64(done, d->rate); + d->next += div_u64(done * NSEC_PER_SEC, d->rate); + + if (time_before64(now + NSEC_PER_SEC, d->next)) + d->next = now + NSEC_PER_SEC; + + if (time_after64(now - NSEC_PER_SEC * 2, d->next)) + d->next = now - NSEC_PER_SEC * 2; return time_after64(d->next, now) ? 
div_u64(d->next - now, NSEC_PER_SEC / HZ) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 3cd931d3f26c..6c44fe059c27 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc) /* PD controller */ - int change = 0; - int64_t error; int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); int64_t derivative = dirty - dc->disk.sectors_dirty_last; + int64_t proportional = dirty - target; + int64_t change; dc->disk.sectors_dirty_last = dirty; - derivative *= dc->writeback_rate_d_term; - derivative = clamp(derivative, -dirty, dirty); + /* Scale to sectors per second */ + + proportional *= dc->writeback_rate_update_seconds; + proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); + + derivative = div_s64(derivative, dc->writeback_rate_update_seconds); derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, - dc->writeback_rate_d_smooth, 0); + (dc->writeback_rate_d_term / + dc->writeback_rate_update_seconds) ?: 1, 0); - /* Avoid divide by zero */ - if (!target) - goto out; + derivative *= dc->writeback_rate_d_term; + derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); - error = div64_s64((dirty + derivative - target) << 8, target); - - change = div_s64((dc->writeback_rate.rate * error) >> 8, - dc->writeback_rate_p_term_inverse); + change = proportional + derivative; /* Don't increase writeback rate if the device isn't keeping up */ if (change > 0 && time_after64(local_clock(), - dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) + dc->writeback_rate.next + NSEC_PER_MSEC)) change = 0; dc->writeback_rate.rate = - clamp_t(int64_t, dc->writeback_rate.rate + change, + clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change, 1, NSEC_PER_MSEC); -out: + + dc->writeback_rate_proportional = proportional; dc->writeback_rate_derivative = derivative; dc->writeback_rate_change = change; dc->writeback_rate_target = target; @@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work) static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) { - uint64_t ret; - if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || !dc->writeback_percent) return 0; - ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); - - return min_t(uint64_t, ret, HZ); + return bch_next_delay(&dc->writeback_rate, sectors); } struct dirty_io { @@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc) bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), sectors_dirty_init_fn, 0); + + dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); } int bch_cached_dev_writeback_init(struct cached_dev *dc) @@ -490,10 +490,9 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_delay = 30; dc->writeback_rate.rate = 1024; - dc->writeback_rate_update_seconds = 30; - dc->writeback_rate_d_term = 16; - dc->writeback_rate_p_term_inverse = 64; - dc->writeback_rate_d_smooth = 8; + dc->writeback_rate_update_seconds = 5; + dc->writeback_rate_d_term = 30; + dc->writeback_rate_p_term_inverse = 6000; dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); From cf872776fc84128bb779ce2b83a37c884c3203ae Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Wed, 11 Dec 2013 21:11:58 -0500 Subject: [PATCH 164/312] tty: Fix hang at ldsem_down_read() When a controlling tty is being hung up and the hang up is waiting for a just-signalled tty reader or writer to exit, and a new tty 
reader/writer tries to acquire an ldisc reference concurrently with the ldisc reference release from the signalled reader/writer, the hangup can hang. The new reader/writer is sleeping in ldsem_down_read() and the hangup is sleeping in ldsem_down_write() [1]. The new reader/writer fails to wakeup the waiting hangup because the wrong lock count value is checked (the old lock count rather than the new lock count) to see if the lock is unowned. Change helper function to return the new lock count if the cmpxchg was successful; document this behavior. [1] edited dmesg log from reporter SysRq : Show Blocked State task PC stack pid father systemd D ffff88040c4f0000 0 1 0 0x00000000 ffff88040c49fbe0 0000000000000046 ffff88040c4a0000 ffff88040c49ffd8 00000000001d3980 00000000001d3980 ffff88040c4a0000 ffff88040593d840 ffff88040c49fb40 ffffffff810a4cc0 0000000000000006 0000000000000023 Call Trace: [] ? sched_clock_cpu+0x9f/0xe4 [] ? sched_clock_cpu+0x9f/0xe4 [] ? sched_clock_cpu+0x9f/0xe4 [] ? sched_clock_cpu+0x9f/0xe4 [] schedule+0x24/0x5e [] schedule_timeout+0x15b/0x1ec [] ? sched_clock_cpu+0x9f/0xe4 [] ? _raw_spin_unlock_irq+0x24/0x26 [] down_read_failed+0xe3/0x1b9 [] ldsem_down_read+0x8b/0xa5 [] ? tty_ldisc_ref_wait+0x1b/0x44 [] tty_ldisc_ref_wait+0x1b/0x44 [] tty_write+0x7d/0x28a [] redirected_tty_write+0x8d/0x98 [] ? tty_write+0x28a/0x28a [] do_loop_readv_writev+0x56/0x79 [] do_readv_writev+0x1b0/0x1ff [] ? do_vfs_ioctl+0x32a/0x489 [] ? final_putname+0x1d/0x3a [] vfs_writev+0x2e/0x49 [] SyS_writev+0x47/0xaa [] system_call_fastpath+0x16/0x1b bash D ffffffff81c104c0 0 5469 5302 0x00000082 ffff8800cf817ac0 0000000000000046 ffff8804086b22a0 ffff8800cf817fd8 00000000001d3980 00000000001d3980 ffff8804086b22a0 ffff8800cf817a48 000000000000b9a0 ffff8800cf817a78 ffffffff81004675 ffff8800cf817a44 Call Trace: [] ? dump_trace+0x165/0x29c [] ? sched_clock_cpu+0x9f/0xe4 [] ? save_stack_trace+0x26/0x41 [] schedule+0x24/0x5e [] schedule_timeout+0x15b/0x1ec [] ? sched_clock_cpu+0x9f/0xe4 [] ? down_write_failed+0xa3/0x1c9 [] ? _raw_spin_unlock_irq+0x24/0x26 [] down_write_failed+0xab/0x1c9 [] ldsem_down_write+0x79/0xb1 [] ? tty_ldisc_lock_pair_timeout+0xa5/0xd9 [] tty_ldisc_lock_pair_timeout+0xa5/0xd9 [] tty_ldisc_hangup+0xc4/0x218 [] __tty_hangup+0x2e2/0x3ed [] disassociate_ctty+0x63/0x226 [] do_exit+0x79f/0xa11 [] ? get_signal_to_deliver+0x206/0x62f [] ? lock_release_holdtime.part.8+0xf/0x16e [] do_group_exit+0x47/0xb5 [] get_signal_to_deliver+0x241/0x62f [] do_signal+0x43/0x59d [] ? __audit_syscall_exit+0x21a/0x2a8 [] ? lock_release_holdtime.part.8+0xf/0x16e [] do_notify_resume+0x54/0x6c [] int_signal+0x12/0x17 Reported-by: Sami Farin Cc: # 3.12.x Signed-off-by: Peter Hurley Signed-off-by: Greg Kroah-Hartman --- drivers/tty/tty_ldsem.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 22fad8ad5ac2..d8a55e87877f 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem) return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); } +/* + * ldsem_cmpxchg() updates @*old with the last-known sem->count value. + * Returns 1 if count was successfully changed; @*old will have @new value. 
+ * Returns 0 if count was not changed; @*old will have most recent sem->count + */ static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) { - long tmp = *old; - *old = atomic_long_cmpxchg(&sem->count, *old, new); - return *old == tmp; + long tmp = atomic_long_cmpxchg(&sem->count, *old, new); + if (tmp == *old) { + *old = new; + return 1; + } else { + *old = tmp; + return 0; + } } /* From 9bd7d20c45157c175bfd45449e904a404506f99c Mon Sep 17 00:00:00 2001 From: wangweidong Date: Fri, 13 Dec 2013 11:00:10 +0800 Subject: [PATCH 165/312] sctp: loading sctp when load sctp_probe when I modprobe sctp_probe, it failed with "FATAL: ". I found that sctp should load before sctp_probe register jprobe. So I add a sctp_setup_jprobe for loading 'sctp' when first failed to register jprobe, just do this similar to dccp_probe. v2: add MODULE_SOFTDEP and check of request_module, as suggested by Neil Signed-off-by: Wang Weidong Acked-by: Neil Horman Signed-off-by: David S. Miller --- net/sctp/probe.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 53c452efb40b..5e68b94ee640 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -38,6 +38,7 @@ #include #include +MODULE_SOFTDEP("pre: sctp"); MODULE_AUTHOR("Wei Yongjun "); MODULE_DESCRIPTION("SCTP snooper"); MODULE_LICENSE("GPL"); @@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = { .entry = jsctp_sf_eat_sack, }; +static __init int sctp_setup_jprobe(void) +{ + int ret = register_jprobe(&sctp_recv_probe); + + if (ret) { + if (request_module("sctp")) + goto out; + ret = register_jprobe(&sctp_recv_probe); + } + +out: + return ret; +} + static __init int sctpprobe_init(void) { int ret = -ENOMEM; @@ -202,7 +217,7 @@ static __init int sctpprobe_init(void) &sctpprobe_fops)) goto free_kfifo; - ret = register_jprobe(&sctp_recv_probe); + ret = sctp_setup_jprobe(); if (ret) goto remove_proc; From 6979f8d28049879e6147767d93ba6732c8bd94f4 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 10 Dec 2013 22:28:04 +0000 Subject: [PATCH 166/312] serial: 8250_dw: Fix LCR workaround regression Commit c49436b657d0 (serial: 8250_dw: Improve unwritable LCR workaround) caused a regression. It added a check that the LCR was written properly to detect and workaround the busy quirk, but the behaviour of bit 5 (UART_LCR_SPAR) differs between IP versions 3.00a and 3.14c per the docs. On older versions this caused the check to fail and it would repeatedly force idle and rewrite the LCR register, causing delays and preventing any input from serial being received. This is fixed by masking out UART_LCR_SPAR before making the comparison. 
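To see what the masked comparison buys, here is a minimal user-space sketch of the check. UART_LCR_SPAR is bit 5 of the LCR (0x20); the sample register values and the helper name are made up for illustration and are not from the driver:

  #include <stdio.h>

  #define UART_LCR_SPAR 0x20      /* stick parity, bit 5 */

  /* Compare the value we wrote with what the port read back, ignoring SPAR. */
  static int lcr_matches(unsigned int written, unsigned int readback)
  {
          return (written & ~UART_LCR_SPAR) == (readback & ~UART_LCR_SPAR);
  }

  int main(void)
  {
          unsigned int written = 0x23, readback = 0x03;   /* differ only in bit 5 */

          printf("exact compare:  %d\n", written == readback);
          printf("masked compare: %d\n", lcr_matches(written, readback));
          return 0;
  }

On IP versions where SPAR reads back differently, the exact compare keeps failing and the driver loops forcing idle; the masked compare accepts the write on the first pass.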
Signed-off-by: James Hogan Cc: Greg Kroah-Hartman Cc: Jiri Slaby Cc: Tim Kryger Cc: Ezequiel Garcia Cc: Matt Porter Cc: Markus Mayer Tested-by: Tim Kryger Tested-by: Ezequiel Garcia Tested-by: Heikki Krogerus Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dw.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 4658e3e0ec42..5f096c74da7e 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) if (offset == UART_LCR) { int tries = 1000; while (tries--) { - if (value == p->serial_in(p, UART_LCR)) + unsigned int lcr = p->serial_in(p, UART_LCR); + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) return; dw8250_force_idle(p); writeb(value, p->membase + (UART_LCR << p->regshift)); @@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) if (offset == UART_LCR) { int tries = 1000; while (tries--) { - if (value == p->serial_in(p, UART_LCR)) + unsigned int lcr = p->serial_in(p, UART_LCR); + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) return; dw8250_force_idle(p); writel(value, p->membase + (UART_LCR << p->regshift)); From 49d45a31b71d7d9da74485922bdb63faf3dc9684 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sat, 7 Dec 2013 13:22:42 +0100 Subject: [PATCH 167/312] drm/edid: add quirk for BPC in Samsung NP700G7A-S01PL notebook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This bug in EDID was exposed by: commit eccea7920cfb009c2fa40e9ecdce8c36f61cab66 Author: Alex Deucher Date: Mon Mar 26 15:12:54 2012 -0400 drm/radeon/kms: improve bpc handling (v2) Which resulted in kind of regression in 3.5. This fixes https://bugs.freedesktop.org/show_bug.cgi?id=70934 Signed-off-by: Rafał Miłecki Cc: stable@vger.kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0a1e4a5f4234..8835dcddfac3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -68,6 +68,8 @@ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) /* Force reduced-blanking timings for detailed modes */ #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) +/* Force 8bpc */ +#define EDID_QUIRK_FORCE_8BPC (1 << 8) struct detailed_mode_closure { struct drm_connector *connector; @@ -128,6 +130,9 @@ static struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, }; /* @@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) drm_add_display_info(edid, &connector->display_info); + if (quirks & EDID_QUIRK_FORCE_8BPC) + connector->display_info.bpc = 8; + return num_modes; } EXPORT_SYMBOL(drm_add_edid_modes); From 7cd0c298f6e09476f474b7ae6c1ca876c5d7f881 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Tue, 10 Dec 2013 17:00:06 -0600 Subject: [PATCH 168/312] usb: phy: fix driver dependencies both isp1301-omap and fsl_usb2_otg drivers depend on usb_bus_start_enum() which is only defined if CONFIG_USB != n. There is a problem, however, where both those drivers could be statically linked, while CONFIG_USB=m. Fix the problem by fixing driver dependency. 
Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/phy/Kconfig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 08e2f39027ec..2b41c636a52a 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -19,8 +19,9 @@ config AB8500_USB in host mode, low speed. config FSL_USB2_OTG - bool "Freescale USB OTG Transceiver Driver" + tristate "Freescale USB OTG Transceiver Driver" depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME + depends on USB select USB_OTG select USB_PHY help @@ -29,6 +30,7 @@ config FSL_USB2_OTG config ISP1301_OMAP tristate "Philips ISP1301 with OMAP OTG" depends on I2C && ARCH_OMAP_OTG + depends on USB select USB_PHY help If you say yes here you get support for the Philips ISP1301 From a1c31f1d057130cc63e72a09189410d169db7ecf Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 7 Dec 2013 03:10:35 +0400 Subject: [PATCH 169/312] can: ems_usb: fix urb leaks on failure paths There are a couple failure paths where urb leaks. Is spare code within ems_usb_start_xmit(), usb_free_urb() should be used to deallocate urb instead of usb_unanchor_urb(). In ems_usb_start() there is no usb_free_urb() if usb_submit_urb() fails. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Acked-by: Sebastian Haas Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/ems_usb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 5f9a7ad9b964..8aeec0b4601a 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev) usb_unanchor_urb(urb); usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); + usb_free_urb(urb); break; } @@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne * allowed (MAX_TX_URBS). */ if (!context) { - usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); + usb_free_urb(urb); netdev_warn(netdev, "couldn't find free context\n"); From 20fb4eb96fb0350d28fc4d7cbfd5506711079592 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Sat, 14 Dec 2013 14:36:25 +0100 Subject: [PATCH 170/312] can: peak_usb: fix mem leak in pcan_usb_pro_init() This patch fixes a memory leak in pcan_usb_pro_init(). In patch f14e224 net: can: peak_usb: Do not do dma on the stack the struct pcan_usb_pro_fwinfo *fi and struct pcan_usb_pro_blinfo *bi were converted from stack to dynamic allocation va kmalloc(). However the corresponding kfree() was not introduced. This patch adds the missing kfree(). 
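The general pattern behind this class of fix is worth spelling out: once a buffer moves from the stack to kmalloc(), every exit path needs a matching kfree(). A rough user-space sketch of that shape, with invented helper names standing in for the fwinfo/blinfo requests:

  #include <stdlib.h>
  #include <string.h>

  /* Hypothetical stand-in for the firmware/bootloader info requests. */
  static int query_info(void *dst, size_t len)
  {
          memset(dst, 0, len);
          return 0;
  }

  static int device_init(void)
  {
          int err = -1;
          char *fi = malloc(64);          /* heap buffers, since DMA to the
                                             stack is not allowed */
          char *bi = malloc(64);

          if (!fi || !bi)
                  goto out;

          err = query_info(fi, 64);
          if (err)
                  goto out;
          err = query_info(bi, 64);
  out:
          free(bi);                       /* every exit path frees both */
          free(fi);
          return err;
  }

  int main(void)
  {
          return device_init() ? 1 : 0;
  }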
Cc: linux-stable # v3.10 Reported-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 8ee9d1556e6e..263dd921edc4 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) /* set LED in default state (end of init phase) */ pcan_usb_pro_set_led(dev, 0, 1); + kfree(bi); + kfree(fi); + return 0; err_out: From f78dea064c5f7de07de4912a6e5136dbc443d614 Mon Sep 17 00:00:00 2001 From: Marc Carino Date: Mon, 16 Dec 2013 18:15:53 -0800 Subject: [PATCH 171/312] libata: implement ATA_HORKAGE_NO_NCQ_TRIM and apply it to Micro M500 SSDs Certain drives cannot handle queued TRIM commands properly, even though support is indicated in the IDENTIFY DEVICE buffer. This patch allows for disabling the commands for the affected drives and apply it to the Micron/Crucial M500 SSDs which exhibit incorrect protocol behavior when issued queued TRIM commands, which could lead to silent data corruption. tj: Merged two unnecessarily split patches and made minor edits including shortening horkage name. Signed-off-by: Marc Carino Signed-off-by: Tejun Heo Link: http://lkml.kernel.org/g/1387246554-7311-1-git-send-email-marc.ceeeee@gmail.com Cc: stable@vger.kernel.org # 3.12+ --- drivers/ata/libata-core.c | 15 +++++++++++++-- include/linux/libata.h | 1 + 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ff0158481d53..1393a5890ed5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev, "failed to get NCQ Send/Recv Log Emask 0x%x\n", err_mask); } else { + u8 *cmds = dev->ncq_send_recv_cmds; + dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; - memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, - ATA_LOG_NCQ_SEND_RECV_SIZE); + memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); + + if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { + ata_dev_dbg(dev, "disabling queued TRIM support\n"); + cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= + ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; + } } } @@ -4205,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + /* End Marker */ { } }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 0e23c26485f4..9b503376738f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -418,6 +418,7 @@ enum { ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ + ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ From c1a71504e9715812a2d15e7c03b5aa147ae70ded Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 17 Dec 2013 11:13:39 +0800 Subject: [PATCH 172/312] cgroup: don't recycle cgroup id until all csses' have been destroyed Hugh reported this bug: > CONFIG_MEMCG_SWAP is broken 
in 3.13-rc. Try something like this: > > mkdir -p /tmp/tmpfs /tmp/memcg > mount -t tmpfs -o size=1G tmpfs /tmp/tmpfs > mount -t cgroup -o memory memcg /tmp/memcg > mkdir /tmp/memcg/old > echo 512M >/tmp/memcg/old/memory.limit_in_bytes > echo $$ >/tmp/memcg/old/tasks > cp /dev/zero /tmp/tmpfs/zero 2>/dev/null > echo $$ >/tmp/memcg/tasks > rmdir /tmp/memcg/old > sleep 1 # let rmdir work complete > mkdir /tmp/memcg/new > umount /tmp/tmpfs > dmesg | grep WARNING > rmdir /tmp/memcg/new > umount /tmp/memcg > > Shows lots of WARNING: CPU: 1 PID: 1006 at kernel/res_counter.c:91 > res_counter_uncharge_locked+0x1f/0x2f() > > Breakage comes from 34c00c319ce7 ("memcg: convert to use cgroup id"). > > The lifetime of a cgroup id is different from the lifetime of the > css id it replaced: memsw's css_get()s do nothing to hold on to the > old cgroup id, it soon gets recycled to a new cgroup, which then > mysteriously inherits the old's swap, without any charge for it. Instead of removing cgroup id right after all the csses have been offlined, we should do that after csses have been destroyed. To make sure an invalid css pointer won't be returned after the css is destroyed, make sure css_from_id() returns NULL in this case. tj: Updated comment to note planned changes for cgrp->id. Reported-by: Hugh Dickins Signed-off-by: Li Zefan Reviewed-by: Michal Hocko Signed-off-by: Tejun Heo --- kernel/cgroup.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index bcb1755f410a..bc1dcabe9217 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) struct cgroup *cgrp = dentry->d_fsdata; BUG_ON(!(cgroup_is_dead(cgrp))); + + /* + * XXX: cgrp->id is only used to look up css's. As cgroup + * and css's lifetimes will be decoupled, it should be made + * per-subsystem and moved to css->id so that lookups are + * successful until the target css is released. + */ + idr_remove(&cgrp->root->cgroup_idr, cgrp->id); + cgrp->id = -1; + call_rcu(&cgrp->rcu_head, cgroup_free_rcu); } else { struct cfent *cfe = __d_cfe(dentry); @@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); + rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); call_rcu(&css->rcu_head, css_free_rcu_fn); } @@ -4733,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp) /* delete this cgroup from parent->children */ list_del_rcu(&cgrp->sibling); - /* - * We should remove the cgroup object from idr before its grace - * period starts, so we won't be looking up a cgroup while the - * cgroup is being freed. 
- */ - idr_remove(&cgrp->root->cgroup_idr, cgrp->id); - cgrp->id = -1; - dput(d); set_bit(CGRP_RELEASABLE, &parent->flags); From 280484e708a3cc38fe6807718caa460e744c0b20 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Tue, 17 Dec 2013 13:16:16 +0000 Subject: [PATCH 173/312] ASoC: wm5110: Correct HPOUT3 DAPM route typo Reported-by: Kyung-Kwee Ryu Signed-off-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/wm5110.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index c3c7396a6181..b3d0b284cca1 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -1037,7 +1037,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { { "AEC Loopback", "HPOUT3L", "OUT3L" }, { "AEC Loopback", "HPOUT3R", "OUT3R" }, { "HPOUT3L", NULL, "OUT3L" }, - { "HPOUT3R", NULL, "OUT3L" }, + { "HPOUT3R", NULL, "OUT3R" }, { "AEC Loopback", "SPKOUTL", "OUT4L" }, { "SPKOUTLN", NULL, "OUT4L" }, From 443772776c69ac9293d66b4d69fd9af16299cc2a Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Mon, 16 Dec 2013 14:17:36 +0200 Subject: [PATCH 174/312] perf: Disable all pmus on unthrottling and rescheduling Currently, only one PMU in a context gets disabled during unthrottling and event_sched_{out,in}(), however, events in one context may belong to different pmus, which results in PMUs being reprogrammed while they are still enabled. This means that mixed PMU use [which is rare in itself] resulted in potentially completely unreliable results: corrupted events, bogus results, etc. This patch temporarily disables PMUs that correspond to each event in the context while these events are being modified. Signed-off-by: Alexander Shishkin Reviewed-by: Andi Kleen Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Stephane Eranian Cc: Alexander Shishkin Link: http://lkml.kernel.org/r/1387196256-8030-1-git-send-email-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 72348dc192c1..f5744010a8d2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event, if (event->state != PERF_EVENT_STATE_ACTIVE) return; + perf_pmu_disable(event->pmu); + event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; @@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event, ctx->nr_freq--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; + + perf_pmu_enable(event->pmu); } static void @@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx) { u64 tstamp = perf_event_time(event); + int ret = 0; if (event->state <= PERF_EVENT_STATE_OFF) return 0; @@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event, */ smp_wmb(); + perf_pmu_disable(event->pmu); + if (event->pmu->add(event, PERF_EF_START)) { event->state = PERF_EVENT_STATE_INACTIVE; event->oncpu = -1; - return -EAGAIN; + ret = -EAGAIN; + goto out; } event->tstamp_running += tstamp - event->tstamp_stopped; @@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event, if (event->attr.exclusive) cpuctx->exclusive = 1; - return 0; +out: + perf_pmu_enable(event->pmu); + + return ret; } static int @@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct 
perf_event_context *ctx, if (!event_filter_match(event)) continue; + perf_pmu_disable(event->pmu); + hwc = &event->hw; if (hwc->interrupts == MAX_INTERRUPTS) { @@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, } if (!event->attr.freq || !event->attr.sample_freq) - continue; + goto next; /* * stop the event and update event->count @@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, perf_adjust_period(event, period, delta, false); event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); + next: + perf_pmu_enable(event->pmu); } perf_pmu_enable(ctx->pmu); From 189b84fb54490ae24111124346a8e63f8e019385 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 13 Dec 2013 15:52:25 -0500 Subject: [PATCH 175/312] perf: Document the new transaction sample type Commit fdfbbd07e91f8fe3871 ("perf: Add generic transaction flags") added support for PERF_SAMPLE_TRANSACTION but forgot to add documentation for the sample type to include/uapi/linux/perf_event.h Signed-off-by: Vince Weaver Signed-off-by: Peter Zijlstra Cc: Andi Kleen Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1312131548450.10372@pianoman.cluster.toy Signed-off-by: Ingo Molnar --- include/uapi/linux/perf_event.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1802d6153ae..959d454f76a1 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -679,6 +679,7 @@ enum perf_event_type { * * { u64 weight; } && PERF_SAMPLE_WEIGHT * { u64 data_src; } && PERF_SAMPLE_DATA_SRC + * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * }; */ PERF_RECORD_SAMPLE = 9, From 5d4cf996cf134e8ddb4f906b8197feb9267c2b77 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 17 Dec 2013 09:21:25 +0000 Subject: [PATCH 176/312] sched: Assign correct scheduling domain to 'sd_llc' Commit 42eb088e (sched: Avoid NULL dereference on sd_busy) corrected a NULL dereference on sd_busy but the fix also altered what scheduling domain it used for the 'sd_llc' percpu variable. One impact of this is that a task selecting a runqueue may consider idle CPUs that are not cache siblings as candidates for running. Tasks are then running on CPUs that are not cache hot. This was found through bisection where ebizzy threads were not seeing equal performance and it looked like a scheduling fairness issue. This patch mitigates but does not completely fix the problem on all machines tested implying there may be an additional bug or a common root cause. Here are the average range of performance seen by individual ebizzy threads. It was tested on top of candidate patches related to x86 TLB range flushing. 4-core machine 3.13.0-rc3 3.13.0-rc3 vanilla fixsd-v3r3 Mean 1 0.00 ( 0.00%) 0.00 ( 0.00%) Mean 2 0.34 ( 0.00%) 0.10 ( 70.59%) Mean 3 1.29 ( 0.00%) 0.93 ( 27.91%) Mean 4 7.08 ( 0.00%) 0.77 ( 89.12%) Mean 5 193.54 ( 0.00%) 2.14 ( 98.89%) Mean 6 151.12 ( 0.00%) 2.06 ( 98.64%) Mean 7 115.38 ( 0.00%) 2.04 ( 98.23%) Mean 8 108.65 ( 0.00%) 1.92 ( 98.23%) 8-core machine Mean 1 0.00 ( 0.00%) 0.00 ( 0.00%) Mean 2 0.40 ( 0.00%) 0.21 ( 47.50%) Mean 3 23.73 ( 0.00%) 0.89 ( 96.25%) Mean 4 12.79 ( 0.00%) 1.04 ( 91.87%) Mean 5 13.08 ( 0.00%) 2.42 ( 81.50%) Mean 6 23.21 ( 0.00%) 69.46 (-199.27%) Mean 7 15.85 ( 0.00%) 101.72 (-541.77%) Mean 8 109.37 ( 0.00%) 19.13 ( 82.51%) Mean 12 124.84 ( 0.00%) 28.62 ( 77.07%) Mean 16 113.50 ( 0.00%) 24.16 ( 78.71%) It's eliminated for one machine and reduced for another. 
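The root cause is a plain C pitfall rather than anything scheduler-specific: a cursor pointer was advanced for one assignment and then reused for another that still needed the original value. A toy sketch of the corrected shape (struct and names invented for illustration; the real hunk follows below):

  #include <stdio.h>

  struct domain {
          const char *name;
          struct domain *parent;
  };

  int main(void)
  {
          struct domain top = { "package", NULL };
          struct domain llc = { "llc", &top };

          struct domain *sd = &llc;
          /* before the fix the cursor itself was advanced here
           * ("sd = sd->parent;"), so the later sd_llc assignment no longer
           * pointed at the cache-sibling domain */
          struct domain *busy_sd = sd->parent;

          printf("sd_busy -> %s\n", busy_sd ? busy_sd->name : "(none)");
          printf("sd_llc  -> %s\n", sd->name);
          return 0;
  }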
Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra Cc: Alex Shi Cc: Andrew Morton Cc: Fengguang Wu Cc: H Peter Anvin Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20131217092124.GV11295@suse.de Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 19af58f3a261..a88f4a485c5e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym); static void update_top_cache_domain(int cpu) { struct sched_domain *sd; + struct sched_domain *busy_sd = NULL; int id = cpu; int size = 1; @@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu) if (sd) { id = cpumask_first(sched_domain_span(sd)); size = cpumask_weight(sched_domain_span(sd)); - sd = sd->parent; /* sd_busy */ + busy_sd = sd->parent; /* sd_busy */ } - rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); + rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); per_cpu(sd_llc_size, cpu) = size; From 757dfcaa41844595964f1220f1d33182dae49976 Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Wed, 27 Nov 2013 19:59:13 +0400 Subject: [PATCH 177/312] sched/rt: Fix rq's cpupri leak while enqueue/dequeue child RT entities This patch touches the RT group scheduling case. Functions inc_rt_prio_smp() and dec_rt_prio_smp() change (global) rq's priority, while rt_rq passed to them may be not the top-level rt_rq. This is wrong, because changing of priority on a child level does not guarantee that the priority is the highest all over the rq. So, this leak makes RT balancing unusable. The short example: the task having the highest priority among all rq's RT tasks (no one other task has the same priority) are waking on a throttle rt_rq. The rq's cpupri is set to the task's priority equivalent, but real rq->rt.highest_prio.curr is less. The patch below fixes the problem. Signed-off-by: Kirill Tkhai Signed-off-by: Peter Zijlstra CC: Steven Rostedt CC: stable@vger.kernel.org Link: http://lkml.kernel.org/r/49231385567953@web4m.yandex.ru Signed-off-by: Ingo Molnar --- kernel/sched/rt.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7d57275fc396..1c4065575fa2 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); +#ifdef CONFIG_RT_GROUP_SCHED + /* + * Change rq's cpupri only if rt_rq is the top queue. + */ + if (&rq->rt != rt_rq) + return; +#endif if (rq->online && prio < prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, prio); } @@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); +#ifdef CONFIG_RT_GROUP_SCHED + /* + * Change rq's cpupri only if rt_rq is the top queue. 
+ */ + if (&rq->rt != rt_rq) + return; +#endif if (rq->online && rt_rq->highest_prio.curr != prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } From 02fc17c10258ad70c1b9a93f8884bdaf0ac3f766 Mon Sep 17 00:00:00 2001 From: Jean-Francois Moine Date: Wed, 27 Nov 2013 21:10:24 +0100 Subject: [PATCH 178/312] ASoC: kirkwood: Fix the CPU DAI rates This patch fixes the rates declared in the CPU DAI parameters: - SNDRV_PCM_RATE_KNOT and the discrete rates SNDRV_PCM_RATE_xxx should not be used with SNDRV_PCM_RATE_CONTINUOUS, - SNDRV_PCM_RATE_CONTINUOUS asks for rate_min and rate_max, - the device may do streaming down to 5512Hz. Signed-off-by: Jean-Francois Moine Signed-off-by: Mark Brown --- sound/soc/kirkwood/kirkwood-i2s.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 0b18f654b413..3920a5e8125f 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { .playback = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_I2S_FORMATS, }, .capture = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_I2S_FORMATS, }, .ops = &kirkwood_i2s_dai_ops, @@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = { .playback = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_SPDIF_FORMATS, }, .capture = { .channels_min = 1, .channels_max = 2, - .rates = SNDRV_PCM_RATE_8000_192000 | - SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_KNOT, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 5512, + .rate_max = 192000, .formats = KIRKWOOD_SPDIF_FORMATS, }, .ops = &kirkwood_i2s_dai_ops, From 533518a43ab9d662c864a9b63a6723050bfea488 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 12 Dec 2013 12:13:17 -0500 Subject: [PATCH 179/312] drm/radeon/dce6: set correct number of audio pins DCE6.0, 8.x has 6 DCE6.1 has 4 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/dce6_afmt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index de86493cbc44..ab59fd76f2d9 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev) rdev->audio.enabled = true; if (ASIC_IS_DCE8(rdev)) - rdev->audio.num_pins = 7; + rdev->audio.num_pins = 6; + else if (ASIC_IS_DCE61(rdev)) + rdev->audio.num_pins = 4; else rdev->audio.num_pins = 6; From c745fe611ca42295c9d91d8e305d27983e9132ef Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 9 Dec 2013 17:46:59 -0500 Subject: [PATCH 180/312] drm/radeon/dpm: disable ss on Cayman Spread spectrum seems to cause hangs when dynamic clock switching is enabled. Disable it for now. This does not affect performance or the amount of power saved. Tracked down by Martin Andersson. 
bug: https://bugs.freedesktop.org/show_bug.cgi?id=69723 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/rv770_dpm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 913b025ae9b3..374499db20c7 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); + /* disable ss, causes hangs on some cayman boards */ + if (rdev->family == CHIP_CAYMAN) { + pi->sclk_ss = false; + pi->mclk_ss = false; + } + if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else From b67ce39a30976171e7b96b30a94a0216ab89df97 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 13 Dec 2013 09:05:49 -0500 Subject: [PATCH 181/312] drm/radeon: check for 0 count in speaker allocation and SAD code If there is no speaker allocation block or SAD block, bail early. bug: https://bugs.freedesktop.org/show_bug.cgi?id=72283 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/dce6_afmt.c | 4 ++-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index ab59fd76f2d9..713a5d359901 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) } sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); return; } @@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index aa695c4feb3d..0c6d5cef4cf1 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) } sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); return; } @@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } From 3a8c92086d1c05ae1cf6a52ffd8dde2851e6b1d3 Mon Sep 17 00:00:00 2001 From: Mark Tinguely Date: Sat, 5 Oct 2013 21:48:25 -0500 Subject: [PATCH 182/312] xfs: fix memory leak in xfs_dir2_node_removename Fix the leak of kernel memory in xfs_dir2_node_removename() when xfs_dir2_leafn_remove() returns an error code. 
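The fix that follows converts early returns into a single exit label so the btree cursor state is always freed. A stripped-down user-space sketch of that goto-cleanup shape (names are placeholders, not the XFS code):

  #include <stdlib.h>

  struct da_state { int dummy; };

  /* Hypothetical lookup step that can fail. */
  static int lookup_entry(struct da_state *state)
  {
          (void)state;
          return -1;
  }

  static int removename(void)
  {
          int error;
          struct da_state *state = malloc(sizeof(*state));

          if (!state)
                  return -1;

          error = lookup_entry(state);
          if (error)
                  goto out_free;  /* a bare "return" here would leak state */

          /* ... remove the entry and fix up the btree ... */
  out_free:
          free(state);            /* one exit point frees on every path */
          return error;
  }

  int main(void)
  {
          removename();
          return 0;
  }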
Signed-off-by: Mark Tinguely Reviewed-by: Ben Myers Signed-off-by: Ben Myers (cherry picked from commit ef701600fd26cace9d513ee174688a2b83832126) --- fs/xfs/xfs_dir2_node.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 56369d4509d5..48c7d18f68c3 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup( */ int /* error */ xfs_dir2_node_removename( - xfs_da_args_t *args) /* operation arguments */ + struct xfs_da_args *args) /* operation arguments */ { - xfs_da_state_blk_t *blk; /* leaf block */ + struct xfs_da_state_blk *blk; /* leaf block */ int error; /* error return value */ int rval; /* operation return value */ - xfs_da_state_t *state; /* btree cursor */ + struct xfs_da_state *state; /* btree cursor */ trace_xfs_dir2_node_removename(args); @@ -2084,19 +2084,18 @@ xfs_dir2_node_removename( state->mp = args->dp->i_mount; state->blocksize = state->mp->m_dirblksize; state->node_ents = state->mp->m_dir_node_ents; - /* - * Look up the entry we're deleting, set up the cursor. - */ + + /* Look up the entry we're deleting, set up the cursor. */ error = xfs_da3_node_lookup_int(state, &rval); if (error) - rval = error; - /* - * Didn't find it, upper layer screwed up. - */ + goto out_free; + + /* Didn't find it, upper layer screwed up. */ if (rval != EEXIST) { - xfs_da_state_free(state); - return rval; + error = rval; + goto out_free; } + blk = &state->path.blk[state->path.active - 1]; ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); ASSERT(state->extravalid); @@ -2107,7 +2106,7 @@ xfs_dir2_node_removename( error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, &state->extrablk, &rval); if (error) - return error; + goto out_free; /* * Fix the hash values up the btree. */ @@ -2122,6 +2121,7 @@ xfs_dir2_node_removename( */ if (!error) error = xfs_dir2_node_to_leaf(state); +out_free: xfs_da_state_free(state); return error; } From 30d161c9aacb4b719789c30623b9eec7d1aa1d08 Mon Sep 17 00:00:00 2001 From: Jie Liu Date: Tue, 26 Nov 2013 21:38:54 +0800 Subject: [PATCH 183/312] xfs: fix false assertion at xfs_qm_vop_create_dqattach After the previous fix, there still has another ASSERT failure if turning off any type of quota while fsstress is running at the same time. Backtrace in this case: [ 50.867897] XFS: Assertion failed: XFS_IS_GQUOTA_ON(mp), file: fs/xfs/xfs_qm.c, line: 2118 [ 50.867924] ------------[ cut here ]------------ ... [ 50.867957] Kernel BUG at ffffffffa0b55a32 [verbose debug info unavailable] [ 50.867999] invalid opcode: 0000 [#1] SMP [ 50.869407] Call Trace: [ 50.869446] [] xfs_qm_vop_create_dqattach+0x19a/0x2d0 [xfs] [ 50.869512] [] xfs_create+0x5c5/0x6a0 [xfs] [ 50.869564] [] xfs_vn_mknod+0xac/0x1d0 [xfs] [ 50.869615] [] xfs_vn_mkdir+0x16/0x20 [xfs] [ 50.869655] [] vfs_mkdir+0x95/0x130 [ 50.869689] [] SyS_mkdirat+0xaa/0xe0 [ 50.869723] [] SyS_mkdir+0x19/0x20 [ 50.869757] [] system_call_fastpath+0x1a/0x1f [ 50.869793] Code: 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 55 48 89 [ 50.870003] RIP [] assfail+0x22/0x30 [xfs] [ 50.870050] RSP [ 50.879251] ---[ end trace c93a2b342341c65b ]--- We're hitting the ASSERT(XFS_IS_*QUOTA_ON(mp)) in xfs_qm_vop_create_dqattach(), however the assertion itself is not right IMHO. While performing quota off, we firstly clear the XFS_*QUOTA_ACTIVE bit(s) from struct xfs_mount without taking any special locks, see xfs_qm_scall_quotaoff(). Hence there is no guarantee that the desired quota is still active. 
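Put another way, a flag that another thread may legally clear under you cannot be asserted; it has to become part of the guard. A small illustrative sketch of that shape (not the XFS code):

  #include <stdbool.h>
  #include <stdio.h>

  static bool uquota_active;      /* cleared, unlocked, by a concurrent quotaoff */

  static void attach_dquot(int *udqp)
  {
          /* asserting uquota_active here can trip at any time; folding the
           * flag into the guard keeps the attach correct instead */
          if (udqp && uquota_active)
                  printf("attaching user dquot\n");
  }

  int main(void)
  {
          int dq = 0;

          uquota_active = false;  /* user quota already turned off */
          attach_dquot(&dq);      /* silently skips, no assertion to trip */
          return 0;
  }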
Signed-off-by: Jie Liu Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers (cherry picked from commit 37eb9706ebf5b99d14c6086cdeef2c2f73f9c9fb) --- fs/xfs/xfs_qm.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 14a4996cfec6..588e4909c589 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -2082,24 +2082,21 @@ xfs_qm_vop_create_dqattach( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - if (udqp) { + if (udqp && XFS_IS_UQUOTA_ON(mp)) { ASSERT(ip->i_udquot == NULL); - ASSERT(XFS_IS_UQUOTA_ON(mp)); ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); ip->i_udquot = xfs_qm_dqhold(udqp); xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); } - if (gdqp) { + if (gdqp && XFS_IS_GQUOTA_ON(mp)) { ASSERT(ip->i_gdquot == NULL); - ASSERT(XFS_IS_GQUOTA_ON(mp)); ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); ip->i_gdquot = xfs_qm_dqhold(gdqp); xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); } - if (pdqp) { + if (pdqp && XFS_IS_PQUOTA_ON(mp)) { ASSERT(ip->i_pdquot == NULL); - ASSERT(XFS_IS_PQUOTA_ON(mp)); ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); ip->i_pdquot = xfs_qm_dqhold(pdqp); From 5c22727895bf61cb851835be0d30260fb36de648 Mon Sep 17 00:00:00 2001 From: Jie Liu Date: Tue, 26 Nov 2013 21:38:34 +0800 Subject: [PATCH 184/312] xfs: fix assertion failure at xfs_setattr_nonsize For CRC enabled v5 super block, change a file's ownership can simply trigger an ASSERT failure at xfs_setattr_nonsize() if both group and project quota are enabled, i.e, [ 305.337609] XFS: Assertion failed: !XFS_IS_PQUOTA_ON(mp), file: fs/xfs/xfs_iops.c, line: 621 [ 305.339250] Kernel BUG at ffffffffa0a7fa32 [verbose debug info unavailable] [ 305.383939] Call Trace: [ 305.385536] [] xfs_setattr_nonsize+0x69a/0x720 [xfs] [ 305.387142] [] xfs_vn_setattr+0x29/0x70 [xfs] [ 305.388727] [] notify_change+0x1a8/0x350 [ 305.390298] [] chown_common+0xfd/0x110 [ 305.391868] [] SyS_fchownat+0xaf/0x110 [ 305.393440] [] SyS_lchown+0x20/0x30 [ 305.394995] [] system_call_fastpath+0x1a/0x1f [ 305.399870] RIP [] assfail+0x22/0x30 [xfs] This fix adjust the assertion to check if the super block support both quota inodes or not. 
Signed-off-by: Jie Liu Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers (cherry picked from commit 5a01dd54f4a7fb513062070c5acef20d13cad980) --- fs/xfs/xfs_iops.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 27e0e544e963..104455b8046c 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -618,7 +618,8 @@ xfs_setattr_nonsize( } if (!gid_eq(igid, gid)) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { - ASSERT(!XFS_IS_PQUOTA_ON(mp)); + ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) || + !XFS_IS_PQUOTA_ON(mp)); ASSERT(mask & ATTR_GID); ASSERT(gdqp); olddquot2 = xfs_qm_vop_chown(tp, ip, From 718cc6f88cbfc4fbd39609f28c4c86883945f90d Mon Sep 17 00:00:00 2001 From: Jie Liu Date: Tue, 26 Nov 2013 21:38:49 +0800 Subject: [PATCH 185/312] xfs: fix infinite loop by detaching the group/project hints from user dquot xfs_quota(8) will hang up if trying to turn group/project quota off before the user quota is off, this could be 100% reproduced by: # mount -ouquota,gquota /dev/sda7 /xfs # mkdir /xfs/test # xfs_quota -xc 'off -g' /xfs <-- hangs up # echo w > /proc/sysrq-trigger # dmesg SysRq : Show Blocked State task PC stack pid father xfs_quota D 0000000000000000 0 27574 2551 0x00000000 [snip] Call Trace: [] schedule+0xad/0xc0 [] schedule_timeout+0x35e/0x3c0 [] ? mark_held_locks+0x176/0x1c0 [] ? call_timer_fn+0x2c0/0x2c0 [] ? xfs_qm_shrink_count+0x30/0x30 [xfs] [] schedule_timeout_uninterruptible+0x26/0x30 [] xfs_qm_dquot_walk+0x235/0x260 [xfs] [] ? xfs_perag_get+0x1d8/0x2d0 [xfs] [] ? xfs_perag_get+0x5/0x2d0 [xfs] [] ? xfs_inode_ag_iterator+0xae/0xf0 [xfs] [] ? xfs_trans_free_dqinfo+0x50/0x50 [xfs] [] ? xfs_inode_ag_iterator+0xcf/0xf0 [xfs] [] xfs_qm_dqpurge_all+0x66/0xb0 [xfs] [] xfs_qm_scall_quotaoff+0x20a/0x5f0 [xfs] [] xfs_fs_set_xstate+0x136/0x180 [xfs] [] do_quotactl+0x53a/0x6b0 [] ? iput+0x5b/0x90 [] SyS_quotactl+0x167/0x1d0 [] ? trace_hardirqs_on_thunk+0x3a/0x3f [] system_call_fastpath+0x16/0x1b It's fine if we turn user quota off at first, then turn off other kind of quotas if they are enabled since the group/project dquot refcount is decreased to zero once the user quota if off. Otherwise, those dquots refcount is non-zero due to the user dquot might refer to them as hint(s). Hence, above operation cause an infinite loop at xfs_qm_dquot_walk() while trying to purge dquot cache. This problem has been around since Linux 3.4, it was introduced by: [ b84a3a9675 xfs: remove the per-filesystem list of dquots ] Originally we will release the group dquot pointers because the user dquots maybe carrying around as a hint via xfs_qm_detach_gdquots(). However, with above change, there is no such work to be done before purging group/project dquot cache. In order to solve this problem, this patch introduces a special routine xfs_qm_dqpurge_hints(), and it would release the group/project dquot pointers the user dquots maybe carrying around as a hint, and then it will proceed to purge the user dquot cache if requested. 
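The shape of the problem can be shown with a toy reference counter: the purge walk only frees entries whose count has dropped to zero, so the reference held by the hint has to be released first. Illustrative sketch only; the real dquot locking and radix-tree walk are omitted:

  #include <stdio.h>

  struct dquot {
          int refs;
          struct dquot *hint;     /* user dquot's cached group/project dquot */
  };

  static void drop_hint(struct dquot *d)
  {
          if (d->hint) {
                  d->hint->refs--;        /* release the pinned reference */
                  d->hint = NULL;
          }
  }

  int main(void)
  {
          struct dquot group = { 1, NULL };
          struct dquot user  = { 1, &group };

          group.refs++;           /* the hint pins the group dquot */

          /* a purge that only frees refs == 0 entries would spin forever on
           * "group" unless the hint reference is dropped first */
          drop_hint(&user);
          printf("group refs: %d\n", group.refs);
          return 0;
  }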
Cc: stable@vger.kernel.org Signed-off-by: Jie Liu Reviewed-by: Dave Chinner Signed-off-by: Ben Myers (cherry picked from commit df8052e7dae00bde6f21b40b6e3e1099770f3afc) --- fs/xfs/xfs_qm.c | 71 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 588e4909c589..dd88f0e27bd8 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -134,8 +134,6 @@ xfs_qm_dqpurge( { struct xfs_mount *mp = dqp->q_mount; struct xfs_quotainfo *qi = mp->m_quotainfo; - struct xfs_dquot *gdqp = NULL; - struct xfs_dquot *pdqp = NULL; xfs_dqlock(dqp); if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { @@ -143,21 +141,6 @@ xfs_qm_dqpurge( return EAGAIN; } - /* - * If this quota has a hint attached, prepare for releasing it now. - */ - gdqp = dqp->q_gdquot; - if (gdqp) { - xfs_dqlock(gdqp); - dqp->q_gdquot = NULL; - } - - pdqp = dqp->q_pdquot; - if (pdqp) { - xfs_dqlock(pdqp); - dqp->q_pdquot = NULL; - } - dqp->dq_flags |= XFS_DQ_FREEING; xfs_dqflock(dqp); @@ -206,11 +189,47 @@ xfs_qm_dqpurge( XFS_STATS_DEC(xs_qm_dquot_unused); xfs_qm_dqdestroy(dqp); + return 0; +} + +/* + * Release the group or project dquot pointers the user dquots maybe carrying + * around as a hint, and proceed to purge the user dquot cache if requested. +*/ +STATIC int +xfs_qm_dqpurge_hints( + struct xfs_dquot *dqp, + void *data) +{ + struct xfs_dquot *gdqp = NULL; + struct xfs_dquot *pdqp = NULL; + uint flags = *((uint *)data); + + xfs_dqlock(dqp); + if (dqp->dq_flags & XFS_DQ_FREEING) { + xfs_dqunlock(dqp); + return EAGAIN; + } + + /* If this quota has a hint attached, prepare for releasing it now */ + gdqp = dqp->q_gdquot; + if (gdqp) + dqp->q_gdquot = NULL; + + pdqp = dqp->q_pdquot; + if (pdqp) + dqp->q_pdquot = NULL; + + xfs_dqunlock(dqp); if (gdqp) - xfs_qm_dqput(gdqp); + xfs_qm_dqrele(gdqp); if (pdqp) - xfs_qm_dqput(pdqp); + xfs_qm_dqrele(pdqp); + + if (flags & XFS_QMOPT_UQUOTA) + return xfs_qm_dqpurge(dqp, NULL); + return 0; } @@ -222,8 +241,18 @@ xfs_qm_dqpurge_all( struct xfs_mount *mp, uint flags) { - if (flags & XFS_QMOPT_UQUOTA) - xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); + /* + * We have to release group/project dquot hint(s) from the user dquot + * at first if they are there, otherwise we would run into an infinite + * loop while walking through radix tree to purge other type of dquots + * since their refcount is not zero if the user dquot refers to them + * as hint. + * + * Call the special xfs_qm_dqpurge_hints() will end up go through the + * general xfs_qm_dqpurge() against user dquot cache if requested. + */ + xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags); + if (flags & XFS_QMOPT_GQUOTA) xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); if (flags & XFS_QMOPT_PQUOTA) From 809625ca544bdc9e02a5728c1939a76dc29decb1 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sun, 8 Dec 2013 23:33:50 +0900 Subject: [PATCH 186/312] MAINTAINERS: fix incorrect mail address of XFS maintainer When I tried to send the patches to XFS Maintainers, I got returned mail included delivery fail message for Dave's mail. Maybe, Dave Chinner mail address is incorrect. I try to fix it correctly. 
Signed-off-by: Namjae Jeon Reviewed-by: Ben Myers Signed-off-by: Ben Myers (cherry picked from commit db10bddc7d4f412bcd8630fc479fa1eb009e325b) --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index f216db847022..f5d0bddb6cfa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9530,7 +9530,7 @@ F: drivers/xen/*swiotlb* XFS FILESYSTEM P: Silicon Graphics Inc -M: Dave Chinner +M: Dave Chinner M: Ben Myers M: xfs@oss.sgi.com L: xfs@oss.sgi.com From 6e708bcf6583f663da9fe7bc5292cc62f0a8410d Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 22 Nov 2013 10:41:16 +1100 Subject: [PATCH 187/312] xfs: align initial file allocations correctly The function xfs_bmap_isaeof() is used to indicate that an allocation is occurring at or past the end of file, and as such should be aligned to the underlying storage geometry if possible. Commit 27a3f8f ("xfs: introduce xfs_bmap_last_extent") changed the behaviour of this function for empty files - it turned off allocation alignment for this case accidentally. Hence large initial allocations from direct IO are not getting correctly aligned to the underlying geometry, and that is cause write performance to drop in alignment sensitive configurations. Fix it by considering allocation into empty files as requiring aligned allocation again. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers (cherry picked from commit f9b395a8ef8f34d19cae2cde361e19c96e097fad) --- fs/xfs/xfs_bmap.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3ef11b22e750..8401f11f378f 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -1635,7 +1635,7 @@ xfs_bmap_last_extent( * blocks at the end of the file which do not start at the previous data block, * we will try to align the new blocks at stripe unit boundaries. * - * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be + * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be * at, or past the EOF. */ STATIC int @@ -1650,9 +1650,14 @@ xfs_bmap_isaeof( bma->aeof = 0; error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, &is_empty); - if (error || is_empty) + if (error) return error; + if (is_empty) { + bma->aeof = 1; + return 0; + } + /* * Check if we are allocation or past the last extent, or at least into * the last delayed allocated extent. From 83a0adc3f93aae4ab9c59113e3145c7bdb2b4a8c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 17 Dec 2013 00:03:52 -0800 Subject: [PATCH 188/312] xfs: remove xfsbdstrat error The xfsbdstrat helper is a small but useless wrapper for xfs_buf_iorequest that handles the case of a shut down filesystem. Most of the users have private, uncached buffers that can just be freed in this case, but the complex error handling in xfs_bioerror_relse messes up the case when it's called without a locked buffer. Remove xfsbdstrat and opencode the error handling in the callers. All but one can simply return an error and don't need to deal with buffer state, and the one caller that cares about the buffer state could do with a major cleanup as well, but we'll defer that to later. 
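For reference, the open-coded form the callers end up with amounts to checking for shutdown before submission and simply returning an error. A user-space sketch of that call-site pattern (helper names invented, buffer handling omitted):

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  static bool fs_shut_down;

  static void buf_iorequest(const char *what)
  {
          printf("submitting I/O for %s\n", what);
  }

  /* Callers check for shutdown themselves; most can simply return an error
   * rather than routing through a wrapper that must guess how to dispose of
   * the buffer. */
  static int read_uncached(const char *what)
  {
          if (fs_shut_down)
                  return -EIO;
          buf_iorequest(what);
          return 0;
  }

  int main(void)
  {
          fs_shut_down = true;
          return read_uncached("superblock") == -EIO ? 0 : 1;
  }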
Signed-off-by: Christoph Hellwig Reviewed-by: Ben Myers Signed-off-by: Ben Myers --- fs/xfs/xfs_bmap_util.c | 14 ++++++++++++-- fs/xfs/xfs_buf.c | 27 ++++++--------------------- fs/xfs/xfs_buf.h | 5 ++--- fs/xfs/xfs_log_recover.c | 13 +++++++++++-- fs/xfs/xfs_trans_buf.c | 13 ++++++++++++- 5 files changed, 43 insertions(+), 29 deletions(-) diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 5887e41c0323..1394106ed22d 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes( XFS_BUF_UNWRITE(bp); XFS_BUF_READ(bp); XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); - xfsbdstrat(mp, bp); + + if (XFS_FORCED_SHUTDOWN(mp)) { + error = XFS_ERROR(EIO); + break; + } + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, @@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes( XFS_BUF_UNDONE(bp); XFS_BUF_UNREAD(bp); XFS_BUF_WRITE(bp); - xfsbdstrat(mp, bp); + + if (XFS_FORCED_SHUTDOWN(mp)) { + error = XFS_ERROR(EIO); + break; + } + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c7f0b77dcb00..9fa9c4304613 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -698,7 +698,11 @@ xfs_buf_read_uncached( bp->b_flags |= XBF_READ; bp->b_ops = ops; - xfsbdstrat(target->bt_mount, bp); + if (XFS_FORCED_SHUTDOWN(target->bt_mount)) { + xfs_buf_relse(bp); + return NULL; + } + xfs_buf_iorequest(bp); xfs_buf_iowait(bp); return bp; } @@ -1089,7 +1093,7 @@ xfs_bioerror( * This is meant for userdata errors; metadata bufs come with * iodone functions attached, so that we can track down errors. */ -STATIC int +int xfs_bioerror_relse( struct xfs_buf *bp) { @@ -1164,25 +1168,6 @@ xfs_bwrite( return error; } -/* - * Wrapper around bdstrat so that we can stop data from going to disk in case - * we are shutting down the filesystem. Typically user data goes thru this - * path; one of the exceptions is the superblock. - */ -void -xfsbdstrat( - struct xfs_mount *mp, - struct xfs_buf *bp) -{ - if (XFS_FORCED_SHUTDOWN(mp)) { - trace_xfs_bdstrat_shut(bp, _RET_IP_); - xfs_bioerror_relse(bp); - return; - } - - xfs_buf_iorequest(bp); -} - STATIC void _xfs_buf_ioend( xfs_buf_t *bp, diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index e65683361017..7e41b08017f7 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -269,9 +269,6 @@ extern void xfs_buf_unlock(xfs_buf_t *); /* Buffer Read and Write Routines */ extern int xfs_bwrite(struct xfs_buf *bp); - -extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); - extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int); extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); @@ -282,6 +279,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, #define xfs_buf_zero(bp, off, len) \ xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) +extern int xfs_bioerror_relse(struct xfs_buf *); + static inline int xfs_buf_geterror(xfs_buf_t *bp) { return bp ? 
bp->b_error : ENOMEM; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b6b669df40f3..eae16920655b 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -193,7 +193,10 @@ xlog_bread_noalign( bp->b_io_length = nbblks; bp->b_error = 0; - xfsbdstrat(log->l_mp, bp); + if (XFS_FORCED_SHUTDOWN(log->l_mp)) + return XFS_ERROR(EIO); + + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) xfs_buf_ioerror_alert(bp, __func__); @@ -4397,7 +4400,13 @@ xlog_do_recover( XFS_BUF_READ(bp); XFS_BUF_UNASYNC(bp); bp->b_ops = &xfs_sb_buf_ops; - xfsbdstrat(log->l_mp, bp); + + if (XFS_FORCED_SHUTDOWN(log->l_mp)) { + xfs_buf_relse(bp); + return XFS_ERROR(EIO); + } + + xfs_buf_iorequest(bp); error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, __func__); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index c035d11b7734..647b6f1d8923 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -314,7 +314,18 @@ xfs_trans_read_buf_map( ASSERT(bp->b_iodone == NULL); XFS_BUF_READ(bp); bp->b_ops = ops; - xfsbdstrat(tp->t_mountp, bp); + + /* + * XXX(hch): clean up the error handling here to be less + * of a mess.. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + trace_xfs_bdstrat_shut(bp, _RET_IP_); + xfs_bioerror_relse(bp); + } else { + xfs_buf_iorequest(bp); + } + error = xfs_buf_iowait(bp); if (error) { xfs_buf_ioerror_alert(bp, __func__); From 33177f05364c6cd13b06d0f3500dad07cf4647c2 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 12 Dec 2013 16:34:36 +1100 Subject: [PATCH 189/312] xfs: swalloc doesn't align allocations properly When swalloc is specified as a mount option, allocations are supposed to be aligned to the stripe width rather than the stripe unit of the underlying filesystem. However, it does not do this. What the implementation does is round up the allocation size to a stripe width, hence ensuring that all allocations span a full stripe width. It does not, however, ensure that that allocation is aligned to a stripe width, and hence the allocations can span multiple underlying stripes and so still see RMW cycles for things like direct IO on MD RAID. So, if the swalloc mount option is set, change the allocation alignment in xfs_bmap_btalloc() to use the stripe width rather than the stripe unit. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Ben Myers Signed-off-by: Ben Myers --- fs/xfs/xfs_bmap.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 8401f11f378f..3b2c14b6f0fb 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -3648,10 +3648,19 @@ xfs_bmap_btalloc( int isaligned; int tryagain; int error; + int stripe_align; ASSERT(ap->length); mp = ap->ip->i_mount; + + /* stripe alignment for allocation is determined by mount parameters */ + stripe_align = 0; + if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) + stripe_align = mp->m_swidth; + else if (mp->m_dalign) + stripe_align = mp->m_dalign; + align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; if (unlikely(align)) { error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, @@ -3660,6 +3669,8 @@ xfs_bmap_btalloc( ASSERT(!error); ASSERT(ap->length); } + + nullfb = *ap->firstblock == NULLFSBLOCK; fb_agno = nullfb ? 
NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock); if (nullfb) { @@ -3735,7 +3746,7 @@ xfs_bmap_btalloc( */ if (!ap->flist->xbf_low && ap->aeof) { if (!ap->offset) { - args.alignment = mp->m_dalign; + args.alignment = stripe_align; atype = args.type; isaligned = 1; /* @@ -3760,13 +3771,13 @@ xfs_bmap_btalloc( * of minlen+alignment+slop doesn't go up * between the calls. */ - if (blen > mp->m_dalign && blen <= args.maxlen) - nextminlen = blen - mp->m_dalign; + if (blen > stripe_align && blen <= args.maxlen) + nextminlen = blen - stripe_align; else nextminlen = args.minlen; - if (nextminlen + mp->m_dalign > args.minlen + 1) + if (nextminlen + stripe_align > args.minlen + 1) args.minalignslop = - nextminlen + mp->m_dalign - + nextminlen + stripe_align - args.minlen - 1; else args.minalignslop = 0; @@ -3788,7 +3799,7 @@ xfs_bmap_btalloc( */ args.type = atype; args.fsbno = ap->blkno; - args.alignment = mp->m_dalign; + args.alignment = stripe_align; args.minlen = nextminlen; args.minalignslop = 0; isaligned = 1; From ac8809f9ab01a73de1a47b5a37bd8dcca8712fb3 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 12 Dec 2013 16:34:38 +1100 Subject: [PATCH 190/312] xfs: abort metadata writeback on permanent errors If we are doing aysnc writeback of metadata, we can get write errors but have nobody to report them to. At the moment, we simply attempt to reissue the write from io completion in the hope that it's a transient error. When it's not a transient error, the buffer is stuck forever in this loop, and we cannot break out of it. Eventually, unmount will hang because the AIL cannot be emptied and everything goes downhill from them. To solve this problem, only retry the write IO once before aborting it. We don't throw the buffer away because some transient errors can last minutes (e.g. FC path failover) or even hours (thin provisioned devices that have run out of backing space) before they go away. Hence we really want to keep trying until we can't try any more. Because the buffer was not cleaned, however, it does not get removed from the AIL and hence the next pass across the AIL will start IO on it again. As such, we still get the "retry forever" semantics that we currently have, but we allow other access to the buffer in the mean time. Meanwhile the filesystem can continue to modify the buffer and relog it, so the IO errors won't hang the log or the filesystem. Now when we are pushing the AIL, we can see all these "permanent IO error" buffers and we can issue a warning about failures before we retry the IO. We can also catch these buffers when unmounting an issue a corruption warning, too. 
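The retry policy can be reduced to a one-bit state machine per buffer: the first failure sets the flag and resubmits quietly, later failures leave the buffer dirty and warn. A toy sketch of that policy (not the xfs_buf code; the rate limiting and AIL mechanics are left out):

  #include <stdbool.h>
  #include <stdio.h>

  struct buf {
          bool write_fail;        /* set after the first failed async write */
  };

  /* Hypothetical async submission that keeps failing. */
  static bool submit_write(struct buf *bp)
  {
          (void)bp;
          return false;
  }

  static void write_done(struct buf *bp, bool ok)
  {
          if (ok) {
                  bp->write_fail = false;
                  return;
          }
          if (!bp->write_fail) {
                  bp->write_fail = true;          /* retry once, quietly */
                  submit_write(bp);
          } else {
                  /* stays dirty for the next push; warn instead of looping */
                  fprintf(stderr, "persistent async write failure\n");
          }
  }

  int main(void)
  {
          struct buf bp = { false };

          write_done(&bp, submit_write(&bp));     /* first failure: retried */
          write_done(&bp, submit_write(&bp));     /* still failing: warned */
          return 0;
  }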
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Ben Myers --- fs/xfs/xfs_buf.c | 10 ++++++++-- fs/xfs/xfs_buf.h | 6 +++++- fs/xfs/xfs_buf_item.c | 21 +++++++++++++++++++-- 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 9fa9c4304613..afe7645e4b2b 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1156,7 +1156,7 @@ xfs_bwrite( ASSERT(xfs_buf_islocked(bp)); bp->b_flags |= XBF_WRITE; - bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); + bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL); xfs_bdstrat_cb(bp); @@ -1501,6 +1501,12 @@ xfs_wait_buftarg( struct xfs_buf *bp; bp = list_first_entry(&dispose, struct xfs_buf, b_lru); list_del_init(&bp->b_lru); + if (bp->b_flags & XBF_WRITE_FAIL) { + xfs_alert(btp->bt_mount, +"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n" +"Please run xfs_repair to determine the extent of the problem.", + (long long)bp->b_bn); + } xfs_buf_rele(bp); } if (loop++ != 0) @@ -1784,7 +1790,7 @@ __xfs_buf_delwri_submit( blk_start_plug(&plug); list_for_each_entry_safe(bp, n, io_list, b_list) { - bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); + bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); bp->b_flags |= XBF_WRITE; if (!wait) { diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 7e41b08017f7..1cf21a4a9f22 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -45,6 +45,7 @@ typedef enum { #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ +#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */ /* I/O hints for the BIO layer */ #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ @@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t; { XBF_ASYNC, "ASYNC" }, \ { XBF_DONE, "DONE" }, \ { XBF_STALE, "STALE" }, \ + { XBF_WRITE_FAIL, "WRITE_FAIL" }, \ { XBF_SYNCIO, "SYNCIO" }, \ { XBF_FUA, "FUA" }, \ { XBF_FLUSH, "FLUSH" }, \ @@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t; { _XBF_DELWRI_Q, "DELWRI_Q" }, \ { _XBF_COMPOUND, "COMPOUND" } + /* * Internal state flags. */ @@ -300,7 +303,8 @@ extern void xfs_buf_terminate(void); #define XFS_BUF_ZEROFLAGS(bp) \ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ - XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) + XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \ + XBF_WRITE_FAIL)) void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a64f67ba25d3..2227b9b050bb 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -496,6 +496,14 @@ xfs_buf_item_unpin( } } +/* + * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30 + * seconds so as to not spam logs too much on repeated detection of the same + * buffer being bad.. + */ + +DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10); + STATIC uint xfs_buf_item_push( struct xfs_log_item *lip, @@ -524,6 +532,14 @@ xfs_buf_item_push( trace_xfs_buf_item_push(bip); + /* has a previous flush failed due to IO errors? */ + if ((bp->b_flags & XBF_WRITE_FAIL) && + ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { + xfs_warn(bp->b_target->bt_mount, +"Detected failing async write on buffer block 0x%llx. 
Retrying async write.\n", + (long long)bp->b_bn); + } + if (!xfs_buf_delwri_queue(bp, buffer_list)) rval = XFS_ITEM_FLUSHING; xfs_buf_unlock(bp); @@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks( xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ - if (!XFS_BUF_ISSTALE(bp)) { - bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; + if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) { + bp->b_flags |= XBF_WRITE | XBF_ASYNC | + XBF_DONE | XBF_WRITE_FAIL; xfs_buf_iorequest(bp); } else { xfs_buf_relse(bp); From ed697e1aaf7237b1a62af39f64463b05c262808d Mon Sep 17 00:00:00 2001 From: JongHo Kim Date: Tue, 17 Dec 2013 23:02:24 +0900 Subject: [PATCH 191/312] ALSA: Add SNDRV_PCM_STATE_PAUSED case in wait_for_avail function When the process is sleeping at the SNDRV_PCM_STATE_PAUSED state from the wait_for_avail function, the sleep process will be woken by timeout(10 seconds). Even if the sleep process wake up by timeout, by this patch, the process will continue with sleep and wait for the other state. Signed-off-by: JongHo Kim Cc: Signed-off-by: Takashi Iwai --- sound/core/pcm_lib.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 6e03b465e44e..a2104671f51d 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream, case SNDRV_PCM_STATE_DISCONNECTED: err = -EBADFD; goto _endloop; + case SNDRV_PCM_STATE_PAUSED: + continue; } if (!tout) { snd_printd("%s write error (DMA or IRQ trouble?)\n", From d24c195f90cb1adb178d26d84c722d4b9e551e05 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 10 Dec 2013 12:56:59 +0200 Subject: [PATCH 192/312] serial: 8250_dw: add new ACPI IDs Newer Intel PCHs with LPSS have the same Designware controllers than Haswell but ACPI IDs are different. Add these IDs to the driver list. Signed-off-by: Mika Westerberg Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dw.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 5f096c74da7e..06525f10e364 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -457,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); static const struct acpi_device_id dw8250_acpi_match[] = { { "INT33C4", 0 }, { "INT33C5", 0 }, + { "INT3434", 0 }, + { "INT3435", 0 }, { "80860F0A", 0 }, { }, }; From 1075a6e2dc7e2a96efc417b98dd98f57fdae985d Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Mon, 9 Dec 2013 18:06:07 -0500 Subject: [PATCH 193/312] n_tty: Fix apparent order of echoed output With block processing of echoed output, observed output order is still required. Push completed echoes and echo commands prior to output. Introduce echo_mark echo buffer index, which tracks completed echo commands; ie., those submitted via commit_echoes but which may not have been committed. Ensure that completed echoes are output prior to subsequent terminal writes in process_echoes(). Fixes newline/prompt output order in cooked mode shell. 
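The index relationships introduced above can be pictured as tail <= commit <= mark <= head. The following is a simplified model of that bookkeeping, not the n_tty code (ring-buffer wraparound and locking are omitted): commit_echoes() advances the mark, and process_echoes() pulls the commit up to the mark before any new output is written, which is what restores the observed ordering.

    #include <stddef.h>

    struct echo_idx {
        size_t head;    /* producer: bytes added to the echo buffer */
        size_t mark;    /* completed echo commands (echo_mark)      */
        size_t commit;  /* bytes handed to the output path          */
        size_t tail;    /* bytes actually written to the tty        */
    };

    /* An echo command is complete: everything up to head is now
     * eligible for output, even if it is not flushed right away. */
    static void commit_echoes(struct echo_idx *e)
    {
        e->mark = e->head;
    }

    /* Called before writing new output: push all completed echoes
     * first so completed echoes precede subsequent terminal writes. */
    static void process_echoes(struct echo_idx *e)
    {
        if (e->mark == e->tail)
            return;             /* nothing completed and pending */
        e->commit = e->mark;
        e->tail = e->commit;    /* stands in for __process_echoes() */
    }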
Cc: # 3.12.x : 39434ab n_tty: Fix missing newline echo Reported-by: Karl Dahlke Reported-by: Mikulas Patocka Signed-off-by: Peter Hurley Tested-by: Karl Dahlke Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_tty.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 268b62768f2b..34aacaaae14a 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -93,6 +93,7 @@ struct n_tty_data { size_t canon_head; size_t echo_head; size_t echo_commit; + size_t echo_mark; DECLARE_BITMAP(char_map, 256); /* private to n_tty_receive_overrun (single-threaded) */ @@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; + ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; @@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty) size_t head; head = ldata->echo_head; + ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; /* Process committed echoes if the accumulated # of bytes @@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty) size_t echoed; if ((!L_ECHO(tty) && !L_ECHONL(tty)) || - ldata->echo_commit == ldata->echo_tail) + ldata->echo_mark == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); + ldata->echo_commit = ldata->echo_mark; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); @@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty) tty->ops->flush_chars(tty); } +/* NB: echo_mark and echo_head should be equivalent here */ static void flush_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; From a885b3ccc74d8e38074e1c43a47c354c5ea0b01e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Dec 2013 14:34:50 +0000 Subject: [PATCH 194/312] drm/i915: Use the correct GMCH_CTRL register for Sandybridge+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The GMCH_CTRL register (or MGCC in the spec) is at a different address on Sandybridge, and the address to which we currently write to is undefined. These stray writes appear to upset (hard hang) my Ivybridge machine whilst it is in UEFI mode. Note that the register is still marked as locked RO on Sandybridge, so vgaarb is still dysfunctional. Signed-off-by: Chris Wilson Cc: Jani Nikula Cc: Ville Syrjälä Cc: stable@vger.kernel.org Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4049a65a9f8c..54e82a80cf50 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11126,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector, int intel_modeset_vga_set_state(struct drm_device *dev, bool state) { struct drm_i915_private *dev_priv = dev->dev_private; + unsigned reg = INTEL_INFO(dev)->gen >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; - pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); + pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); if (state) gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; else gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; - pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); + pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); return 0; } From f447ef4a56dee4b68a91460bcdfe06b5011085f2 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Fri, 11 Oct 2013 08:45:51 -0400 Subject: [PATCH 195/312] cpupower: Fix segfault due to incorrect getopt_long arugments If a user calls 'cpupower set --perf-bias 15', the process will end with a SIGSEGV in libc because cpupower-set passes a NULL optarg to the atoi call. This is because the getopt_long structure currently has all of the options as having an optional_argument when they really have a required argument. We change the structure to use required_argument to match the short options and it resolves the issue. This fixes https://bugzilla.redhat.com/show_bug.cgi?id=1000439 Signed-off-by: Josh Boyer Cc: Dominik Brodowski Cc: Thomas Renninger Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- tools/power/cpupower/utils/cpupower-set.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index dc4de3762111..bcf1d2f0b791 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ b/tools/power/cpupower/utils/cpupower-set.c @@ -18,9 +18,9 @@ #include "helpers/bitmask.h" static struct option set_opts[] = { - { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, - { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, - { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, + { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, + { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'}, + { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'}, { }, }; From 50dc875f2e6e2e04aed3b3033eb0ac99192d6d02 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 13 Dec 2013 17:21:27 +0800 Subject: [PATCH 196/312] netvsc: don't flush peers notifying work during setting mtu There's a possible deadlock if we flush the peers notifying work during setting mtu: [ 22.991149] ====================================================== [ 22.991173] [ INFO: possible circular locking dependency detected ] [ 22.991198] 3.10.0-54.0.1.el7.x86_64.debug #1 Not tainted [ 22.991219] ------------------------------------------------------- [ 22.991243] ip/974 is trying to acquire lock: [ 22.991261] ((&(&net_device_ctx->dwork)->work)){+.+.+.}, at: [] flush_work+0x5/0x2e0 [ 22.991307] but task is already holding lock: [ 22.991330] (rtnl_mutex){+.+.+.}, at: [] rtnetlink_rcv+0x1b/0x40 [ 22.991367] which lock already depends on the new lock. 
[ 22.991398] the existing dependency chain (in reverse order) is: [ 22.991426] -> #1 (rtnl_mutex){+.+.+.}: [ 22.991449] [] __lock_acquire+0xb19/0x1260 [ 22.991477] [] lock_acquire+0xa2/0x1f0 [ 22.991501] [] mutex_lock_nested+0x89/0x4f0 [ 22.991529] [] rtnl_lock+0x17/0x20 [ 22.991552] [] netdev_notify_peers+0x12/0x30 [ 22.991579] [] netvsc_send_garp+0x22/0x30 [hv_netvsc] [ 22.991610] [] process_one_work+0x211/0x6e0 [ 22.991637] [] worker_thread+0x11b/0x3a0 [ 22.991663] [] kthread+0xed/0x100 [ 22.991686] [] ret_from_fork+0x7c/0xb0 [ 22.991715] -> #0 ((&(&net_device_ctx->dwork)->work)){+.+.+.}: [ 22.991715] [] check_prevs_add+0x967/0x970 [ 22.991715] [] __lock_acquire+0xb19/0x1260 [ 22.991715] [] lock_acquire+0xa2/0x1f0 [ 22.991715] [] flush_work+0x4e/0x2e0 [ 22.991715] [] __cancel_work_timer+0x95/0x130 [ 22.991715] [] cancel_delayed_work_sync+0x13/0x20 [ 22.991715] [] netvsc_change_mtu+0x84/0x200 [hv_netvsc] [ 22.991715] [] dev_set_mtu+0x34/0x80 [ 22.991715] [] do_setlink+0x23a/0xa00 [ 22.991715] [] rtnl_newlink+0x394/0x5e0 [ 22.991715] [] rtnetlink_rcv_msg+0x9c/0x260 [ 22.991715] [] netlink_rcv_skb+0xa9/0xc0 [ 22.991715] [] rtnetlink_rcv+0x2a/0x40 [ 22.991715] [] netlink_unicast+0xdd/0x190 [ 22.991715] [] netlink_sendmsg+0x337/0x750 [ 22.991715] [] sock_sendmsg+0x99/0xd0 [ 22.991715] [] ___sys_sendmsg+0x39e/0x3b0 [ 22.991715] [] __sys_sendmsg+0x42/0x80 [ 22.991715] [] SyS_sendmsg+0x12/0x20 [ 22.991715] [] system_call_fastpath+0x16/0x1b This is because we hold the rtnl_lock() before ndo_change_mtu() and try to flush the work in netvsc_change_mtu(), in the mean time, netdev_notify_peers() may be called from worker and also trying to hold the rtnl_lock. This will lead the flush won't succeed forever. Solve this by not canceling and flushing the work, this is safe because the transmission done by NETDEV_NOTIFY_PEERS was synchronized with the netif_tx_disable() called by netvsc_change_mtu(). Reported-by: Yaju Cao Tested-by: Yaju Cao Cc: K. Y. Srinivasan Cc: Haiyang Zhang Signed-off-by: Jason Wang Acked-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 524f713f6017..f8135725bcf6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -327,7 +327,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return -EINVAL; nvdev->start_remove = true; - cancel_delayed_work_sync(&ndevctx->dwork); cancel_work_sync(&ndevctx->work); netif_tx_disable(ndev); rndis_filter_device_remove(hdev); From e47eb5dfb296bf217e9ebee7b2f07486670b9c1b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 15 Dec 2013 10:53:46 -0800 Subject: [PATCH 197/312] udp: ipv4: do not use sk_dst_lock from softirq context Using sk_dst_lock from softirq context is not supported right now. Instead of adding BH protection everywhere, udp_sk_rx_dst_set() can instead use xchg(), as suggested by David. Reported-by: Fengguang Wu Fixes: 975022310233 ("udp: ipv4: must add synchronization in udp_sk_rx_dst_set()") Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 62c19fdd102d..f140048334ce 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1600,20 +1600,15 @@ static void flush_stack(struct sock **stack, unsigned int count, } /* For TCP sockets, sk_rx_dst is protected by socket lock - * For UDP, we use sk_dst_lock to guard against concurrent changes. + * For UDP, we use xchg() to guard against concurrent changes. */ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old; - spin_lock(&sk->sk_dst_lock); - old = sk->sk_rx_dst; - if (likely(old != dst)) { - dst_hold(dst); - sk->sk_rx_dst = dst; - dst_release(old); - } - spin_unlock(&sk->sk_dst_lock); + dst_hold(dst); + old = xchg(&sk->sk_rx_dst, dst); + dst_release(old); } /* From d1fc50247693bf9116184c19ca16386ff14f5aed Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 13 Dec 2013 12:56:05 +0100 Subject: [PATCH 198/312] MAINTAINERS: Update the IPsec maintainer entry Add the IPsec git trees and some pure IPsec modules to the IPsec section in the MAINTAINERS file. Signed-off-by: Steffen Klassert Signed-off-by: David S. Miller --- MAINTAINERS | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 1344816c4c06..b8af16d5cf63 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5911,12 +5911,21 @@ M: Steffen Klassert M: Herbert Xu M: "David S. Miller" L: netdev@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git S: Maintained F: net/xfrm/ F: net/key/ F: net/ipv4/xfrm* +F: net/ipv4/esp4.c +F: net/ipv4/ah4.c +F: net/ipv4/ipcomp.c +F: net/ipv4/ip_vti.c F: net/ipv6/xfrm* +F: net/ipv6/esp6.c +F: net/ipv6/ah6.c +F: net/ipv6/ipcomp6.c +F: net/ipv6/ip6_vti.c F: include/uapi/linux/xfrm.h F: include/net/xfrm.h From 37ab4fa7844a044dc21fde45e2a0fc2f3c3b6490 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Fri, 13 Dec 2013 10:54:22 -0500 Subject: [PATCH 199/312] net: unix: allow bind to fail on mutex lock This is similar to the set_peek_off patch where calling bind while the socket is stuck in unix_dgram_recvmsg() will block and cause a hung task spew after a while. This is also the last place that did a straightforward mutex_lock(), so there shouldn't be any more of these patches. Signed-off-by: Sasha Levin Signed-off-by: David S. Miller --- net/unix/af_unix.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index a0ca162e5bd5..a427623ee574 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -718,7 +718,9 @@ static int unix_autobind(struct socket *sock) int err; unsigned int retries = 0; - mutex_lock(&u->readlock); + err = mutex_lock_interruptible(&u->readlock); + if (err) + return err; err = 0; if (u->addr) @@ -877,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; addr_len = err; - mutex_lock(&u->readlock); + err = mutex_lock_interruptible(&u->readlock); + if (err) + goto out; err = -EINVAL; if (u->addr) From 188b04d580ab7acf11eb77cb564692050c10edfe Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Sat, 14 Dec 2013 04:42:13 +0100 Subject: [PATCH 200/312] ipv4: improve documentation of ip_no_pmtu_disc Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- Documentation/networking/ip-sysctl.txt | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 3c12d9a7ed00..8a984e994e61 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -16,8 +16,12 @@ ip_default_ttl - INTEGER Default: 64 (as recommended by RFC1700) ip_no_pmtu_disc - BOOLEAN - Disable Path MTU Discovery. - default FALSE + Disable Path MTU Discovery. If enabled and a + fragmentation-required ICMP is received, the PMTU to this + destination will be set to min_pmtu (see below). You will need + to raise min_pmtu to the smallest interface MTU on your system + manually if you want to avoid locally generated fragments. + Default: FALSE min_pmtu - INTEGER default 552 - minimum discovered Path MTU From c5fe7a41ad51e0c2df8381e9c85d7343b36b2c1e Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 11 Dec 2013 18:45:00 +0000 Subject: [PATCH 201/312] staging:iio:mag:hmc5843 fix incorrect endianness of channel as a result of missuse of the IIO_ST macro. This driver sets the shift value equal to IIO_BE (or 1) rather than setting that to 0 and specificying the endianness. This means the channel type is missreported as [be|le]:u16/16>>1 where the be|le is dependent on the cpu native endianness, rather than be:u16/16>>0 resulting in any userspace code using this information, miss converting the channel and generating thoroughly trashed data. Signed-off-by: Jonathan Cameron Acked-by: Lars-Peter Clausen Cc: stable@vger.kernel.org --- drivers/staging/iio/magnetometer/hmc5843.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c index 99421f90d189..0485d7f39867 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.c +++ b/drivers/staging/iio/magnetometer/hmc5843.c @@ -451,7 +451,12 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p) .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = idx, \ - .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ } static const struct iio_chan_spec hmc5843_channels[] = { From 3425c0f7ac61f2fcfb7f2728e9b7ba7e27aec429 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 11 Dec 2013 18:45:00 +0000 Subject: [PATCH 202/312] iio:imu:adis16400 fix pressure channel scan type A single channel in this driver was using the IIO_ST macro. This does not provide a parameter for setting the endianness of the channel. Thus this channel will have been reported as whatever is the native endianness of the cpu rather than big endian. This means it would be incorrect on little endian platforms. 
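The practical effect of these scan_type endianness fixes can be shown with a short userspace illustration (not driver code): a 16-bit sample that arrives most significant byte first must be byte-swapped on a little-endian CPU, and a consumer only knows to do that if the channel really is described as be:u16 rather than in native CPU order.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>      /* ntohs(): big-endian 16-bit to CPU order */

    int main(void)
    {
        uint8_t raw[2] = { 0x12, 0x34 };    /* bytes as the device sends them, MSB first */
        uint16_t native, fixed;

        memcpy(&native, raw, sizeof(native));   /* interpreted in CPU endianness */
        fixed = ntohs(native);                  /* interpreted as be:u16 */

        printf("native read: 0x%04x, big-endian read: 0x%04x\n", native, fixed);
        return 0;
    }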
Signed-off-by: Jonathan Cameron Acked-by: Lars-Peter Clausen Cc: stable@vger.kernel.org --- drivers/iio/imu/adis16400_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 3fb7757a1028..368660dfe135 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = { .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = ADIS16448_BARO_OUT, .scan_index = ADIS16400_SCAN_BARO, - .scan_type = IIO_ST('s', 16, 16, 0), + .scan_type = { + .sign = 's', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_BE, + }, }, ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), IIO_CHAN_SOFT_TIMESTAMP(11) From e39d99059ad7f75d7ae2d3c59055d3c476cdb0d9 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 11 Dec 2013 18:45:00 +0000 Subject: [PATCH 203/312] iio:adc:ad7887 Fix channel reported endianness from cpu to big endian Note this also sets the endianness to big endian whereas it would previously have defaulted to the cpu endian. Hence technically this is a bug fix on LE platforms. Signed-off-by: Jonathan Cameron Acked-by: Lars-Peter Clausen Cc: stable@vger.kernel.org --- drivers/iio/adc/ad7887.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index acb7f90359a3..749a6cadab8b 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c @@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = 1, .scan_index = 1, - .scan_type = IIO_ST('u', 12, 16, 0), + .scan_type = { + .sign = 'u', + .realbits = 12, + .storagebits = 16, + .shift = 0, + .endianness = IIO_BE, + }, }, .channel[1] = { .type = IIO_VOLTAGE, @@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = { .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = 0, .scan_index = 0, - .scan_type = IIO_ST('u', 12, 16, 0), + .scan_type = { + .sign = 'u', + .realbits = 12, + .storagebits = 16, + .shift = 0, + .endianness = IIO_BE, + }, }, .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), .int_vref_mv = 2500, From 0283f7a100882684ad32b768f9f1ad81658a0b92 Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Fri, 13 Dec 2013 12:00:30 +0000 Subject: [PATCH 204/312] staging: comedi: 8255_pci: fix for newer PCI-DIO48H At some point, Measurement Computing / ComputerBoards redesigned the PCI-DIO48H to use a PLX PCI interface chip instead of an AMCC chip. This meant they had to put their hardware registers in the PCI BAR 2 region instead of PCI BAR 1. Unfortunately, they kept the same PCI device ID for the new design. This means the driver recognizes the newer cards, but doesn't work (and is likely to screw up the local configuration registers of the PLX chip) because it's using the wrong region. Since the PCI subvendor and subdevice IDs were both zero on the old design, but are the same as the vendor and device on the new design, we can tell the old design and new design apart easily enough. Split the existing entry for the PCI-DIO48H in `pci_8255_boards[]` into two new entries, referenced by different entries in the PCI device ID table `pci_8255_pci_table[]`. Use the same board name for both entries. 
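The ID matching described above can be sketched as follows. This is an illustrative userspace model, not the comedi driver; 0x1307 is used here as the ComputerBoards vendor ID (PCI_VENDOR_ID_CB) purely for the example:

    #include <stdint.h>
    #include <stdio.h>

    struct pci_ids { uint16_t vendor, device, subvendor, subdevice; };

    /* Old boards leave the subsystem IDs at zero; the redesigned boards
     * repeat the vendor/device pair there, so one device ID can still be
     * routed to two different board descriptions. */
    static const char *pick_board(const struct pci_ids *id)
    {
        if (id->subvendor == 0 && id->subdevice == 0)
            return "PCI-DIO48H (old design, registers in BAR 1)";
        if (id->subvendor == id->vendor && id->subdevice == id->device)
            return "PCI-DIO48H (new design, PLX bridge, registers in BAR 2)";
        return "unknown variant";
    }

    int main(void)
    {
        const struct pci_ids old_card = { 0x1307, 0x000b, 0x0000, 0x0000 };
        const struct pci_ids new_card = { 0x1307, 0x000b, 0x1307, 0x000b };

        printf("%s\n%s\n", pick_board(&old_card), pick_board(&new_card));
        return 0;
    }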
Signed-off-by: Ian Abbott Cc: stablle # 3.10.y # 3.11.y # 3.12.y # 3.13.y Acked-by: H Hartley Sweeten Signed-off-by: Greg Kroah-Hartman --- drivers/staging/comedi/drivers/8255_pci.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c index 432e3f9c3301..c55f234b29e6 100644 --- a/drivers/staging/comedi/drivers/8255_pci.c +++ b/drivers/staging/comedi/drivers/8255_pci.c @@ -63,7 +63,8 @@ enum pci_8255_boardid { BOARD_ADLINK_PCI7296, BOARD_CB_PCIDIO24, BOARD_CB_PCIDIO24H, - BOARD_CB_PCIDIO48H, + BOARD_CB_PCIDIO48H_OLD, + BOARD_CB_PCIDIO48H_NEW, BOARD_CB_PCIDIO96H, BOARD_NI_PCIDIO96, BOARD_NI_PCIDIO96B, @@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = { .dio_badr = 2, .n_8255 = 1, }, - [BOARD_CB_PCIDIO48H] = { + [BOARD_CB_PCIDIO48H_OLD] = { .name = "cb_pci-dio48h", .dio_badr = 1, .n_8255 = 2, }, + [BOARD_CB_PCIDIO48H_NEW] = { + .name = "cb_pci-dio48h", + .dio_badr = 2, + .n_8255 = 2, + }, [BOARD_CB_PCIDIO96H] = { .name = "cb_pci-dio96h", .dio_badr = 2, @@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = { { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, - { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000), + .driver_data = BOARD_CB_PCIDIO48H_OLD }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b), + .driver_data = BOARD_CB_PCIDIO48H_NEW }, { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, From c6236c0ce39c809c336ca929f68cf8ad02cf94e0 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Tue, 10 Dec 2013 16:31:25 -0700 Subject: [PATCH 205/312] staging: comedi: drivers: fix return value of comedi_load_firmware() Some of the callback functions that upload the firmware in the comedi drivers return a positive value indicating the number of bytes sent to the device. Detect this condition and just return '0' to indicate a successful upload. Reported-by: Bernd Porr Signed-off-by: H Hartley Sweeten Acked-by: Ian Abbott Acked-by: Dan Carpenter Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/staging/comedi/drivers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 8f02bf66e20b..4964d2a2fc7d 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev, release_firmware(fw); } - return ret; + return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(comedi_load_firmware); From 53385d2d1de84f4036a0919ec46964c4e81b83f5 Mon Sep 17 00:00:00 2001 From: Bob Gilligan Date: Sun, 15 Dec 2013 13:39:56 -0800 Subject: [PATCH 206/312] neigh: Netlink notification for administrative NUD state change The neighbour code sends up an RTM_NEWNEIGH netlink notification if the NUD state of a neighbour cache entry is changed by a timer (e.g. from REACHABLE to STALE), even if the lladdr of the entry has not changed. But an administrative change to the the NUD state of a neighbour cache entry that does not change the lladdr (e.g. via "ip -4 neigh change ... nud ...") does not trigger a netlink notification. 
This means that netlink listeners will not hear about administrative NUD state changes such as from a resolved state to PERMANENT. This patch changes the neighbor code to generate an RTM_NEWNEIGH message when the NUD state of an entry is changed administratively. Signed-off-by: Bob Gilligan Acked-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/core/neighbour.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ca15f32821fb..36b1443f9ae4 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh->parms->reachable_time : 0))); neigh->nud_state = new; + notify = 1; } if (lladdr != neigh->ha) { From 7022ef8b2a673db3d12a348331181f579afe9b22 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 16 Dec 2013 10:45:05 +0800 Subject: [PATCH 207/312] xen-netback: fix fragments error handling in checksum_setup_ip() Fix to return -EPROTO error if fragments detected in checksum_setup_ip(). Fixes: 1431fb31ecba ('xen-netback: fix fragment detection in checksum setup') Signed-off-by: Wei Yongjun Reviewed-by: Paul Durrant Acked-by: Ian Campbell Signed-off-by: David S. Miller --- drivers/net/xen-netback/netback.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e884ee1fe7ed..27bbe58dcbe7 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1197,6 +1197,9 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, err = -EPROTO; + if (fragment) + goto out; + switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: err = maybe_pull_tail(skb, From fb5f1834c3221e459324c6885eaad75429f722a5 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Sun, 8 Dec 2013 15:59:59 +0100 Subject: [PATCH 208/312] usb: ohci-at91: fix irq and iomem resource retrieval When using dt resources retrieval (interrupts and reg properties) there is no predefined order for these resources in the platform dev resources table. Retrieve resources using platform_get_resource and platform_get_irq functions instead of direct resource table entries to avoid resource type mismatch. 
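The general shape of the lookup-by-type pattern adopted above is shown below as a kernel-style sketch only (hypothetical driver, error handling trimmed); the point is that neither the memory region nor the interrupt is assumed to sit at a fixed position in the resource table:

    #include <linux/platform_device.h>
    #include <linux/ioport.h>
    #include <linux/errno.h>

    static int example_probe(struct platform_device *pdev)
    {
        struct resource *res;
        int irq;

        /* Ask for the memory region by type and index ... */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
            return -ENXIO;

        /* ... and for the interrupt through its own accessor, so the
         * ordering of entries in the resource table does not matter. */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;

        /* map res, request irq, register the device, etc. */
        return 0;
    }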
Signed-off-by: Boris BREZILLON Acked-by: Nicolas Ferre Signed-off-by: Alan Stern Reviewed-by: Tomasz Figa Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ohci-at91.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 418444ebb1b8..8c356af79409 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, struct ohci_hcd *ohci; int retval; struct usb_hcd *hcd = NULL; + struct device *dev = &pdev->dev; + struct resource *res; + int irq; - if (pdev->num_resources != 2) { - pr_debug("hcd probe: invalid num_resources"); - return -ENODEV; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(dev, "hcd probe: missing memory resource\n"); + return -ENXIO; } - if ((pdev->resource[0].flags != IORESOURCE_MEM) - || (pdev->resource[1].flags != IORESOURCE_IRQ)) { - pr_debug("hcd probe: invalid resource type\n"); - return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_dbg(dev, "hcd probe: missing irq resource\n"); + return irq; } hcd = usb_create_hcd(driver, &pdev->dev, "at91"); if (!hcd) return -ENOMEM; - hcd->rsrc_start = pdev->resource[0].start; - hcd->rsrc_len = resource_size(&pdev->resource[0]); + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { pr_debug("request_mem_region failed\n"); @@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, ohci->num_ports = board->ports; at91_start_hc(pdev); - retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); + retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval == 0) return retval; From b84caae486135d588fb200973b0be8cb8a511edf Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Mon, 16 Dec 2013 15:36:56 -0500 Subject: [PATCH 209/312] qlcnic: Fix usage of netif_tx_{wake, stop} api during link change. o Driver was using netif_tx_{stop,wake}_all_queues() api during link change event. Remove these api calls to manage queue start/stop event, as core networking stack will manage this based on netif_carrier_{on,off} call. These API's were modified as part of commit id 012ec81223aa45d2b80aeafb77392fd1a19c7b10 ("qlcnic: Multi Tx queue support for 82xx Series adapter.") Signed-off-by: Shahed Shaikh Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0149c9495347..eda6c691d897 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -687,17 +687,11 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) if (adapter->ahw->linkup && !linkup) { netdev_info(netdev, "NIC Link is down\n"); adapter->ahw->linkup = 0; - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - } + netif_carrier_off(netdev); } else if (!adapter->ahw->linkup && linkup) { netdev_info(netdev, "NIC Link is up\n"); adapter->ahw->linkup = 1; - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } + netif_carrier_on(netdev); } } From 3bf517df0d99f1cf5d369c73ab68e0afe6a3c2f9 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Mon, 16 Dec 2013 15:36:57 -0500 Subject: [PATCH 210/312] qlcnic: Fix diagnostic test for all adapters. o Driver should re-allocate all Tx queues after completing diagnostic tests. This regression was added by commit id c2c5e3a0681bb1945c0cb211a5f4baa22cb2cbb3 ("qlcnic: Enable diagnostic test for multiple Tx queues.") Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 2 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index b36c02fafcfd..6d3edf6b6a96 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -948,6 +948,7 @@ static int qlcnic_irq_test(struct net_device *netdev) struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; int ret, drv_sds_rings = adapter->drv_sds_rings; + int drv_tx_rings = adapter->drv_tx_rings; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_interrupt_test(netdev); @@ -980,6 +981,7 @@ static int qlcnic_irq_test(struct net_device *netdev) clear_diag_irq: adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 05c1eef8df13..aa019c398e9b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1940,7 +1940,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) qlcnic_detach(adapter); adapter->drv_sds_rings = QLCNIC_SINGLE_RING; - adapter->drv_tx_rings = QLCNIC_SINGLE_RING; adapter->ahw->diag_test = test; adapter->ahw->linkup = 0; From f9566265d7b44fea789072dcfa9a454e7e433af6 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Mon, 16 Dec 2013 15:36:58 -0500 Subject: [PATCH 211/312] qlcnic: Fix TSS/RSS ring validation logic. o TSS/RSS ring validation does not take into account that either of these ring values can be 0. This patch fixes this validation and would fail set_channel operation if any of these ring value is 0. 
This regression was added as part of commit id 34e8c406fda5b5a9d2e126a92bab84cd28e3b5fa ("qlcnic: refactor Tx/SDS ring calculation and validation in driver.") Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4 ++-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 6 +++++- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1 + 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 89208e5b25d6..fae1b7171576 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2073,8 +2073,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; - adapter->max_sds_rings = ahw->max_rx_ques; - adapter->max_tx_rings = ahw->max_tx_ques; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; + adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; } else { return -EIO; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 6d3edf6b6a96..78f5e815139e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -667,9 +667,13 @@ qlcnic_set_ringparam(struct net_device *dev, static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, u8 rx_ring, u8 tx_ring) { + if (rx_ring == 0 || tx_ring == 0) + return -EINVAL; + if (rx_ring != 0) { if (rx_ring > adapter->max_sds_rings) { - netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", + netdev_err(adapter->netdev, + "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", rx_ring, adapter->max_sds_rings); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index aa019c398e9b..2c8cac0c6a55 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) } else { adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; } From b17a44d8b86f48e34011b884a934231ae2928d66 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Mon, 16 Dec 2013 15:36:59 -0500 Subject: [PATCH 212/312] qlcnic: Fix TSS/RSS validation for 83xx/84xx series adapter. o Current code was not allowing the user to configure more than one Tx ring using ethtool for 83xx/84xx adapter. This regression was introduced by commit id 18afc102fdcb95d6c7d57f2967a06f2f8fe3ba4c ("qlcnic: Enable multiple Tx queue support for 83xx/84xx Series adapter.") Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 78f5e815139e..e3be2760665c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -680,21 +680,12 @@ static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, } if (tx_ring != 0) { - if (qlcnic_82xx_check(adapter) && - (tx_ring > adapter->max_tx_rings)) { + if (tx_ring > adapter->max_tx_rings) { netdev_err(adapter->netdev, "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", tx_ring, adapter->max_tx_rings); return -EINVAL; } - - if (qlcnic_83xx_check(adapter) && - (tx_ring > QLCNIC_SINGLE_RING)) { - netdev_err(adapter->netdev, - "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n", - tx_ring, QLCNIC_SINGLE_RING); - return -EINVAL; - } } return 0; From 3fc38e267bfbea5e33e2222d6babc3b06c2bb642 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Mon, 16 Dec 2013 15:37:00 -0500 Subject: [PATCH 213/312] qlcnic: Fix memory allocation o Use vzalloc() instead of kzalloc() for allocation of bootloader size memory. kzalloc() may fail to allocate the size of bootloader Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index fae1b7171576..cac0503bda31 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1254,24 +1254,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) if (size & 0xF) size = (size + 16) & ~0xF; - p_cache = kzalloc(size, GFP_KERNEL); + p_cache = vzalloc(size); if (p_cache == NULL) return -ENOMEM; ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, size / sizeof(u32)); if (ret) { - kfree(p_cache); + vfree(p_cache); return ret; } /* 16 byte write to MS memory */ ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, size / 16); if (ret) { - kfree(p_cache); + vfree(p_cache); return ret; } - kfree(p_cache); + vfree(p_cache); return ret; } From 30fa15f64e40c4cf037e3379e55c2323b5d992e2 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Mon, 16 Dec 2013 15:37:01 -0500 Subject: [PATCH 214/312] qlcnic: Allow firmware dump collection when auto firmware recovery is disabled o Allow driver to collect firmware dump, during a forced firmware dump operation, when auto firmware recovery is disabled. Also, during this operation, driver should not allow reset recovery to be performed. Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller --- .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 1 + .../ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 55 ++++++++++++------- 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 4cae6caa6bfa..a6a33508e401 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, pci_channel_state_t); pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); void qlcnic_83xx_io_resume(struct pci_dev *); +void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index cac0503bda31..918e18ddf038 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) adapter->ahw->idc.err_code = -EIO; dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); return 0; } @@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; int ret = 0; - u32 owner; u32 val; /* Perform NIC configuration based ready state entry actions */ @@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); } else { - owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); - if (ahw->pci_func == owner) - qlcnic_dump_fw(adapter); + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 1); } return -EIO; } @@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) +static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { - dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); - clear_bit(__QLCNIC_RESETTING, &adapter->state); - adapter->ahw->idc.err_code = -EIO; + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 val, owner; - return 0; + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) { + qlcnic_83xx_stop_hw(adapter); + qlcnic_dump_fw(adapter); + } + } + + netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", + __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + ahw->idc.err_code = -EIO; + + return; } static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) @@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; qlcnic_83xx_periodic_tasks(adapter); - /* Do not reschedule if firmaware is in hanged state and auto - * recovery is disabled - */ - if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset) - return; - /* Re-schedule the function */ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) qlcnic_schedule_work(adapter, 
qlcnic_83xx_idc_poll_dev_state, @@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); - if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || - !qlcnic_auto_fw_reset) { - dev_err(&adapter->pdev->dev, - "%s:failed, device in non reset mode\n", __func__); + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 0); qlcnic_83xx_unlock_driver(adapter); return; } @@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, p_dev->ahw->reset.seq_index = index; } -static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) +void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) { p_dev->ahw->reset.seq_index = 0; @@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) qlcnic_dump_fw(adapter); + + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return err; + } + qlcnic_83xx_init_hw(adapter); if (qlcnic_83xx_copy_bootloader(adapter)) From e49df7947a48b04ee57ee0fa0c4110ef05024c4f Mon Sep 17 00:00:00 2001 From: Manish chopra Date: Mon, 16 Dec 2013 15:37:02 -0500 Subject: [PATCH 215/312] qlcnic: Fix mailbox processing during diagnostic test o Do not enable mailbox polling in case of legacy interrupt. Process mailbox AEN/response from the interrupt. Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller --- .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 32 ++++--------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b1cb0ffb15c7..ab66e7f3e01e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data) qlcnic_83xx_poll_process_aen(adapter); - if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - ahw->diag_cnt++; + if (ahw->diag_test) { + if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) + ahw->diag_cnt++; qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); return IRQ_HANDLED; } @@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, } if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { - /* disable and free mailbox interrupt */ - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { - qlcnic_83xx_enable_mbx_poll(adapter); - qlcnic_83xx_free_mbx_intr(adapter); - } adapter->ahw->loopback_state = 0; adapter->ahw->hw_ops->setup_link_event(adapter, 1); } @@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; - int ring, err; + int ring; clear_bit(__QLCNIC_DEV_UP, &adapter->state); if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; - qlcnic_83xx_disable_intr(adapter, sds_ring); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_enable_mbx_poll(adapter); + if (adapter->flags & QLCNIC_MSIX_ENABLED) + 
qlcnic_83xx_disable_intr(adapter, sds_ring); } } qlcnic_fw_destroy_ctx(adapter); qlcnic_detach(adapter); - if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { - err = qlcnic_83xx_setup_mbx_intr(adapter); - qlcnic_83xx_disable_mbx_poll(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "%s: failed to setup mbx interrupt\n", - __func__); - goto out; - } - } - } adapter->ahw->diag_test = 0; adapter->drv_sds_rings = drv_sds_rings; @@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, if (netif_running(netdev)) __qlcnic_up(adapter, netdev); - if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && - !(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_disable_mbx_poll(adapter); out: netif_device_attach(netdev); } From 0951c5c214e525185572204b587e3b6762d121f4 Mon Sep 17 00:00:00 2001 From: Manish chopra Date: Mon, 16 Dec 2013 15:37:03 -0500 Subject: [PATCH 216/312] qlcnic: Dump mailbox registers when mailbox command times out. Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ab66e7f3e01e..6055d397a29e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3734,6 +3734,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, return; } +static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 offset; + + offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", + readl(ahw->pci_base0 + offset), + QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), + QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), + QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); +} + static void qlcnic_83xx_mailbox_worker(struct work_struct *work) { struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, @@ -3778,6 +3791,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) __func__, cmd->cmd_op, cmd->type, ahw->pci_func, ahw->op_mode); clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_dump_mailbox_registers(adapter); + qlcnic_83xx_get_mbx_data(adapter, cmd); qlcnic_dump_mbx(adapter, cmd); qlcnic_83xx_idc_request_reset(adapter, QLCNIC_FORCE_FW_DUMP_KEY); From c2db11eca089b60148ded7467b956a547e8a2ae0 Mon Sep 17 00:00:00 2001 From: Soren Brinkmann Date: Mon, 2 Dec 2013 11:38:38 -0800 Subject: [PATCH 217/312] tty: xuartps: Properly guard sysrq specific code Commit 'tty: xuartps: Implement BREAK detection, add SYSRQ support' (0c0c47bc40a2e358d593b2d7fb93b50027fbfc0c) introduced sysrq support without properly guarding sysrq specific code which results in build errors when sysrq is disabled: DNAME=KBUILD_STR(xilinx_uartps)" -c -o drivers/tty/serial/.tmp_xilinx_uartps.o drivers/tty/serial/xilinx_uartps.c drivers/tty/serial/xilinx_uartps.c: In function 'xuartps_isr': drivers/tty/serial/xilinx_uartps.c:247:5: error: 'struct uart_port' has no member named 'sysrq' drivers/tty/serial/xilinx_uartps.c:247:5: error: 'struct uart_port' has no member named 'sysrq' drivers/tty/serial/xilinx_uartps.c:247:5: error: 'struct uart_port' has no member named 'sysrq' make[3]: *** [drivers/tty/serial/xilinx_uartps.o] 
Error 1 Reported-by: Masanari Iida Cc: Vlad Lungu Signed-off-by: Soren Brinkmann Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/xilinx_uartps.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index e46e9f3f19b9..f619ad5b5eae 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) continue; } +#ifdef SUPPORT_SYSRQ /* * uart_handle_sysrq_char() doesn't work if * spinlocked, for some reason @@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id) } spin_lock(&port->lock); } +#endif port->icount.rx++; From 130f769e81fc472beb2211320777e26050e3fa15 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Mon, 16 Dec 2013 09:14:48 +0200 Subject: [PATCH 218/312] Revert "ARM: OMAP2+: Remove legacy mux code for display.c" Commit e30b06f4d5f000c31a7747a7e7ada78a5fd419a1 (ARM: OMAP2+: Remove legacy mux code for display.c) removed non-DT DSI and HDMI pinmuxing. However, DSI pinmuxing is still needed, and removing that caused DSI displays not to work. This reverts the DSI parts of the commit. Signed-off-by: Tomi Valkeinen Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/display.c | 38 +++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 58347bb874a0..4cf165502b35 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = { { "dss_hdmi", "omapdss_hdmi", -1 }, }; +static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) +{ + u32 enable_mask, enable_shift; + u32 pipd_mask, pipd_shift; + u32 reg; + + if (dsi_id == 0) { + enable_mask = OMAP4_DSI1_LANEENABLE_MASK; + enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT; + pipd_mask = OMAP4_DSI1_PIPD_MASK; + pipd_shift = OMAP4_DSI1_PIPD_SHIFT; + } else if (dsi_id == 1) { + enable_mask = OMAP4_DSI2_LANEENABLE_MASK; + enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT; + pipd_mask = OMAP4_DSI2_PIPD_MASK; + pipd_shift = OMAP4_DSI2_PIPD_SHIFT; + } else { + return -ENODEV; + } + + reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); + + reg &= ~enable_mask; + reg &= ~pipd_mask; + + reg |= (lanes << enable_shift) & enable_mask; + reg |= (lanes << pipd_shift) & pipd_mask; + + omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY); + + return 0; +} + static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) { + if (cpu_is_omap44xx()) + return omap4_dsi_mux_pads(dsi_id, lane_mask); + return 0; } static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) { + if (cpu_is_omap44xx()) + omap4_dsi_mux_pads(dsi_id, 0); } static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) From 82832046e285952f14e707647f9ef82466c883cc Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 11:33:24 +0000 Subject: [PATCH 219/312] imx-drm: imx-drm-core: fix error cleanup path for imx_drm_add_crtc() imx_drm_add_crtc() was kfree'ing the imx_drm_crtc structure while leaving it on the list of CRTCs. Delete it from the list first. 
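The underlying rule is worth spelling out: once an object has been added to a shared list, every error path must unlink it before freeing it, otherwise the list retains a dangling pointer. A minimal kernel-style sketch with hypothetical names, not the imx-drm code:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
        struct list_head list;
        /* ... payload ... */
    };

    int register_item(struct item *it);     /* hypothetical step that can fail */

    static int add_item(struct list_head *items, struct item *it)
    {
        int ret;

        list_add_tail(&it->list, items);

        ret = register_item(it);
        if (ret) {
            list_del(&it->list);    /* unlink first ... */
            kfree(it);              /* ... then it is safe to free */
            return ret;
        }
        return 0;
    }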
Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/imx-drm-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 6bd015ac9d68..b5ff45f3c55b 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -528,6 +528,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, return 0; err_register: + list_del(&imx_drm_crtc->list); kfree(imx_drm_crtc); err_alloc: err_busy: From 8007875f0619f36d885ba357b587e673d6f20704 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 11:33:44 +0000 Subject: [PATCH 220/312] imx-drm: imx-drm-core: fix DRM cleanup paths We must call drm_vblank_cleanup() on the error cleanup and unload paths after we've had a successful call to drm_vblank_init(). Ensure that the calls are in the reverse order to the initialisation order. Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/imx-drm-core.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index b5ff45f3c55b..aff4bd47693e 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) imx_drm_device_put(); - drm_mode_config_cleanup(imxdrm->drm); + drm_vblank_cleanup(imxdrm->drm); drm_kms_helper_poll_fini(imxdrm->drm); + drm_mode_config_cleanup(imxdrm->drm); return 0; } @@ -428,11 +429,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) ret = drm_mode_group_init_legacy_group(imxdrm->drm, &imxdrm->drm->primary->mode_group); if (ret) - goto err_init; + goto err_kms; ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); if (ret) - goto err_init; + goto err_kms; /* * with vblank_disable_allowed = true, vblank interrupt will be disabled @@ -441,12 +442,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) */ imxdrm->drm->vblank_disable_allowed = true; - if (!imx_drm_device_get()) + if (!imx_drm_device_get()) { ret = -EINVAL; + goto err_vblank; + } - ret = 0; + mutex_unlock(&imxdrm->mutex); + return 0; -err_init: +err_vblank: + drm_vblank_cleanup(drm); +err_kms: + drm_kms_helper_poll_fini(drm); + drm_mode_config_cleanup(drm); mutex_unlock(&imxdrm->mutex); return ret; From 4ae078d58ac7fadace7eb30bade8da649ecc4ded Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 11:34:25 +0000 Subject: [PATCH 221/312] imx-drm: ipu-v3: fix potential CRTC device registration race Clean up the IPUv3 CRTC device registration; we don't need a separate function just to call platform_device_register_data(), and we don't need the return value converted at all. Update the IPU client id under a mutex, so that parallel probing doesn't race. 
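The allocation change follows a simple pattern: reserve a whole contiguous block of IDs while holding the lock, then hand them out without it. Reduced to a userspace sketch, with a pthread mutex standing in for the kernel mutex:

    #include <pthread.h>

    static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
    static int next_id;

    /* Returns the first id of a block of 'count' ids; two probes running
     * in parallel can never be handed overlapping blocks. */
    int reserve_ids(int count)
    {
        int first;

        pthread_mutex_lock(&id_lock);
        first = next_id;
        next_id += count;
        pthread_mutex_unlock(&id_lock);

        return first;
    }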
Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/ipu-v3/ipu-common.c | 32 ++++++++++----------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c index 7a22ce619ed2..97ca6924dbb3 100644 --- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c +++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c @@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = { }, }; +static DEFINE_MUTEX(ipu_client_id_mutex); static int ipu_client_id; -static int ipu_add_subdevice_pdata(struct device *dev, - const struct ipu_platform_reg *reg) -{ - struct platform_device *pdev; - - pdev = platform_device_register_data(dev, reg->name, ipu_client_id++, - ®->pdata, sizeof(struct ipu_platform_reg)); - - return PTR_ERR_OR_ZERO(pdev); -} - static int ipu_add_client_devices(struct ipu_soc *ipu) { - int ret; - int i; + struct device *dev = ipu->dev; + unsigned i; + int id, ret; + + mutex_lock(&ipu_client_id_mutex); + id = ipu_client_id; + ipu_client_id += ARRAY_SIZE(client_reg); + mutex_unlock(&ipu_client_id_mutex); for (i = 0; i < ARRAY_SIZE(client_reg); i++) { const struct ipu_platform_reg *reg = &client_reg[i]; - ret = ipu_add_subdevice_pdata(ipu->dev, reg); - if (ret) + struct platform_device *pdev; + + pdev = platform_device_register_data(dev, reg->name, + id++, ®->pdata, sizeof(reg->pdata)); + + if (IS_ERR(pdev)) goto err_register; } return 0; err_register: - platform_device_unregister_children(to_platform_device(ipu->dev)); + platform_device_unregister_children(to_platform_device(dev)); return ret; } From bd5121bbb6b9a89d9d13878fa5bb1e833168ae18 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 12:38:30 +0000 Subject: [PATCH 222/312] imx-drm: imx-tve: don't call sleeping functions beneath enable_lock spinlock Enable lock claims that it is serializing tve_enable/disable calls. However, DRM already serialises mode sets with a mutex, which prevents encoder/connector functions being called concurrently. Secondly, holding a spinlock while calling clk_prepare_enable() is wrong; it will cause a might_sleep() warning should that debugging be enabled. So, let's just get rid of the enable_lock. 
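The rule behind this cleanup, stated generally: functions that may sleep, such as clk_prepare_enable(), must not be called inside a spinlock-protected section; either the sleeping work moves outside the spinlock or it relies on a mutex that is already held, as the DRM mode-set mutex is here. A kernel-style sketch of the safe shape (hypothetical device structure, return values unchecked for brevity):

    #include <linux/clk.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    struct demo_priv {                  /* hypothetical device state */
        struct clk *clk;
        struct mutex enable_lock;       /* serialises enable/disable, may sleep */
        spinlock_t reg_lock;            /* guards only short register updates */
    };

    static void demo_enable(struct demo_priv *priv)
    {
        unsigned long flags;

        mutex_lock(&priv->enable_lock);     /* sleeping context is fine here */
        clk_prepare_enable(priv->clk);      /* may sleep: never under a spinlock */

        spin_lock_irqsave(&priv->reg_lock, flags);
        /* ... short, non-sleeping register update ... */
        spin_unlock_irqrestore(&priv->reg_lock, flags);

        mutex_unlock(&priv->enable_lock);
    }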
Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/imx-tve.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 680f4c8fa081..2c44fef8d58b 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c @@ -114,7 +114,6 @@ struct imx_tve { struct drm_encoder encoder; struct imx_drm_encoder *imx_drm_encoder; struct device *dev; - spinlock_t enable_lock; /* serializes tve_enable/disable */ spinlock_t lock; /* register lock */ bool enabled; int mode; @@ -146,10 +145,8 @@ __releases(&tve->lock) static void tve_enable(struct imx_tve *tve) { - unsigned long flags; int ret; - spin_lock_irqsave(&tve->enable_lock, flags); if (!tve->enabled) { tve->enabled = true; clk_prepare_enable(tve->clk); @@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve) TVE_CD_SM_IEN | TVE_CD_LM_IEN | TVE_CD_MON_END_IEN); - - spin_unlock_irqrestore(&tve->enable_lock, flags); } static void tve_disable(struct imx_tve *tve) { - unsigned long flags; int ret; - spin_lock_irqsave(&tve->enable_lock, flags); if (tve->enabled) { tve->enabled = false; ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_IPU_CLK_EN | TVE_EN, 0); clk_disable_unprepare(tve->clk); } - spin_unlock_irqrestore(&tve->enable_lock, flags); } static int tve_setup_tvout(struct imx_tve *tve) @@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev) tve->dev = &pdev->dev; spin_lock_init(&tve->lock); - spin_lock_init(&tve->enable_lock); ddc_node = of_parse_phandle(np, "ddc", 0); if (ddc_node) { From 942325c8b234fd67db976bd529d48320a6a73171 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 12:38:50 +0000 Subject: [PATCH 223/312] imx-drm: imx-drm-core: use defined constant for number of CRTCs. We have this definition, there's no reason not to use it. Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/imx-drm-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index aff4bd47693e..b52b8273d652 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -200,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm, if (!file->is_master) return; - for (i = 0; i < 4; i++) - imx_drm_disable_vblank(drm , i); + for (i = 0; i < MAX_CRTC; i++) + imx_drm_disable_vblank(drm, i); } static const struct file_operations imx_drm_driver_fops = { From 9fe73d46edd358fc154f7332c8ff312e067255a0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 12:39:11 +0000 Subject: [PATCH 224/312] imx-drm: imx-drm-core: make imx_drm_crtc_register() safer imx_drm_crtc_register() doesn't clean up the CRTC upon failure, which leaves the CRTC attached to the DRM device. Also, it does setup after attaching the CRTC to the DRM device. Fix this by reordering the function such that we do the setup before drm_crtc_init(): this fixes both issues. 
Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/imx-drm-core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index b52b8273d652..19e431dee493 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -377,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) struct imx_drm_device *imxdrm = __imx_drm_device(); int ret; - drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, - imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); if (ret) return ret; @@ -386,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) drm_crtc_helper_add(imx_drm_crtc->crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); + drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, + imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); + drm_mode_group_reinit(imxdrm->drm); return 0; From fd6040ed57d8f200ab0cc2abf706c54995a48370 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Dec 2013 12:39:31 +0000 Subject: [PATCH 225/312] imx-drm: imx-drm-core: improve safety of imx_drm_add_crtc() We must not add more CRTCs than we have declared to the vblank helpers, otherwise we overflow their arrays. Force failure if we exceed the number of CRTCs. Signed-off-by: Russell King Acked-by: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/staging/imx-drm/imx-drm-core.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 19e431dee493..96e4eee344ef 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -501,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, mutex_lock(&imxdrm->mutex); + /* + * The vblank arrays are dimensioned by MAX_CRTC - we can't + * pass IDs greater than this to those functions. + */ + if (imxdrm->pipes >= MAX_CRTC) { + ret = -EINVAL; + goto err_busy; + } + if (imxdrm->drm->open_count) { ret = -EBUSY; goto err_busy; From 34cf865d54813aab3497838132fb1bbd293f4054 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 18 Dec 2013 00:44:44 -0500 Subject: [PATCH 226/312] ext4: fix deadlock when writing in ENOSPC conditions Akira-san has been reporting rare deadlocks of his machine when running xfstests test 269 on ext4 filesystem. The problem turned out to be in ext4_da_reserve_metadata() and ext4_da_reserve_space() which called ext4_should_retry_alloc() while holding i_data_sem. Since ext4_should_retry_alloc() can force a transaction commit, this is a lock ordering violation and leads to deadlocks. Fix the problem by just removing the retry loops. These functions should just report ENOSPC to the caller (e.g. ext4_da_write_begin()) and that function must take care of retrying after dropping all necessary locks. 
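As a sketch of the caller-side retry this change relies on (simplified, with a hypothetical reserve_delalloc_blocks() helper rather than the exact ext4_da_write_begin() code): the retry loop sits where no i_data_sem or page locks are held, so ext4_should_retry_alloc() is free to force a transaction commit and wait for it.

/* hypothetical helper: takes and drops i_data_sem internally and
 * returns -ENOSPC when no blocks could be reserved */
static int reserve_delalloc_blocks(struct inode *inode);

static int da_write_begin_sketch(struct inode *inode)
{
	int ret, retries = 0;

retry:
	ret = reserve_delalloc_blocks(inode);
	if (ret == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;	/* retried with no locks held */
	return ret;
}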
Reported-and-tested-by: Akira Fujita Reviewed-by: Zheng Liu Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/inode.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 075763474118..61d49ff22c81 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file, */ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) { - int retries = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); unsigned int md_needed; @@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) * in order to allocate nrblocks * worse case is one extent per block */ -repeat: spin_lock(&ei->i_block_reservation_lock); /* * ext4_calc_metadata_amount() has side effects, which we have @@ -1238,10 +1236,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) ei->i_da_metadata_calc_len = save_len; ei->i_da_metadata_calc_last_lblock = save_last_lblock; spin_unlock(&ei->i_block_reservation_lock); - if (ext4_should_retry_alloc(inode->i_sb, &retries)) { - cond_resched(); - goto repeat; - } return -ENOSPC; } ei->i_reserved_meta_blocks += md_needed; @@ -1255,7 +1249,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) */ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) { - int retries = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); unsigned int md_needed; @@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) * in order to allocate nrblocks * worse case is one extent per block */ -repeat: spin_lock(&ei->i_block_reservation_lock); /* * ext4_calc_metadata_amount() has side effects, which we have @@ -1297,10 +1289,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) ei->i_da_metadata_calc_len = save_len; ei->i_da_metadata_calc_last_lblock = save_last_lblock; spin_unlock(&ei->i_block_reservation_lock); - if (ext4_should_retry_alloc(inode->i_sb, &retries)) { - cond_resched(); - goto repeat; - } dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); return -ENOSPC; } From 36e7bb38028d3d812aa7749208249d600a30c22c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 11 Nov 2013 19:29:47 +0530 Subject: [PATCH 227/312] powerpc: book3s: kvm: Don't abuse host r2 in exit path We don't use PACATOC for PR. Avoid updating HOST_R2 with PR KVM mode when both HV and PR are enabled in the kernel. Without this we get the below crash (qemu) Unable to handle kernel paging request for data at address 0xffffffffffff8310 Faulting instruction address: 0xc00000000001d5a4 cpu 0x2: Vector: 300 (Data Access) at [c0000001dc53aef0] pc: c00000000001d5a4: .vtime_delta.isra.1+0x34/0x1d0 lr: c00000000001d760: .vtime_account_system+0x20/0x60 sp: c0000001dc53b170 msr: 8000000000009032 dar: ffffffffffff8310 dsisr: 40000000 current = 0xc0000001d76c62d0 paca = 0xc00000000fef1100 softe: 0 irq_happened: 0x01 pid = 4472, comm = qemu-system-ppc enter ? 
for help [c0000001dc53b200] c00000000001d760 .vtime_account_system+0x20/0x60 [c0000001dc53b290] c00000000008d050 .kvmppc_handle_exit_pr+0x60/0xa50 [c0000001dc53b340] c00000000008f51c kvm_start_lightweight+0xb4/0xc4 [c0000001dc53b510] c00000000008cdf0 .kvmppc_vcpu_run_pr+0x150/0x2e0 [c0000001dc53b9e0] c00000000008341c .kvmppc_vcpu_run+0x2c/0x40 [c0000001dc53ba50] c000000000080af4 .kvm_arch_vcpu_ioctl_run+0x54/0x1b0 [c0000001dc53bae0] c00000000007b4c8 .kvm_vcpu_ioctl+0x478/0x730 [c0000001dc53bca0] c0000000002140cc .do_vfs_ioctl+0x4ac/0x770 [c0000001dc53bd80] c0000000002143e8 .SyS_ioctl+0x58/0xb0 [c0000001dc53be30] c000000000009e58 syscall_exit+0x0/0x98 Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 7 +++---- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 412b2f389474..192917d2239c 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -79,6 +79,7 @@ struct kvmppc_host_state { ulong vmhandler; ulong scratch0; ulong scratch1; + ulong scratch2; u8 in_guest; u8 restore_hid5; u8 napping; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc033ec8..d3de01066f7d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -576,6 +576,7 @@ int main(void) HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); + HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bde28da69610..be4fa04a37c9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -754,15 +754,14 @@ kvmppc_interrupt_hv: * guest CR, R12 saved in shadow VCPU SCRATCH1/0 * guest R13 saved in SPRN_SCRATCH0 */ - /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ - std r9, HSTATE_HOST_R2(r13) + std r9, HSTATE_SCRATCH2(r13) lbz r9, HSTATE_IN_GUEST(r13) cmpwi r9, KVM_GUEST_MODE_HOST_HV beq kvmppc_bad_host_intr #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE cmpwi r9, KVM_GUEST_MODE_GUEST - ld r9, HSTATE_HOST_R2(r13) + ld r9, HSTATE_SCRATCH2(r13) beq kvmppc_interrupt_pr #endif /* We're now back in the host but in guest MMU context */ @@ -782,7 +781,7 @@ kvmppc_interrupt_hv: std r6, VCPU_GPR(R6)(r9) std r7, VCPU_GPR(R7)(r9) std r8, VCPU_GPR(R8)(r9) - ld r0, HSTATE_HOST_R2(r13) + ld r0, HSTATE_SCRATCH2(r13) std r0, VCPU_GPR(R9)(r9) std r10, VCPU_GPR(R10)(r9) std r11, VCPU_GPR(R11)(r9) From df9059bb64023da9f27e56a94a3e2b8f4b6336a9 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 16 Dec 2013 13:31:46 +1100 Subject: [PATCH 228/312] KVM: PPC: Book3S HV: Don't drop low-order page address bits Commit caaa4c804fae ("KVM: PPC: Book3S HV: Fix physical address calculations") unfortunately resulted in some low-order address bits getting dropped in the case where the guest is creating a 4k HPTE and the host page size is 64k. By getting the low-order bits from hva rather than gpa we miss out on bits 12 - 15 in this case, since hva is at page granularity. This puts the missing bits back in. 
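A worked example may make the bit layout clearer. The addresses below are hypothetical and assume a 64k host PAGE_SIZE (PAGE_SHIFT = 16) with the guest creating a 4k HPTE for gpa 0x12345000:

/*
 * pa  = pte_pfn(pte) << PAGE_SHIFT;    bits 16 and up        -> 0x12340000
 * pa |= hva & (pte_size - 1);          hva is only known at page
 *                                      granularity, so it cannot recover
 *                                      the guest's 4k-granular bits 12-15
 * pa |= gpa & ~PAGE_MASK;              restores bits 12-15   -> 0x00005000
 *
 * giving pa = 0x12345000, the 4k sub-page the guest actually mapped.
 */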
Reported-by: Alexey Kardashevskiy Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 1931aa341a72..8689e2e30857 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -240,6 +240,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, is_io = hpte_cache_bits(pte_val(pte)); pa = pte_pfn(pte) << PAGE_SHIFT; pa |= hva & (pte_size - 1); + pa |= gpa & ~PAGE_MASK; } } From 939fd1e8d9deff206f12bd9d4e54aa7a4bd0ffd6 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Wed, 18 Dec 2013 09:25:49 +0000 Subject: [PATCH 229/312] ASoC: wm_adsp: Add small delay while polling DSP RAM start Some devices are getting very close to the limit whilst polling the RAM start, this patch adds a small delay to this loop to give a longer startup timeout. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/wm_adsp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 46ec0e9744d4..4fbcab63e61f 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp) return ret; /* Wait for the RAM to start, should be near instantaneous */ - count = 0; - do { + for (count = 0; count < 10; ++count) { ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, &val); if (ret != 0) return ret; - } while (!(val & ADSP2_RAM_RDY) && ++count < 10); + + if (val & ADSP2_RAM_RDY) + break; + + msleep(1); + } if (!(val & ADSP2_RAM_RDY)) { adsp_err(dsp, "Failed to start DSP RAM\n"); From f0199bc5e3a3ec13f9bc938556517ec430b36437 Mon Sep 17 00:00:00 2001 From: Bo Shen Date: Wed, 18 Dec 2013 11:26:23 +0800 Subject: [PATCH 230/312] ASoC: wm8904: fix DSP mode B configuration When wm8904 work in DSP mode B, we still need to configure it to work in DSP mode. Or else, it will work in Right Justified mode. Signed-off-by: Bo Shen Acked-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/wm8904.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 3938fb1c203e..53bbfac6a83a 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_B: - aif1 |= WM8904_AIF_LRCLK_INV; + aif1 |= 0x3 | WM8904_AIF_LRCLK_INV; case SND_SOC_DAIFMT_DSP_A: aif1 |= 0x3; break; From 3a6c5d8ad0a9253aafb76df3577edcb68c09b939 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Wed, 18 Dec 2013 18:09:56 +0800 Subject: [PATCH 231/312] ALSA: hda - Add Dell headset detection quirk for one more laptop model On the Dell machines with codec whose Subsystem Id is 0x10280640, no external microphone can be detected when plugging a 3-ring headset. Using ALC255_FIXUP_DELL1_MIC_NO_PRESENCE can fix this problem. The codec (Vendor ID: 0x10ec0255) on the machine belongs to alc_269 family. 
BugLink: https://bugs.launchpad.net/bugs/1260303 Cc: David Henningsson Cc: stable@vger.kernel.org Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5ab8e1631190..c5646941539a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4256,6 +4256,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), From 0baf8f6a2ac86c2c40ed0cacab8ea3d17371a1bb Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 2 Dec 2013 18:01:30 +0000 Subject: [PATCH 232/312] dma: pl330: ensure DMA descriptors are zero-initialised I see the following splat with 3.13-rc1 when attempting to perform DMA: [ 253.004516] Alignment trap: not handling instruction e1902f9f at [] [ 253.004583] Unhandled fault: alignment exception (0x221) at 0xdfdfdfd7 [ 253.004646] Internal error: : 221 [#1] PREEMPT SMP ARM [ 253.004691] Modules linked in: dmatest(+) [last unloaded: dmatest] [ 253.004798] CPU: 0 PID: 671 Comm: kthreadd Not tainted 3.13.0-rc1+ #2 [ 253.004864] task: df9b0900 ti: df03e000 task.ti: df03e000 [ 253.004937] PC is at dmaengine_unmap_put+0x14/0x34 [ 253.005010] LR is at pl330_tasklet+0x3c8/0x550 [ 253.005087] pc : [] lr : [] psr: a00e0193 [ 253.005087] sp : df03fe48 ip : 00000000 fp : df03bf18 [ 253.005178] r10: bf00e108 r9 : 00000001 r8 : 00000000 [ 253.005245] r7 : df837040 r6 : dfb41800 r5 : df837048 r4 : df837000 [ 253.005316] r3 : dfdfdfcf r2 : dfb41f80 r1 : df837048 r0 : dfdfdfd7 [ 253.005384] Flags: NzCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment kernel [ 253.005459] Control: 30c5387d Table: 9fb9ba80 DAC: fffffffd [ 253.005520] Process kthreadd (pid: 671, stack limit = 0xdf03e248) This is due to desc->txd.unmap containing garbage (uninitialised memory). Rather than add another dummy initialisation to _init_desc, instead ensure that the descriptors are zero-initialised during allocation and remove the dummy, per-field initialisation. 
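The general pattern is worth spelling out. A minimal sketch with a hypothetical struct demo_desc (not the pl330 descriptor): with kmalloc() any field the init routine forgets keeps whatever garbage was left in the slab object, whereas kcalloc() returns the whole array zeroed, so pointers such as txd.unmap start out NULL by construction.

struct demo_desc {
	void *unmap;	/* consumer assumes NULL means "nothing to unmap" */
	int swap;	/* explicitly set later by the init routine */
};

static struct demo_desc *alloc_demo_descs(int count, gfp_t flags)
{
	/* kcalloc() = overflow-checked array allocation plus zeroing, so
	 * every field the init code does not touch is 0/NULL rather than
	 * stale slab contents. */
	return kcalloc(count, sizeof(struct demo_desc), flags);
}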
Cc: Andriy Shevchenko Acked-by: Jassi Brar Signed-off-by: Will Deacon Acked-by: Vinod Koul Signed-off-by: Dan Williams --- drivers/dma/pl330.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cdf0483b8f2d..536632f6479c 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) static inline void _init_desc(struct dma_pl330_desc *desc) { - desc->pchan = NULL; desc->req.x = &desc->px; desc->req.token = desc; desc->rqcfg.swap = SWAP_NO; - desc->rqcfg.privileged = 0; - desc->rqcfg.insnaccess = 0; desc->rqcfg.scctl = SCCTRL0; desc->rqcfg.dcctl = DCCTRL0; desc->req.cfg = &desc->rqcfg; @@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) if (!pdmac) return 0; - desc = kmalloc(count * sizeof(*desc), flg); + desc = kcalloc(count, sizeof(*desc), flg); if (!desc) return 0; From 77873803363c9e831fc1d1e6895c084279090c22 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 17 Dec 2013 10:09:32 -0800 Subject: [PATCH 233/312] net_dma: mark broken net_dma can cause data to be copied to a stale mapping if a copy-on-write fault occurs during dma. The application sees missing data. The following trace is triggered by modifying the kernel to WARN if it ever triggers copy-on-write on a page that is undergoing dma: WARNING: CPU: 24 PID: 2529 at lib/dma-debug.c:485 debug_dma_assert_idle+0xd2/0x120() ioatdma 0000:00:04.0: DMA-API: cpu touching an active dma mapped page [pfn=0x16bcd9] Modules linked in: iTCO_wdt iTCO_vendor_support ioatdma lpc_ich pcspkr dca CPU: 24 PID: 2529 Comm: linbug Tainted: G W 3.13.0-rc1+ #353 00000000000001e5 ffff88016f45f688 ffffffff81751041 ffff88017ab0ef70 ffff88016f45f6d8 ffff88016f45f6c8 ffffffff8104ed9c ffffffff810f3646 ffff8801768f4840 0000000000000282 ffff88016f6cca10 00007fa2bb699349 Call Trace: [] dump_stack+0x46/0x58 [] warn_slowpath_common+0x8c/0xc0 [] ? ftrace_pid_func+0x26/0x30 [] warn_slowpath_fmt+0x46/0x50 [] debug_dma_assert_idle+0xd2/0x120 [] do_wp_page+0xd0/0x790 [] handle_mm_fault+0x51c/0xde0 [] ? copy_user_enhanced_fast_string+0x9/0x20 [] __do_page_fault+0x19c/0x530 [] ? _raw_spin_lock_bh+0x16/0x40 [] ? trace_clock_local+0x9/0x10 [] ? rb_reserve_next_event+0x64/0x310 [] ? ioat2_dma_prep_memcpy_lock+0x60/0x130 [ioatdma] [] do_page_fault+0xe/0x10 [] page_fault+0x22/0x30 [] ? __kfree_skb+0x51/0xd0 [] ? copy_user_enhanced_fast_string+0x9/0x20 [] ? memcpy_toiovec+0x52/0xa0 [] skb_copy_datagram_iovec+0x5f/0x2a0 [] tcp_rcv_established+0x674/0x7f0 [] tcp_v4_do_rcv+0x2e5/0x4a0 [..] ---[ end trace e30e3b01191b7617 ]--- Mapped at: [] debug_dma_map_page+0xb9/0x160 [] dma_async_memcpy_pg_to_pg+0x127/0x210 [] dma_memcpy_pg_to_iovec+0x119/0x1f0 [] dma_skb_copy_datagram_iovec+0x11c/0x2b0 [] tcp_rcv_established+0x74a/0x7f0: ...the problem is that the receive path falls back to cpu-copy in several locations and this trace is just one of the areas. A few options were considered to fix this: 1/ sync all dma whenever a cpu copy branch is taken 2/ modify the page fault handler to hold off while dma is in-flight Option 1 adds yet more cpu overhead to an "offload" that struggles to compete with cpu-copy. Option 2 adds checks for behavior that is already documented as broken when using get_user_pages(). At a minimum a debug mode is warranted to catch and flag these violations of the dma-api vs get_user_pages(). Thanks to David for his reproducer. 
Cc: Cc: Dave Jiang Cc: Vinod Koul Cc: Alexander Duyck Reported-by: David Whipple Acked-by: David S. Miller Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 132a4fd375b2..c823daaf9043 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -355,6 +355,7 @@ config NET_DMA bool "Network: TCP receive copy offload" depends on DMA_ENGINE && NET default (INTEL_IOATDMA || FSL_DMA) + depends on BROKEN help This enables the use of DMA engines in the network stack to offload receive copy-to-user operations, freeing CPU cycles. From c97102ba96324da330078ad8619ba4dfe840dbe3 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 18 Dec 2013 17:08:31 -0800 Subject: [PATCH 234/312] kexec: migrate to reboot cpu Commit 1b3a5d02ee07 ("reboot: move arch/x86 reboot= handling to generic kernel") moved reboot= handling to generic code. In the process it also removed the code in native_machine_shutdown() which are moving reboot process to reboot_cpu/cpu0. I guess that thought must have been that all reboot paths are calling migrate_to_reboot_cpu(), so we don't need this special handling. But kexec reboot path (kernel_kexec()) is not calling migrate_to_reboot_cpu() so above change broke kexec. Now reboot can happen on non-boot cpu and when INIT is sent in second kerneo to bring up BP, it brings down the machine. So start calling migrate_to_reboot_cpu() in kexec reboot path to avoid this problem. Bisected by WANG Chao. Reported-by: Matthew Whitehead Reported-by: Dave Young Signed-off-by: Vivek Goyal Tested-by: Baoquan He Tested-by: WANG Chao Acked-by: H. Peter Anvin Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/reboot.h | 1 + kernel/kexec.c | 1 + kernel/reboot.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 8e00f9f6f963..9e7db9e73cc1 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *); * Architecture-specific implementations of sys_reboot commands. */ +extern void migrate_to_reboot_cpu(void); extern void machine_restart(char *cmd); extern void machine_halt(void); extern void machine_power_off(void); diff --git a/kernel/kexec.c b/kernel/kexec.c index d0d8fca54065..9c970167e402 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1680,6 +1680,7 @@ int kernel_kexec(void) { kexec_in_progress = true; kernel_restart_prepare(NULL); + migrate_to_reboot_cpu(); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); } diff --git a/kernel/reboot.c b/kernel/reboot.c index f813b3474646..662c83fc16b7 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb) } EXPORT_SYMBOL(unregister_reboot_notifier); -static void migrate_to_reboot_cpu(void) +void migrate_to_reboot_cpu(void) { /* The boot cpu is always logical cpu 0 */ int cpu = reboot_cpu; From 2b4847e73004c10ae6666c2e27b5c5430aed8698 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:32 -0800 Subject: [PATCH 235/312] mm: numa: serialise parallel get_user_page against THP migration Base pages are unmapped and flushed from cache and TLB during normal page migration and replaced with a migration entry that causes any parallel NUMA hinting fault or gup to block until migration completes. THP does not unmap pages due to a lack of support for migration entries at a PMD level. 
This allows races with get_user_pages and get_user_pages_fast which commit 3f926ab945b6 ("mm: Close races between THP migration and PMD numa clearing") made worse by introducing a pmd_clear_flush(). This patch forces get_user_page (fast and normal) on a pmd_numa page to go through the slow get_user_page path where it will serialise against THP migration and properly account for the NUMA hinting fault. On the migration side the page table lock is taken for each PTE update. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/gup.c | 13 +++++++++++++ mm/huge_memory.c | 24 ++++++++++++++++-------- mm/migrate.c | 38 +++++++++++++++++++++++++++++++------- 3 files changed, 60 insertions(+), 15 deletions(-) diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46828c0..0596e8e0cc19 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, pte_t pte = gup_get_pte(ptep); struct page *page; + /* Similar to the PMD case, NUMA hinting must take slow path */ + if (pte_numa(pte)) { + pte_unmap(ptep); + return 0; + } + if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { pte_unmap(ptep); return 0; @@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_large(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they + * can be serialised against THP migration. + */ + if (pmd_numa(pmd)) + return 0; if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) return 0; } else { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 33a5dc492810..51f069303ab9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1243,6 +1243,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) return ERR_PTR(-EFAULT); + /* Full NUMA hinting faults to serialise migration in fault paths */ + if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) + goto out; + page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page)); if (flags & FOLL_TOUCH) { @@ -1323,23 +1327,27 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* If the page was locked, there are no parallel migrations */ if (page_locked) goto clear_pmdnuma; + } - /* - * Otherwise wait for potential migrations and retry. We do - * relock and check_same as the page may no longer be mapped. - * As the fault is being retried, do not account for it. - */ + /* + * If there are potential migrations, wait for completion and retry. We + * do not relock and check_same as the page may no longer be mapped. + * Furtermore, even if the page is currently misplaced, there is no + * guarantee it is still misplaced after the migration completes. + */ + if (!page_locked) { spin_unlock(ptl); wait_on_page_locked(page); page_nid = -1; goto out; } - /* Page is misplaced, serialise migrations and parallel THP splits */ + /* + * Page is misplaced. Page lock serialises migrations. 
Acquire anon_vma + * to serialises splits + */ get_page(page); spin_unlock(ptl); - if (!page_locked) - lock_page(page); anon_vma = page_lock_anon_vma_read(page); /* Confirm the PMD did not change while page_table_lock was released */ diff --git a/mm/migrate.c b/mm/migrate.c index bb940045fe85..2cabbd5fa5bf 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1722,6 +1722,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, struct page *new_page = NULL; struct mem_cgroup *memcg = NULL; int page_lru = page_is_file_cache(page); + pmd_t orig_entry; /* * Rate-limit the amount of data that is being migrated to a node. @@ -1756,7 +1757,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, /* Recheck the target PMD */ ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_same(*pmd, entry))) { + if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { +fail_putback: spin_unlock(ptl); /* Reverse changes made by migrate_page_copy() */ @@ -1786,16 +1788,34 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, */ mem_cgroup_prepare_migration(page, new_page, &memcg); + orig_entry = *pmd; entry = mk_pmd(new_page, vma->vm_page_prot); - entry = pmd_mknonnuma(entry); - entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); + /* + * Clear the old entry under pagetable lock and establish the new PTE. + * Any parallel GUP will either observe the old page blocking on the + * page lock, block on the page table lock or observe the new page. + * The SetPageUptodate on the new page and page_add_new_anon_rmap + * guarantee the copy is visible before the pagetable update. + */ + flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE); + page_add_new_anon_rmap(new_page, vma, haddr); pmdp_clear_flush(vma, haddr, pmd); set_pmd_at(mm, haddr, pmd, entry); - page_add_new_anon_rmap(new_page, vma, haddr); update_mmu_cache_pmd(vma, address, &entry); + + if (page_count(page) != 2) { + set_pmd_at(mm, haddr, pmd, orig_entry); + flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); + update_mmu_cache_pmd(vma, address, &entry); + page_remove_rmap(new_page); + goto fail_putback; + } + page_remove_rmap(page); + /* * Finish the charge transaction under the page table lock to * prevent split_huge_page() from dividing up the charge @@ -1820,9 +1840,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, out_fail: count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); out_dropref: - entry = pmd_mknonnuma(entry); - set_pmd_at(mm, haddr, pmd, entry); - update_mmu_cache_pmd(vma, address, &entry); + ptl = pmd_lock(mm, pmd); + if (pmd_same(*pmd, entry)) { + entry = pmd_mknonnuma(entry); + set_pmd_at(mm, haddr, pmd, entry); + update_mmu_cache_pmd(vma, address, &entry); + } + spin_unlock(ptl); unlock_page(page); put_page(page); From f714f4f20e59ea6eea264a86b9a51fd51b88fc54 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:33 -0800 Subject: [PATCH 236/312] mm: numa: call MMU notifiers on THP migration MMU notifiers must be called on THP page migration or secondary MMUs will get very confused. 
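The bracketing added here follows the standard notifier pattern; a generic sketch (simplified, not the full migration path) looks like this:

static void remap_range_sketch(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	/* tell secondary MMUs (KVM, IOMMU drivers, ...) that the range is
	 * about to change, so they drop their own translations */
	mmu_notifier_invalidate_range_start(mm, start, end);

	/* ... take the page table lock, replace the PMD, flush the TLB ... */

	/* and tell them the update is complete */
	mmu_notifier_invalidate_range_end(mm, start, end);
}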
Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 2cabbd5fa5bf..be787d506fbb 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -36,6 +36,7 @@ #include #include #include +#include #include @@ -1716,12 +1717,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, struct page *page, int node) { spinlock_t *ptl; - unsigned long haddr = address & HPAGE_PMD_MASK; pg_data_t *pgdat = NODE_DATA(node); int isolated = 0; struct page *new_page = NULL; struct mem_cgroup *memcg = NULL; int page_lru = page_is_file_cache(page); + unsigned long mmun_start = address & HPAGE_PMD_MASK; + unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; pmd_t orig_entry; /* @@ -1756,10 +1758,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, WARN_ON(PageLRU(new_page)); /* Recheck the target PMD */ + mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ptl = pmd_lock(mm, pmd); if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { fail_putback: spin_unlock(ptl); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); /* Reverse changes made by migrate_page_copy() */ if (TestClearPageActive(new_page)) @@ -1800,15 +1804,16 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, * The SetPageUptodate on the new page and page_add_new_anon_rmap * guarantee the copy is visible before the pagetable update. */ - flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE); - page_add_new_anon_rmap(new_page, vma, haddr); - pmdp_clear_flush(vma, haddr, pmd); - set_pmd_at(mm, haddr, pmd, entry); + flush_cache_range(vma, mmun_start, mmun_end); + page_add_new_anon_rmap(new_page, vma, mmun_start); + pmdp_clear_flush(vma, mmun_start, pmd); + set_pmd_at(mm, mmun_start, pmd, entry); + flush_tlb_range(vma, mmun_start, mmun_end); update_mmu_cache_pmd(vma, address, &entry); if (page_count(page) != 2) { - set_pmd_at(mm, haddr, pmd, orig_entry); - flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); + set_pmd_at(mm, mmun_start, pmd, orig_entry); + flush_tlb_range(vma, mmun_start, mmun_end); update_mmu_cache_pmd(vma, address, &entry); page_remove_rmap(new_page); goto fail_putback; @@ -1823,6 +1828,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, */ mem_cgroup_end_migration(memcg, page, new_page, true); spin_unlock(ptl); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); unlock_page(new_page); unlock_page(page); @@ -1843,7 +1849,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ptl = pmd_lock(mm, pmd); if (pmd_same(*pmd, entry)) { entry = pmd_mknonnuma(entry); - set_pmd_at(mm, haddr, pmd, entry); + set_pmd_at(mm, mmun_start, pmd, entry); update_mmu_cache_pmd(vma, address, &entry); } spin_unlock(ptl); From 67f87463d3a3362424efcbe8b40e4772fd34fc61 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:34 -0800 Subject: [PATCH 237/312] mm: clear pmd_numa before invalidating On x86, PMD entries are similar to _PAGE_PROTNONE protection and are handled as NUMA hinting faults. The following two page table protection bits are what defines them _PAGE_NUMA:set _PAGE_PRESENT:clear A PMD is considered present if any of the _PAGE_PRESENT, _PAGE_PROTNONE, _PAGE_PSE or _PAGE_NUMA bits are set. 
If pmdp_invalidate encounters a pmd_numa, it clears the present bit leaving _PAGE_NUMA which will be considered not present by the CPU but present by pmd_present. The existing caller of pmdp_invalidate should handle it but it's an inconsistent state for a PMD. This patch keeps the state consistent when calling pmdp_invalidate. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/pgtable-generic.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index cbb38545d9d6..e84cad27a801 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -191,6 +191,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { + pmd_t entry = *pmdp; + if (pmd_numa(entry)) + entry = pmd_mknonnuma(entry); set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } From 5a6dac3ec5f583cc8ee7bc53b5500a207c4ca433 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:36 -0800 Subject: [PATCH 238/312] mm: numa: do not clear PMD during PTE update scan If the PMD is flushed then a parallel fault in handle_mm_fault() will enter the pmd_none and do_huge_pmd_anonymous_page() path where it'll attempt to insert a huge zero page. This is wasteful so the patch avoids clearing the PMD when setting pmd_numa. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 51f069303ab9..420826efda48 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1539,7 +1539,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, */ if (!is_huge_zero_page(page) && !pmd_numa(*pmd)) { - entry = pmdp_get_and_clear(mm, addr, pmd); + entry = *pmd; entry = pmd_mknuma(entry); ret = HPAGE_PMD_NR; } From 0c5f83c23ca703d32f930393825487257a5cde6d Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:37 -0800 Subject: [PATCH 239/312] mm: numa: do not clear PTE for pte_numa update The TLB must be flushed if the PTE is updated but change_pte_range is clearing the PTE while marking PTEs pte_numa without necessarily flushing the TLB if it reinserts the same entry. Without the flush, it's conceivable that two processors have different TLBs for the same virtual address and at the very least it would generate spurious faults. This patch only unmaps the pages in change_pte_range for a full protection change. 
[riel@redhat.com: write pte_numa pte back to the page tables] Signed-off-by: Mel Gorman Signed-off-by: Rik van Riel Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Chegu Vinod Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mprotect.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mm/mprotect.c b/mm/mprotect.c index 26667971c824..1291a053b167 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -52,17 +52,19 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, pte_t ptent; bool updated = false; - ptent = ptep_modify_prot_start(mm, addr, pte); if (!prot_numa) { + ptent = ptep_modify_prot_start(mm, addr, pte); ptent = pte_modify(ptent, newprot); updated = true; } else { struct page *page; + ptent = *pte; page = vm_normal_page(vma, addr, oldpte); if (page) { if (!pte_numa(oldpte)) { ptent = pte_mknuma(ptent); + set_pte_at(mm, addr, pte, ptent); updated = true; } } @@ -79,7 +81,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (updated) pages++; - ptep_modify_prot_commit(mm, addr, pte, ptent); + + /* Only !prot_numa always clears the pte */ + if (!prot_numa) + ptep_modify_prot_commit(mm, addr, pte, ptent); } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); From c3a489cac38d43ea6dc4ac240473b44b46deecf7 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:38 -0800 Subject: [PATCH 240/312] mm: numa: ensure anon_vma is locked to prevent parallel THP splits The anon_vma lock prevents parallel THP splits and any associated complexity that arises when handling splits during THP migration. This patch checks if the lock was successfully acquired and bails from THP migration if it failed for any reason. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 420826efda48..dbafffa5e2ee 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1359,6 +1359,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out_unlock; } + /* Bail if we fail to protect against THP splits for any reason */ + if (unlikely(!anon_vma)) { + put_page(page); + page_nid = -1; + goto clear_pmdnuma; + } + /* * Migrate the THP to the requested node, returns with page unlocked * and pmd_numa cleared. From eb4489f69f224356193364dc2762aa009738ca7f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:39 -0800 Subject: [PATCH 241/312] mm: numa: avoid unnecessary work on the failure path If a PMD changes during a THP migration then migration aborts but the failure path is doing more work than is necessary. 
Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/migrate.c b/mm/migrate.c index be787d506fbb..a987525810ae 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1780,7 +1780,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, putback_lru_page(page); mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); - goto out_fail; + + goto out_unlock; } /* @@ -1854,6 +1855,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, } spin_unlock(ptl); +out_unlock: unlock_page(page); put_page(page); return 0; From 3c67f474558748b604e247d92b55dfe89654c81d Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:40 -0800 Subject: [PATCH 242/312] sched: numa: skip inaccessible VMAs Inaccessible VMA should not be trapping NUMA hint faults. Skip them. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched/fair.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9030da7bcb15..c7395d97e4cb 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1738,6 +1738,13 @@ void task_numa_work(struct callback_head *work) (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) continue; + /* + * Skip inaccessible VMAs to avoid any confusion between + * PROT_NONE and NUMA hinting ptes + */ + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) + continue; + do { start = max(start, vma->vm_start); end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); From 1667918b6483b12a6496bf54151b827b8235d7b1 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:41 -0800 Subject: [PATCH 243/312] mm: numa: clear numa hinting information on mprotect On a protection change it is no longer clear if the page should be still accessible. This patch clears the NUMA hinting fault bits on a protection change. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 2 ++ mm/mprotect.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index dbafffa5e2ee..70e7429fd8ea 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1532,6 +1532,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ret = 1; if (!prot_numa) { entry = pmdp_get_and_clear(mm, addr, pmd); + if (pmd_numa(entry)) + entry = pmd_mknonnuma(entry); entry = pmd_modify(entry, newprot); ret = HPAGE_PMD_NR; BUG_ON(pmd_write(entry)); diff --git a/mm/mprotect.c b/mm/mprotect.c index 1291a053b167..f8421722acb9 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -54,6 +54,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (!prot_numa) { ptent = ptep_modify_prot_start(mm, addr, pte); + if (pte_numa(ptent)) + ptent = pte_mknonnuma(ptent); ptent = pte_modify(ptent, newprot); updated = true; } else { From de466bd628e8d663fdf3f791bc8db318ee85c714 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:42 -0800 Subject: [PATCH 244/312] mm: numa: avoid unnecessary disruption of NUMA hinting during migration do_huge_pmd_numa_page() handles the case where there is parallel THP migration. However, by the time it is checked the NUMA hinting information has already been disrupted. 
This patch adds an earlier check with some helpers. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 9 +++++++++ mm/huge_memory.c | 22 ++++++++++++++++------ mm/migrate.c | 12 ++++++++++++ 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f5096b58b20d..b7717d74da7f 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -90,10 +90,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING +extern bool pmd_trans_migrating(pmd_t pmd); +extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); extern bool migrate_ratelimited(int node); #else +static inline bool pmd_trans_migrating(pmd_t pmd) +{ + return false; +} +static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) +{ +} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 70e7429fd8ea..7de1bf85f683 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, ret = 0; goto out_unlock; } + + /* mmap_sem prevents this happening but warn if that changes */ + WARN_ON(pmd_trans_migrating(pmd)); + if (unlikely(pmd_trans_splitting(pmd))) { /* split huge page running from under us */ spin_unlock(src_ptl); @@ -1299,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(!pmd_same(pmd, *pmdp))) goto out_unlock; + /* + * If there are potential migrations, wait for completion and retry + * without disrupting NUMA hinting information. Do not relock and + * check_same as the page may no longer be mapped. + */ + if (unlikely(pmd_trans_migrating(*pmdp))) { + spin_unlock(ptl); + wait_migrate_huge_page(vma->anon_vma, pmdp); + goto out; + } + page = pmd_page(pmd); BUG_ON(is_huge_zero_page(page)); page_nid = page_to_nid(page); @@ -1329,12 +1344,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, goto clear_pmdnuma; } - /* - * If there are potential migrations, wait for completion and retry. We - * do not relock and check_same as the page may no longer be mapped. - * Furtermore, even if the page is currently misplaced, there is no - * guarantee it is still misplaced after the migration completes. - */ + /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { spin_unlock(ptl); wait_on_page_locked(page); diff --git a/mm/migrate.c b/mm/migrate.c index a987525810ae..cfb419085261 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1655,6 +1655,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) return 1; } +bool pmd_trans_migrating(pmd_t pmd) +{ + struct page *page = pmd_page(pmd); + return PageLocked(page); +} + +void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd) +{ + struct page *page = pmd_page(*pmd); + wait_on_page_locked(page); +} + /* * Attempt to migrate a misplaced page to the specified destination * node. 
Caller is expected to have an elevated reference count on From 20841405940e7be0617612d521e206e4b6b325db Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 18 Dec 2013 17:08:44 -0800 Subject: [PATCH 245/312] mm: fix TLB flush race between migration, and change_protection_range There are a few subtle races, between change_protection_range (used by mprotect and change_prot_numa) on one side, and NUMA page migration and compaction on the other side. The basic race is that there is a time window between when the PTE gets made non-present (PROT_NONE or NUMA), and the TLB is flushed. During that time, a CPU may continue writing to the page. This is fine most of the time, however compaction or the NUMA migration code may come in, and migrate the page away. When that happens, the CPU may continue writing, through the cached translation, to what is no longer the current memory location of the process. This only affects x86, which has a somewhat optimistic pte_accessible. All other architectures appear to be safe, and will either always flush, or flush whenever there is a valid mapping, even with no permissions (SPARC). The basic race looks like this: CPU A CPU B CPU C load TLB entry make entry PTE/PMD_NUMA fault on entry read/write old page start migrating page change PTE/PMD to new page read/write old page [*] flush TLB reload TLB from new entry read/write new page lose data [*] the old page may belong to a new user at this point! The obvious fix is to flush remote TLB entries, by making sure that pte_accessible aware of the fact that PROT_NONE and PROT_NUMA memory may still be accessible if there is a TLB flush pending for the mm. This should fix both NUMA migration and compaction. [mgorman@suse.de: fix build] Signed-off-by: Rik van Riel Signed-off-by: Mel Gorman Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/include/asm/pgtable_64.h | 4 +-- arch/x86/include/asm/pgtable.h | 11 ++++++-- include/asm-generic/pgtable.h | 2 +- include/linux/mm_types.h | 44 +++++++++++++++++++++++++++++ kernel/fork.c | 1 + mm/huge_memory.c | 7 +++++ mm/mprotect.c | 2 ++ mm/pgtable-generic.c | 5 ++-- 8 files changed, 69 insertions(+), 7 deletions(-) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc144959..0f9e94537eee 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) } #define pte_accessible pte_accessible -static inline unsigned long pte_accessible(pte_t a) +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) { return pte_val(a) & _PAGE_VALID; } @@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U * and SUN4V pte layout, so this inline test is fine. 
*/ - if (likely(mm != &init_mm) && pte_accessible(orig)) + if (likely(mm != &init_mm) && pte_accessible(mm, orig)) tlb_batch_add(mm, addr, ptep, orig, fullmm); } diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d1999458709..bbc8b12fa443 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) } #define pte_accessible pte_accessible -static inline int pte_accessible(pte_t a) +static inline bool pte_accessible(struct mm_struct *mm, pte_t a) { - return pte_flags(a) & _PAGE_PRESENT; + if (pte_flags(a) & _PAGE_PRESENT) + return true; + + if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && + mm_tlb_flush_pending(mm)) + return true; + + return false; } static inline int pte_hidden(pte_t pte) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f330d28e4d0e..b12079afbd5f 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif #ifndef pte_accessible -# define pte_accessible(pte) ((void)(pte),1) +# define pte_accessible(mm, pte) ((void)(pte), 1) #endif #ifndef flush_tlb_fix_spurious_fault diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bd299418a934..e5c49c30460f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -442,6 +442,14 @@ struct mm_struct { /* numa_scan_seq prevents two threads setting pte_numa */ int numa_scan_seq; +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + /* + * An operation with batched TLB flushing is going on. Anything that + * can move process memory needs to flush the TLB when moving a + * PROT_NONE or PROT_NUMA mapped page. + */ + bool tlb_flush_pending; #endif struct uprobes_state uprobes_state; }; @@ -459,4 +467,40 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) return mm->cpu_vm_mask_var; } +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) +/* + * Memory barriers to keep this state in sync are graciously provided by + * the page table locks, outside of which no page table modifications happen. + * The barriers below prevent the compiler from re-ordering the instructions + * around the memory barriers that are already present in the code. + */ +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + return mm->tlb_flush_pending; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ + mm->tlb_flush_pending = true; + barrier(); +} +/* Clearing is done after a TLB flush, which also provides a barrier. 
*/ +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + mm->tlb_flush_pending = false; +} +#else +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + return false; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ +} +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ +} +#endif + #endif /* _LINUX_MM_TYPES_H */ diff --git a/kernel/fork.c b/kernel/fork.c index 728d5be9548c..5721f0e3f2da 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) spin_lock_init(&mm->page_table_lock); mm_init_aio(mm); mm_init_owner(mm, p); + clear_tlb_flush_pending(mm); if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7de1bf85f683..3d2783e10596 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1376,6 +1376,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, goto clear_pmdnuma; } + /* + * The page_table_lock above provides a memory barrier + * with change_protection_range. + */ + if (mm_tlb_flush_pending(mm)) + flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); + /* * Migrate the THP to the requested node, returns with page unlocked * and pmd_numa cleared. diff --git a/mm/mprotect.c b/mm/mprotect.c index f8421722acb9..bb53a6591aea 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -188,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); + set_tlb_flush_pending(mm); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) @@ -199,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, /* Only flush the TLB if we actually modified any entries: */ if (pages) flush_tlb_range(vma, start, end); + clear_tlb_flush_pending(mm); return pages; } diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index e84cad27a801..a8b919925934 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma, pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + struct mm_struct *mm = (vma)->vm_mm; pte_t pte; - pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); - if (pte_accessible(pte)) + pte = ptep_get_and_clear(mm, address, ptep); + if (pte_accessible(mm, pte)) flush_tlb_page(vma, address); return pte; } From af2c1401e6f9177483be4fad876d0073669df9df Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:45 -0800 Subject: [PATCH 246/312] mm: numa: guarantee that tlb_flush_pending updates are visible before page table updates According to documentation on barriers, stores issued before a LOCK can complete after the lock implying that it's possible tlb_flush_pending can be visible after a page table update. As per revised documentation, this patch adds a smp_mb__before_spinlock to guarantee the correct ordering. Signed-off-by: Mel Gorman Acked-by: Paul E. 
McKenney Reviewed-by: Rik van Riel Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e5c49c30460f..ad0616f2fe2c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -482,7 +482,12 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm) static inline void set_tlb_flush_pending(struct mm_struct *mm) { mm->tlb_flush_pending = true; - barrier(); + + /* + * Guarantee that the tlb_flush_pending store does not leak into the + * critical section updating the page tables + */ + smp_mb__before_spinlock(); } /* Clearing is done after a TLB flush, which also provides a barrier. */ static inline void clear_tlb_flush_pending(struct mm_struct *mm) From b0943d61b8fa420180f92f64ef67662b4f6cc493 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 18 Dec 2013 17:08:46 -0800 Subject: [PATCH 247/312] mm: numa: defer TLB flush for THP migration as long as possible THP migration can fail for a variety of reasons. Avoid flushing the TLB to deal with THP migration races until the copy is ready to start. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 7 ------- mm/migrate.c | 3 +++ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3d2783e10596..7de1bf85f683 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1376,13 +1376,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, goto clear_pmdnuma; } - /* - * The page_table_lock above provides a memory barrier - * with change_protection_range. - */ - if (mm_tlb_flush_pending(mm)) - flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); - /* * Migrate the THP to the requested node, returns with page unlocked * and pmd_numa cleared. diff --git a/mm/migrate.c b/mm/migrate.c index cfb419085261..e9b710201335 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1759,6 +1759,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, goto out_fail; } + if (mm_tlb_flush_pending(mm)) + flush_tlb_range(vma, mmun_start, mmun_end); + /* Prepare a page as a migration target */ __set_page_locked(new_page); SetPageSwapBacked(new_page); From 73f038b863dfe98acabc7c36c17342b84ad52e94 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 18 Dec 2013 17:08:47 -0800 Subject: [PATCH 248/312] mm: page_alloc: exclude unreclaimable allocations from zone fairness policy Dave Hansen noted a regression in a microbenchmark that loops around open() and close() on an 8-node NUMA machine and bisected it down to commit 81c0a2bb515f ("mm: page_alloc: fair zone allocator policy"). That change forces the slab allocations of the file descriptor to spread out to all 8 nodes, causing remote references in the page allocator and slab. The round-robin policy is only there to provide fairness among memory allocations that are reclaimed involuntarily based on pressure in each zone. It does not make sense to apply it to unreclaimable kernel allocations that are freed manually, in this case instantly after the allocation, and incur the remote reference costs twice for no reason. Only round-robin allocations that are usually freed through page reclaim or slab shrinking. Bisected by Dave Hansen. 
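As a sketch of the intent of the added test (assuming GFP_MOVABLE_MASK covers the __GFP_MOVABLE and __GFP_RECLAIMABLE hints, i.e. allocations that reclaim or the shrinkers will eventually free):

	/* only allocations that page reclaim can later undo take part in the
	 * round-robin fairness batch; plain kernel allocations, such as the
	 * file-descriptor slab objects in the benchmark, stay on the local
	 * node and avoid the remote references twice over */
	if ((alloc_flags & ALLOC_WMARK_LOW) &&
	    (gfp_mask & GFP_MOVABLE_MASK)) {
		/* apply the per-zone NR_ALLOC_BATCH accounting */
	}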
Signed-off-by: Johannes Weiner Cc: Dave Hansen Cc: Mel Gorman Reviewed-by: Rik van Riel Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 580a5f075ed0..f861d0257e90 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1920,7 +1920,8 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, * back to remote zones that do not partake in the * fairness round-robin cycle of this zonelist. */ - if (alloc_flags & ALLOC_WMARK_LOW) { + if ((alloc_flags & ALLOC_WMARK_LOW) && + (gfp_mask & GFP_MOVABLE_MASK)) { if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) continue; if (zone_reclaim_mode && From 84ed8a99058e61567f495cc43118344261641c5f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 18 Dec 2013 17:08:48 -0800 Subject: [PATCH 249/312] sh: always link in helper functions extracted from libgcc E.g. landisk_defconfig, which has CONFIG_NTFS_FS=m: ERROR: "__ashrdi3" [fs/ntfs/ntfs.ko] undefined! For "lib-y", if no symbols in a compilation unit are referenced by other units, the compilation unit will not be included in vmlinux. This breaks modules that do reference those symbols. Use "obj-y" instead to fix this. http://kisskb.ellerman.id.au/kisskb/buildresult/8838077/ This doesn't fix all cases. There are others, e.g. udivsi3. This is also not limited to sh, many architectures handle this in the same way. A simple solution is to unconditionally include all helper functions. A more complex solution is to make the choice of "lib-y" or "obj-y" depend on CONFIG_MODULES: obj-$(CONFIG_MODULES) += ... lib-y($CONFIG_MODULES) += ... Signed-off-by: Geert Uytterhoeven Cc: Paul Mundt Tested-by: Nobuhiro Iwamatsu Reviewed-by: Nobuhiro Iwamatsu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/lib/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29e3174..3baff31e58cf 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ checksum.o strlen.o div64.o div64-generic.o # Extracted from libgcc -lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ +obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ udiv_qrnnd.o From a844f38671d45ee6d981cfe41c5c4c2334578d73 Mon Sep 17 00:00:00 2001 From: Sima Baymani Date: Wed, 18 Dec 2013 17:08:49 -0800 Subject: [PATCH 250/312] mm: add missing dependency in Kconfig Eliminate the following (rand)config warning by adding missing PROC_FS dependency: warning: (HWPOISON_INJECT && MEM_SOFT_DIRTY) selects PROC_PAGE_MONITOR which has unmet direct dependencies (PROC_FS && MMU) Signed-off-by: Sima Baymani Suggested-by: David Rientjes Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/Kconfig b/mm/Kconfig index eb69f352401d..723bbe04a0b0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -543,7 +543,7 @@ config ZSWAP config MEM_SOFT_DIRTY bool "Track memory changes" - depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY + depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS select PROC_PAGE_MONITOR help This option enables memory changes tracking by introducing a From b0e5fd7359f1ce8db4ccb862b3aa80d2f2cbf4d0 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Wed, 18 Dec 2013 17:08:51 -0800 Subject: [PATCH 
251/312] mm/mempolicy: correct putback method for isolate pages if failed queue_pages_range() isolates hugetlbfs pages and putback_lru_pages() can't handle these. We should change it to putback_movable_pages(). Naoya said that it is worth going into stable, because it can break the in-use hugepage list. Signed-off-by: Joonsoo Kim Acked-by: Rafael Aquini Reviewed-by: Naoya Horiguchi Reviewed-by: Wanpeng Li Cc: Christoph Lameter Cc: Vlastimil Babka Cc: Wanpeng Li Cc: Mel Gorman Cc: Rik van Riel Cc: Vlastimil Babka Cc: Zhang Yanfei Cc: [3.12.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eca4a3129129..6d04d372be29 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1318,7 +1318,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; } else - putback_lru_pages(&pagelist); + putback_movable_pages(&pagelist); up_write(&mm->mmap_sem); mpol_out: From 6815bf3f233e0b10c99a758497d5d236063b010b Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Wed, 18 Dec 2013 17:08:52 -0800 Subject: [PATCH 252/312] mm/compaction: respect ignore_skip_hint in update_pageblock_skip update_pageblock_skip() only suits compaction, which isolates by pageblock unit. If isolate_migratepages_range() is called by CMA, it tries to isolate regardless of the pageblock unit and, because of ignore_skip_hint, it does not reference get_pageblock_skip(). We should also respect ignore_skip_hint in update_pageblock_skip() to prevent it from recording wrong information. Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Reviewed-by: Naoya Horiguchi Reviewed-by: Wanpeng Li Cc: Christoph Lameter Cc: Rafael Aquini Cc: Vlastimil Babka Cc: Wanpeng Li Cc: Mel Gorman Cc: Rik van Riel Cc: Zhang Yanfei Cc: [3.7+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/compaction.c b/mm/compaction.c index 805165bcd3dd..f58bcd016f43 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc, bool migrate_scanner) { struct zone *zone = cc->zone; + + if (cc->ignore_skip_hint) + return; + if (!page) return; From a49ecbcd7b0d5a1cda7d60e03df402dd0ef76ac8 Mon Sep 17 00:00:00 2001 From: Jianguo Wu Date: Wed, 18 Dec 2013 17:08:54 -0800 Subject: [PATCH 253/312] mm/memory-failure.c: recheck PageHuge() after hugetlb page migrate successfully After a successful hugetlb page migration by soft offline, the source page will either be freed into the hugepage_freelists or into buddy (for an over-committed page). If the page is in buddy, page_hstate(page) will be NULL, and dequeue_hwpoisoned_huge_page() will hit a NULL pointer dereference: BUG: unable to handle kernel NULL pointer dereference at 0000000000000058 IP: [] dequeue_hwpoisoned_huge_page+0x131/0x1d0 PGD c23762067 PUD c24be2067 PMD 0 Oops: 0000 [#1] SMP So check PageHuge(page) after migrate_pages() has completed successfully.
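Condensed, the new logic in the hunk below is (hpage is the hugetlb head page, as elsewhere in soft_offline_huge_page(); the poison counters are omitted here):

  if (PageHuge(page)) {
          /* still a hugetlb page: poison and dequeue the whole compound page */
          set_page_hwpoison_huge_page(hpage);
          dequeue_hwpoisoned_huge_page(hpage);
  } else {
          /* over-commit page already freed to buddy: poison this page only */
          SetPageHWPoison(page);
  }
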
Signed-off-by: Jianguo Wu Tested-by: Naoya Horiguchi Reviewed-by: Naoya Horiguchi Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b7c171602ba1..db08af92c6fc 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1505,10 +1505,16 @@ static int soft_offline_huge_page(struct page *page, int flags) if (ret > 0) ret = -EIO; } else { - set_page_hwpoison_huge_page(hpage); - dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), - &num_poisoned_pages); + /* overcommit hugetlb page will be freed to buddy */ + if (PageHuge(page)) { + set_page_hwpoison_huge_page(hpage); + dequeue_hwpoisoned_huge_page(hpage); + atomic_long_add(1 << compound_order(hpage), + &num_poisoned_pages); + } else { + SetPageHWPoison(page); + atomic_long_inc(&num_poisoned_pages); + } } return ret; } From 584ec9794293ff69f597f5bec23296e62e94e857 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 18 Dec 2013 17:08:55 -0800 Subject: [PATCH 254/312] MAINTAINERS: add Davidlohr as GPT maintainer Add a new entry for the GPT standard. Any future changes will now be routed through linux-efi. Signed-off-by: Davidlohr Bueso Acked-by: Matt Fleming Cc: Jens Axboe Acked-by: Matt Domsch Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 75e3db96dec5..3c99c2556405 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3833,6 +3833,12 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/gspca/ +GUID PARTITION TABLE (GPT) +M: Davidlohr Bueso +L: linux-efi@vger.kernel.org +S: Maintained +F: block/partitions/efi.* + STK1160 USB VIDEO CAPTURE DRIVER M: Ezequiel Garcia L: linux-media@vger.kernel.org From 11c731e81bb0d8d2e835447a2dd645b34bb74706 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 18 Dec 2013 17:08:56 -0800 Subject: [PATCH 255/312] mm/mempolicy: fix !vma in new_vma_page() The BUG_ON(!vma) assumption was introduced by commit 0bf598d863e3 ("mbind: add BUG_ON(!vma) in new_vma_page()"); however, even if address = __vma_address(page, vma); and vma->start < address < vma->end, page_address_in_vma() may still return -EFAULT because of many other conditions in it. As a result the while loop in new_vma_page() may end with vma=NULL. This patch reverts the commit and also fixes the potential NULL pointer dereference reported by Dan. http://marc.info/?l=linux-mm&m=137689530323257&w=2 kernel BUG at mm/mempolicy.c:1204!
invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC CPU: 3 PID: 7056 Comm: trinity-child3 Not tainted 3.13.0-rc3+ #2 task: ffff8801ca5295d0 ti: ffff88005ab20000 task.ti: ffff88005ab20000 RIP: new_vma_page+0x70/0x90 RSP: 0000:ffff88005ab21db0 EFLAGS: 00010246 RAX: fffffffffffffff2 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000008040075 RSI: ffff8801c3d74600 RDI: ffffea00079a8b80 RBP: ffff88005ab21dc8 R08: 0000000000000004 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: fffffffffffffff2 R13: ffffea00079a8b80 R14: 0000000000400000 R15: 0000000000400000 FS: 00007ff49c6f4740(0000) GS:ffff880244e00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ff49c68f994 CR3: 000000005a205000 CR4: 00000000001407e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Stack: ffffea00079a8b80 ffffea00079a8bc0 ffffea00079a8ba0 ffff88005ab21e50 ffffffff811adc7a 0000000000000000 ffff8801ca5295d0 0000000464e224f8 0000000000000000 0000000000000002 0000000000000000 ffff88020ce75c00 Call Trace: migrate_pages+0x12a/0x850 SYSC_mbind+0x513/0x6a0 SyS_mbind+0xe/0x10 ia32_do_call+0x13/0x13 Code: 85 c0 75 2f 4c 89 e1 48 89 da 31 f6 bf da 00 02 00 65 44 8b 04 25 08 f7 1c 00 e8 ec fd ff ff 5b 41 5c 41 5d 5d c3 0f 1f 44 00 00 <0f> 0b 66 0f 1f 44 00 00 4c 89 e6 48 89 df ba 01 00 00 00 e8 48 RIP [] new_vma_page+0x70/0x90 RSP Signed-off-by: Wanpeng Li Reported-by: Dave Jones Reported-by: Sasha Levin Reviewed-by: Naoya Horiguchi Reviewed-by: Bob Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 6d04d372be29..0cd2c4d4e270 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int * break; vma = vma->vm_next; } - /* - * queue_pages_range() confirms that @page belongs to some vma, - * so vma shouldn't be NULL. - */ - BUG_ON(!vma); - if (PageHuge(page)) - return alloc_huge_page_noerr(vma, address, 1); + if (PageHuge(page)) { + if (vma) + return alloc_huge_page_noerr(vma, address, 1); + else + return NULL; + } + /* + * if !vma, alloc_page_vma() will use task or system default policy + */ return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); } #else From 7ac181568342ec618d4da1ab03c2babcaa7ee84f Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 18 Dec 2013 17:08:57 -0800 Subject: [PATCH 256/312] fix build with make 3.80 According to Documentation/Changes, make 3.80 is still being supported for building the kernel, hence make files must not make (unconditional) use of features introduced only in newer versions. Commit 1bf49dd4be0b ("./Makefile: export initial ramdisk compression config option") however introduced "else ifeq" constructs which make 3.80 doesn't understand. Replace the logic there with more conventional (in the kernel build infrastructure) list constructs (except that the list here is intentionally limited to exactly one element). Signed-off-by: Jan Beulich Cc: P J P Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Makefile | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 858a147fd836..89cee8625651 100644 --- a/Makefile +++ b/Makefile @@ -732,19 +732,13 @@ export mod_strip_cmd # Select initial ramdisk compression format, default is gzip(1). 
# This shall be used by the dracut(8) tool while creating an initramfs image. # -INITRD_COMPRESS=gzip -ifeq ($(CONFIG_RD_BZIP2), y) - INITRD_COMPRESS=bzip2 -else ifeq ($(CONFIG_RD_LZMA), y) - INITRD_COMPRESS=lzma -else ifeq ($(CONFIG_RD_XZ), y) - INITRD_COMPRESS=xz -else ifeq ($(CONFIG_RD_LZO), y) - INITRD_COMPRESS=lzo -else ifeq ($(CONFIG_RD_LZ4), y) - INITRD_COMPRESS=lz4 -endif -export INITRD_COMPRESS +INITRD_COMPRESS-y := gzip +INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2 +INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma +INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz +INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo +INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 +export INITRD_COMPRESS := $(INITRD_COMPRESS-y) ifdef CONFIG_MODULE_SIG_ALL MODSECKEY = ./signing_key.priv From 98398c32f6687ee1e1f3ae084effb4b75adb0747 Mon Sep 17 00:00:00 2001 From: Jianguo Wu Date: Wed, 18 Dec 2013 17:08:59 -0800 Subject: [PATCH 257/312] mm/hugetlb: check for pte NULL pointer in __page_check_address() In __page_check_address(), if address's pud is not present, huge_pte_offset() will return NULL, we should check the return value. Signed-off-by: Jianguo Wu Cc: Naoya Horiguchi Cc: Mel Gorman Cc: qiuxishi Cc: Hanjun Guo Acked-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/rmap.c b/mm/rmap.c index 55c8b8dc9ffb..068522d8502a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm, spinlock_t *ptl; if (unlikely(PageHuge(page))) { + /* when pud is not present, pte will be NULL */ pte = huge_pte_offset(mm, address); + if (!pte) + return NULL; + ptl = huge_pte_lockptr(page_hstate(page), mm, pte); goto check; } From 9fb444f22f09cfad3798e3610f7dc62f8a385ee8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 11 Dec 2013 03:48:15 +0100 Subject: [PATCH 258/312] ARM: shmobile: armadillo: Add PWM backlight power supply Commit 22ceeee16eb8f0d04de3ef43a5174fb30ec18af9 ("pwm-backlight: Add power supply support") added a mandatory power supply for the PWM backlight. Add a fixed 5V regulator to board code with a consumer supply entry for the backlight device. 
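For context, a sketch of the consumer side this supply feeds: the pwm-backlight driver (pwm_bl.c) is expected to look up a regulator named "power" for its device, roughly as below (the power_supply field name is assumed from the cited commit):

  /* driver-side lookup, shown for context only */
  pb->power_supply = devm_regulator_get(&pdev->dev, "power");
  if (IS_ERR(pb->power_supply))
          return PTR_ERR(pb->power_supply);

Hence the consumer supply entry added to the board code by this patch.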
Signed-off-by: Laurent Pinchart Signed-off-by: Simon Horman (cherry picked from commit ad11cb9a5cf96346f1240995c672cdbb5501785c) Signed-off-by: Simon Horman --- arch/arm/mach-shmobile/board-armadillo800eva.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 958e3cbf0ac2..c18689123023 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), }; +/* Fixed 3.3V regulator used by LCD backlight */ +static struct regulator_consumer_supply fixed5v0_power_consumers[] = { + REGULATOR_SUPPLY("power", "pwm-backlight.0"), +}; + /* Fixed 3.3V regulator to be used by SDHI0 */ static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), @@ -1196,6 +1201,8 @@ static void __init eva_init(void) regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, + ARRAY_SIZE(fixed5v0_power_consumers), 5000000); pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); From db6077fd0b7dd41dc6ff18329cec979379071f87 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 11 Dec 2013 15:45:32 -0800 Subject: [PATCH 259/312] iscsi-target: Fix incorrect np->np_thread NULL assignment When shutting down a target there is a race condition between iscsit_del_np() and __iscsi_target_login_thread(). The latter sets the thread pointer to NULL, and the former tries to issue kthread_stop() on that pointer without any synchronization. This patch moves the np->np_thread NULL assignment into iscsit_del_np(), after kthread_stop() has completed. It also removes the signal_pending() + np_state check, and only exits when kthread_should_stop() is true. Reported-by: Hannes Reinecke Cc: #3.12+ Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 1 + drivers/target/iscsi/iscsi_target_login.c | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 02182ab017b1..00867190413c 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np) */ send_sig(SIGINT, np->np_thread, 1); kthread_stop(np->np_thread); + np->np_thread = NULL; } np->np_transport->iscsit_free_np(np); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 4eb93b2b6473..e29279e6b577 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1403,11 +1403,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) out: stop = kthread_should_stop(); - if (!stop && signal_pending(current)) { - spin_lock_bh(&np->np_thread_lock); - stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); - spin_unlock_bh(&np->np_thread_lock); - } /* Wait for another socket.. 
*/ if (!stop) return 1; @@ -1415,7 +1410,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); np->np_thread_state = ISCSI_NP_THREAD_EXIT; - np->np_thread = NULL; spin_unlock_bh(&np->np_thread_lock); return 0; From 2853c2b6671509591be09213954d7249ca6ff224 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 11 Dec 2013 16:20:13 -0800 Subject: [PATCH 260/312] iser-target: Move INIT_WORK setup into isert_create_device_ib_res This patch moves INIT_WORK setup for cq_desc->cq_[rx,tx]_work into isert_create_device_ib_res(), instead of being done each callback invocation in isert_cq_[rx,tx]_callback(). This also fixes a 'INFO: trying to register non-static key' warning when cancel_work_sync() is called before INIT_WORK has setup the struct work_struct. Reported-by: Or Gerlitz Cc: #3.12+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 78f6e92c571f..9804fca6bf06 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) isert_conn->conn_rx_descs = NULL; } +static void isert_cq_tx_work(struct work_struct *); static void isert_cq_tx_callback(struct ib_cq *, void *); +static void isert_cq_rx_work(struct work_struct *); static void isert_cq_rx_callback(struct ib_cq *, void *); static int @@ -259,6 +261,7 @@ isert_create_device_ib_res(struct isert_device *device) cq_desc[i].device = device; cq_desc[i].cq_index = i; + INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work); device->dev_rx_cq[i] = ib_create_cq(device->ib_device, isert_cq_rx_callback, isert_cq_event_callback, @@ -270,6 +273,7 @@ isert_create_device_ib_res(struct isert_device *device) goto out_cq; } + INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); device->dev_tx_cq[i] = ib_create_cq(device->ib_device, isert_cq_tx_callback, isert_cq_event_callback, @@ -1732,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context) { struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; - INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work); queue_work(isert_comp_wq, &cq_desc->cq_tx_work); } @@ -1776,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context) { struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; - INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work); queue_work(isert_rx_wq, &cq_desc->cq_rx_work); } From 95cadace8f3959282e76ebf8b382bd0930807d2c Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 12 Dec 2013 12:24:11 -0800 Subject: [PATCH 261/312] target/file: Update hw_max_sectors based on current block_size This patch allows FILEIO to update hw_max_sectors based on the current max_bytes_per_io. This is required because vfs_[writev,readv]() can accept a maximum of 2048 iovecs per call, so the enforced hw_max_sectors really needs to be calculated based on block_size. This addresses a >= v3.5 bug where block_size=512 was rejecting > 1M sized I/O requests, because FD_MAX_SECTORS was hardcoded to 2048 for the block_size=4096 case. 
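To make the arithmetic concrete, with FD_MAX_BYTES being the 8 MiB ceiling implied by 2048 iovecs of one 4 KiB page each, the advertised limit now scales with the configured block size:

  /* illustration of the new calculation */
  dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
  /* 8388608 / 4096 = 2048  sectors (matches the old FD_MAX_SECTORS)        */
  /* 8388608 /  512 = 16384 sectors, so >1M requests are no longer rejected */
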
(v2: Use max_bytes_per_io instead of ->update_hw_max_sectors) Reported-by: Henrik Goldman Cc: #3.5+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_device.c | 5 +++++ drivers/target/target_core_file.c | 8 ++++---- drivers/target/target_core_file.h | 5 ++++- include/target/target_core_base.h | 1 + 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 207b340498a3..d06de84b069b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) dev->dev_attrib.block_size = block_size; pr_debug("dev[%p]: SE Device block_size changed to %u\n", dev, block_size); + + if (dev->dev_attrib.max_bytes_per_io) + dev->dev_attrib.hw_max_sectors = + dev->dev_attrib.max_bytes_per_io / block_size; + return 0; } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0e34cda3271e..78241a53b555 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, TARGET_CORE_MOD_VERSION); - pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" - " MaxSectors: %u\n", - hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", + hba->hba_id, fd_host->fd_host_id); return 0; } @@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev) } dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; + dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; + dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5bd2399..d7772c167685 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -7,7 +7,10 @@ #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 -#define FD_MAX_SECTORS 2048 +/* + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call + */ +#define FD_MAX_BYTES 8388608 #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 9f1dda659c5a..321301c0a643 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -620,6 +620,7 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u32 max_bytes_per_io; struct se_device *da_dev; struct config_group da_group; }; From 4799e310caf0fb9078389766d0210d1c6133ad51 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 16 Dec 2013 00:16:52 -0800 Subject: [PATCH 262/312] ARM: shmobile: bockw: fixup DMA mask 4dcfa60071b3d23f0181f27d8519f12e37cefbb9 (ARM: DMA-API: better handing of DMA masks for coherent allocations) exchanged DMA mask check method. Below warning will appear without this patch asoc-simple-card asoc-simple-card.0: \ Coherent DMA mask 0xffffffffffffffff is larger than dma_addr_t allows asoc-simple-card asoc-simple-card.0: \ Driver did not use or check the return value from dma_set_coherent_mask()? 
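The fix itself is a one-liner, shown in the diff below; as a general sketch (assuming a device that can only address 32 bits), the platform data should advertise a mask the DMA core can validate instead of an all-ones value:

  #include <linux/dma-mapping.h>
  #include <linux/platform_device.h>

  static struct platform_device_info cardinfo = {
          /* ... */
          .dma_mask = DMA_BIT_MASK(32),  /* not ~0, which claims 64-bit coherent DMA */
  };
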
Signed-off-by: Kuninori Morimoto Acked-by: Laurent Pinchart Signed-off-by: Simon Horman --- arch/arm/mach-shmobile/board-bockw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 38611526fe9a..3c4995aebd22 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c @@ -679,7 +679,7 @@ static void __init bockw_init(void) .id = i, .data = &rsnd_card_info[i], .size_data = sizeof(struct asoc_simple_card_info), - .dma_mask = ~0, + .dma_mask = DMA_BIT_MASK(32), }; platform_device_register_full(&cardinfo); From d721a15c300c5f638a11573a6dd492158e737d6a Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 16 Dec 2013 12:38:48 +0000 Subject: [PATCH 263/312] ARM: shmobile: r8a7790: fix sdhi resource sizes The r8a7790.dtsi file has four sdhi nodes, of which the first two have the wrong resource size for their register block. This causes the sh_mobile_sdhi driver to fail to communicate with the card at all. Change the sdhi{0,1} node size from 0x100 to 0x200 to correct these nodes, as per Kuninori Morimoto's response to the original patch where all four nodes were changed. sdhi{2,3} are the correct size. This bug has been present since sdhi resources were added to the r8a7790 by 8c9b1aa41853272a ("ARM: shmobile: r8a7790: add MMCIF and SDHI DT templates") in v3.11-rc2. Signed-off-by: Ben Dooks Tested-by: William Towle Acked-by: Kuninori Morimoto Signed-off-by: Simon Horman --- arch/arm/boot/dts/r8a7790.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 46e1d7ef163f..9987dd0e9c59 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -241,7 +241,7 @@ pfc: pfc@e6060000 { sdhi0: sdhi@ee100000 { compatible = "renesas,sdhi-r8a7790"; - reg = <0 0xee100000 0 0x100>; + reg = <0 0xee100000 0 0x200>; interrupt-parent = <&gic>; interrupts = <0 165 4>; cap-sd-highspeed; @@ -250,7 +250,7 @@ sdhi0: sdhi@ee100000 { sdhi1: sdhi@ee120000 { compatible = "renesas,sdhi-r8a7790"; - reg = <0 0xee120000 0 0x100>; + reg = <0 0xee120000 0 0x200>; interrupt-parent = <&gic>; interrupts = <0 166 4>; cap-sd-highspeed; From 1e01c7eb7c431a74437d73fe54670398b4d2b222 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 19 Dec 2013 18:55:58 +0530 Subject: [PATCH 264/312] ARC: Allow conditional multiple inclusion of uapi/asm/unistd.h Commit 97bc386fc12d "ARC: Add guard macro to uapi/asm/unistd.h" inhibited multiple inclusion of ARCH unistd.h. This however hosed the system, since the Generic syscall table generator relies on it being included twice; without that, an empty table was emitted by the C preprocessor.
Fix that by allowing one exception to the rule for this special case (just like Xtensa). Suggested-by: Chen Gang Signed-off-by: Vineet Gupta --- arch/arc/include/uapi/asm/unistd.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 68125dd766c6..39e58d1cdf90 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -8,7 +8,11 @@ /******** no-legacy-syscalls-ABI *******/ -#ifndef _UAPI_ASM_ARC_UNISTD_H +/* + * Non-typical guard macro to enable inclusion twice in ARCH sys.c + * That is how the Generic syscall wrapper generator works + */ +#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) #define _UAPI_ASM_ARC_UNISTD_H #define __ARCH_WANT_SYS_EXECVE @@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls) #define __NR_sysfs (__NR_arch_specific_syscall + 3) __SYSCALL(__NR_sysfs, sys_sysfs) +#undef __SYSCALL + #endif From a26ba7faddd51be3dd8957543a9d984a4ddd104a Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Thu, 19 Dec 2013 15:02:22 +0530 Subject: [PATCH 265/312] drivers: block: Mark the functions as static in skd_main.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark functions skd_skmsg_state_to_str() and skd_skreq_state_to_str() as static in skd_main.c because they are not used outside this file. This eliminates the following warnings in skd_main.c: drivers/block/skd_main.c:5272:13: warning: no previous prototype for ‘skd_skmsg_state_to_str’ [-Wmissing-prototypes] drivers/block/skd_main.c:5284:13: warning: no previous prototype for ‘skd_skreq_state_to_str’ [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Signed-off-by: Jens Axboe --- drivers/block/skd_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 9199c93be926..eb6e1e0e8db2 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state) } } -const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) +static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) { switch (state) { case SKD_MSG_STATE_IDLE: @@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) } } -const char *skd_skreq_state_to_str(enum skd_req_state state) +static const char *skd_skreq_state_to_str(enum skd_req_state state) { switch (state) { case SKD_REQ_STATE_IDLE: From 0c56010c83703e1f33325838eda9a2077827b6f1 Mon Sep 17 00:00:00 2001 From: Matias Bjorling Date: Tue, 10 Dec 2013 16:50:38 +0100 Subject: [PATCH 266/312] null_blk: mem garbage on NUMA systems during init For NUMA systems, we initialize the blk-mq layer using a per-node hctx. We initialize submit queues to 1, while blk-mq nr_hw_queues is initialized to the number of NUMA nodes. This makes the null_init_hctx function overwrite memory outside of what it allocated. In my case it led to writing garbage into struct request_queue's mq_map.
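Concretely, the pre-fix sequence on an assumed 4-node machine with use_per_node_hctx=1 looks like this (paraphrased from the existing code):

  nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
                          GFP_KERNEL);                /* 1 element   */
  null_mq_reg.nr_hw_queues = nr_online_nodes;         /* 4 hw queues */
  /* blk-mq then invokes null_init_hctx() for indexes 0..3, which writes
   * into nullb->queues[index] and thus past the single-element allocation */

The diff below therefore raises submit_queues to nr_online_nodes before the queues array is sized.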
Signed-off-by: Matias Bjorling Cc: Jens Axboe Signed-off-by: Linus Torvalds --- drivers/block/null_blk.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ea192ec029c4..f370fc13aea5 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -495,23 +495,23 @@ static int null_add_dev(void) spin_lock_init(&nullb->lock); + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) + submit_queues = nr_online_nodes; + if (setup_queues(nullb)) goto err; if (queue_mode == NULL_Q_MQ) { null_mq_reg.numa_node = home_node; null_mq_reg.queue_depth = hw_queue_depth; + null_mq_reg.nr_hw_queues = submit_queues; if (use_per_node_hctx) { null_mq_reg.ops->alloc_hctx = null_alloc_hctx; null_mq_reg.ops->free_hctx = null_free_hctx; - - null_mq_reg.nr_hw_queues = nr_online_nodes; } else { null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - - null_mq_reg.nr_hw_queues = submit_queues; } nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); From 12f8f4fc0314103d47f9b1cbc812597b8d893ce1 Mon Sep 17 00:00:00 2001 From: Matias Bjorling Date: Wed, 18 Dec 2013 13:41:42 +0100 Subject: [PATCH 267/312] null_blk: documentation Add description of module and its parameters. Signed-off-by: Matias Bjorling Signed-off-by: Jens Axboe --- Documentation/block/null_blk.txt | 71 ++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 Documentation/block/null_blk.txt diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt new file mode 100644 index 000000000000..9e1b047fd13d --- /dev/null +++ b/Documentation/block/null_blk.txt @@ -0,0 +1,71 @@ +Null block device driver +================================================================================ + +I. Overview + +The null block device (/dev/nullb*) is used for benchmarking the various +block-layer implementations. It emulates a block device of X gigabytes in size. +The following instances are possible: + + Single-queue block-layer + - Request-based. + - Single submission queue per device. + - Implements IO scheduling algorithms (CFQ, Deadline, noop). + Multi-queue block-layer + - Request-based. + - Configurable submission queues per device. + No block-layer (Known as bio-based) + - Bio-based. IO requests are submitted directly to the device driver. + - Directly accepts bio data structure and returns them. + +All of them has a completion queue for each core in the system. + +II. Module parameters applicable for all instances: + +queue_mode=[0-2]: Default: 2-Multi-queue + Selects which block-layer the module should instantiate with. + + 0: Bio-based. + 1: Single-queue. + 2: Multi-queue. + +home_node=[0--nr_nodes]: Default: NUMA_NO_NODE + Selects what socket the data structures is allocated from. + +gb=[Size in GB]: Default: 250GB + The size of the device reported to the system. + +bs=[Block size (in bytes)]: Default: 512 bytes + The block size reported to the system. + +nr_devices=[Num. devices]: Default: 2 + Number of block devices instantiated. They are instantiated as /dev/nullb0, + etc. + +irq_mode=[0-2]: Default: Soft-irq + The completion mode used for completing IOs to the block-layer. + + 0: None. + 1: Soft-irq. Uses ipi to complete IOs across sockets. Simulates the overhead + when IOs are issued from another socket than the home the device is + connected to. + 2: Timer: Waits a specific period (completion_nsec) for each IO before + completion. + +completion_nsec=[Num. 
ns]: Default: 10.000ns + Combined with irq_mode=2 (timer). The time each completion event must wait. + +submit_queues=[0..nr_cpus]: + The number of submission queues attached to the device driver. If unset, it + defaults to 1 on single-queue and bio-based instances. For multi-queue, + its ignored when use_per_node_hctx module parameter is 1. + +hw_queue_depth=[0..qdepth]: Defaults: 64 + The hardware queue depth of the device. + +III: Multi-queue specific parameters + +use_per_node_hctx=[0/1]: Defaults: 1 + If 1, the multi-queue block layer is instantiated with a hardware dispatch + queue for each CPU node in the system. If 0, it is instantiated with the + number of queues defined in the submit_queues parameter. From 2d263a7856cbaf26dd89b671e2161c4a49f8461b Mon Sep 17 00:00:00 2001 From: Matias Bjorling Date: Wed, 18 Dec 2013 13:41:43 +0100 Subject: [PATCH 268/312] null_blk: refactor init and init errors code paths Simplify the initialization logic of the three block-layers. - The queue initialization is split into two parts. This allows reuse of code when initializing the sq-, bio- and mq-based layers. - Set submit_queues default value to 0 and always set it at init time. - Simplify the init error code paths. Signed-off-by: Matias Bjorling Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 63 ++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 25 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index f370fc13aea5..f0aeb2a7a9ca 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -65,7 +65,7 @@ enum { NULL_Q_MQ = 2, }; -static int submit_queues = 1; +static int submit_queues; module_param(submit_queues, int, S_IRUGO); MODULE_PARM_DESC(submit_queues, "Number of submission queues"); @@ -355,16 +355,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) kfree(hctx); } +static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) +{ + BUG_ON(!nullb); + BUG_ON(!nq); + + init_waitqueue_head(&nq->wait); + nq->queue_depth = nullb->queue_depth; +} + static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int index) { struct nullb *nullb = data; struct nullb_queue *nq = &nullb->queues[index]; - init_waitqueue_head(&nq->wait); - nq->queue_depth = nullb->queue_depth; - nullb->nr_queues++; hctx->driver_data = nq; + null_init_queue(nullb, nq); + nullb->nr_queues++; return 0; } @@ -417,13 +425,13 @@ static int setup_commands(struct nullb_queue *nq) nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); if (!nq->cmds) - return 1; + return -ENOMEM; tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); if (!nq->tag_map) { kfree(nq->cmds); - return 1; + return -ENOMEM; } for (i = 0; i < nq->queue_depth; i++) { @@ -454,33 +462,37 @@ static void cleanup_queues(struct nullb *nullb) static int setup_queues(struct nullb *nullb) { - struct nullb_queue *nq; - int i; - - nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL); + nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), + GFP_KERNEL); if (!nullb->queues) - return 1; + return -ENOMEM; nullb->nr_queues = 0; nullb->queue_depth = hw_queue_depth; - if (queue_mode == NULL_Q_MQ) - return 0; + return 0; +} + +static int init_driver_queues(struct nullb *nullb) +{ + struct nullb_queue *nq; + int i, ret = 0; for (i = 0; i < submit_queues; i++) { nq = &nullb->queues[i]; - init_waitqueue_head(&nq->wait); - 
nq->queue_depth = hw_queue_depth; - if (setup_commands(nq)) - break; + + null_init_queue(nullb, nq); + + ret = setup_commands(nq); + if (ret) + goto err_queue; nullb->nr_queues++; } - if (i == submit_queues) - return 0; - + return 0; +err_queue: cleanup_queues(nullb); - return 1; + return ret; } static int null_add_dev(void) @@ -495,9 +507,6 @@ static int null_add_dev(void) spin_lock_init(&nullb->lock); - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) - submit_queues = nr_online_nodes; - if (setup_queues(nullb)) goto err; @@ -518,11 +527,13 @@ static int null_add_dev(void) } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); blk_queue_make_request(nullb->q, null_queue_bio); + init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); blk_queue_prep_rq(nullb->q, null_rq_prep_fn); if (nullb->q) blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + init_driver_queues(nullb); } if (!nullb->q) @@ -579,7 +590,9 @@ static int __init null_init(void) } #endif - if (submit_queues > nr_cpu_ids) + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) + submit_queues = nr_online_nodes; + else if (submit_queues > nr_cpu_ids) submit_queues = nr_cpu_ids; else if (!submit_queues) submit_queues = 1; From d15ee6b1a43afbe1a6cece3bd8d336a9d5cb7060 Mon Sep 17 00:00:00 2001 From: Matias Bjorling Date: Wed, 18 Dec 2013 13:41:44 +0100 Subject: [PATCH 269/312] null_blk: warning on ignored submit_queues param Let the user know when the number of submission queues are being ignored. Signed-off-by: Matias Bjorling Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index f0aeb2a7a9ca..8f2e7c330d6d 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -590,9 +590,12 @@ static int __init null_init(void) } #endif - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { + if (submit_queues > 0) + pr_warn("null_blk: submit_queues param is set to %u.", + nr_online_nodes); submit_queues = nr_online_nodes; - else if (submit_queues > nr_cpu_ids) + } else if (submit_queues > nr_cpu_ids) submit_queues = nr_cpu_ids; else if (!submit_queues) submit_queues = 1; From cdc27c27843248ae7eb0df5fc261dd004eaa5670 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 17 Dec 2013 17:09:08 +0000 Subject: [PATCH 270/312] arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events Commit 8f34a1da35ae ("arm64: ptrace: use HW_BREAKPOINT_EMPTY type for disabled breakpoints") fixed an issue with GDB trying to zero breakpoint control registers. The problem there is that the arch hw_breakpoint code will attempt to create a (disabled), execute breakpoint of length 0. This will fail validation and report unexpected failure to GDB. To avoid this, we treated disabled breakpoints as HW_BREAKPOINT_EMPTY, but that seems to have broken with recent kernels, causing watchpoints to be treated as TYPE_INST in the core code and returning ENOSPC for any further breakpoints. This patch fixes the problem by prioritising the `enable' field of the breakpoint: if it is cleared, we simply update the perf_event_attr to indicate that the thing is disabled and don't bother changing either the type or the length. This reinforces the behaviour that the breakpoint control register is essentially read-only apart from the enable bit when disabling a breakpoint. 
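Condensed, the new ordering in ptrace_hbp_fill_attr_ctrl() is:

  attr->disabled = !ctrl.enabled;
  if (attr->disabled)
          return 0;  /* leave bp_type/bp_len untouched for a disabled breakpoint */
  /* only enabled breakpoints go through the type/length validation below */
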
Cc: Reported-by: Aaron Liu Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6777a2192b83..6a8928bba03c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, { int err, len, type, disabled = !ctrl.enabled; - if (disabled) { - len = 0; - type = HW_BREAKPOINT_EMPTY; - } else { - err = arch_bp_generic_fields(ctrl, &len, &type); - if (err) - return err; + attr->disabled = disabled; + if (disabled) + return 0; - switch (note_type) { - case NT_ARM_HW_BREAK: - if ((type & HW_BREAKPOINT_X) != type) - return -EINVAL; - break; - case NT_ARM_HW_WATCH: - if ((type & HW_BREAKPOINT_RW) != type) - return -EINVAL; - break; - default: + err = arch_bp_generic_fields(ctrl, &len, &type); + if (err) + return err; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if ((type & HW_BREAKPOINT_X) != type) return -EINVAL; - } + break; + case NT_ARM_HW_WATCH: + if ((type & HW_BREAKPOINT_RW) != type) + return -EINVAL; + break; + default: + return -EINVAL; } attr->bp_len = len; attr->bp_type = type; - attr->disabled = disabled; return 0; } From 85fbd722ad0f5d64d1ad15888cd1eb2188bfb557 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 18 Dec 2013 07:07:32 -0500 Subject: [PATCH 271/312] libata, freezer: avoid block device removal while system is frozen MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Freezable kthreads and workqueues are fundamentally problematic in that they effectively introduce a big kernel lock widely used in the kernel and have already been the culprit of several deadlock scenarios. This is the latest occurrence. During resume, libata rescans all the ports and revalidates all pre-existing devices. If it determines that a device has gone missing, the device is removed from the system which involves invalidating block device and flushing bdi while holding driver core layer locks. Unfortunately, this can race with the rest of device resume. Because freezable kthreads and workqueues are thawed after device resume is complete and block device removal depends on freezable workqueues and kthreads (e.g. bdi_wq, jbd2) to make progress, this can lead to deadlock - block device removal can't proceed because kthreads are frozen and kthreads can't be thawed because device resume is blocked behind block device removal. 839a8e8660b6 ("writeback: replace custom worker pool implementation with unbound workqueue") made this particular deadlock scenario more visible but the underlying problem has always been there - the original forker task and jbd2 are freezable too. In fact, this is highly likely just one of many possible deadlock scenarios given that freezer behaves as a big kernel lock and we don't have any debug mechanism around it. I believe the right thing to do is getting rid of freezable kthreads and workqueues. This is something fundamentally broken. For now, implement a funny workaround in libata - just avoid doing block device hot[un]plug while the system is frozen. Kernel engineering at its finest. :( v2: Add EXPORT_SYMBOL_GPL(pm_freezing) for cases where libata is built as a module. v3: Comment updated and polling interval changed to 10ms as suggested by Rafael. 
v4: Add #ifdef CONFIG_FREEZER around the hack as pm_freezing is not defined when FREEZER is not configured thus breaking build. Reported by kbuild test robot. Signed-off-by: Tejun Heo Reported-by: Tomaž Šolc Reviewed-by: "Rafael J. Wysocki" Link: https://bugzilla.kernel.org/show_bug.cgi?id=62801 Link: http://lkml.kernel.org/r/20131213174932.GA27070@htj.dyndns.org Cc: Greg Kroah-Hartman Cc: Len Brown Cc: Oleg Nesterov Cc: stable@vger.kernel.org Cc: kbuild test robot --- drivers/ata/libata-scsi.c | 21 +++++++++++++++++++++ kernel/freezer.c | 6 ++++++ 2 files changed, 27 insertions(+) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index db6dfcfa3e2e..176f62950e3d 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3871,6 +3871,27 @@ void ata_scsi_hotplug(struct work_struct *work) return; } + /* + * XXX - UGLY HACK + * + * The block layer suspend/resume path is fundamentally broken due + * to freezable kthreads and workqueue and may deadlock if a block + * device gets removed while resume is in progress. I don't know + * what the solution is short of removing freezable kthreads and + * workqueues altogether. + * + * The following is an ugly hack to avoid kicking off device + * removal while freezer is active. This is a joke but does avoid + * this particular deadlock scenario. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=62801 + * http://marc.info/?l=linux-kernel&m=138695698516487 + */ +#ifdef CONFIG_FREEZER + while (pm_freezing) + msleep(10); +#endif + DPRINTK("ENTER\n"); mutex_lock(&ap->scsi_scan_mutex); diff --git a/kernel/freezer.c b/kernel/freezer.c index b462fa197517..aa6a8aadb911 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt); bool pm_freezing; bool pm_nosig_freezing; +/* + * Temporary export for the deadlock workaround in ata_scsi_hotplug(). + * Remove once the hack becomes unnecessary. + */ +EXPORT_SYMBOL_GPL(pm_freezing); + /* protects freezing and frozen transitions */ static DEFINE_SPINLOCK(freezer_lock); From 40e2d7f9b5dae048789c64672bf3027fbb663ffa Mon Sep 17 00:00:00 2001 From: Len Brown Date: Wed, 18 Dec 2013 16:44:57 -0500 Subject: [PATCH 272/312] x86 idle: Repair large-server 50-watt idle-power regression Linux 3.10 changed the timing of how thread_info->flags is touched: x86: Use generic idle loop (7d1a941731fabf27e5fb6edbebb79fe856edb4e5) This caused Intel NHM-EX and WSM-EX servers to experience a large number of immediate MONITOR/MWAIT break wakeups, which caused cpuidle to demote from deep C-states to shallow C-states, which caused these platforms to experience a significant increase in idle power. Note that this issue was already present before the commit above, however, it wasn't seen often enough to be noticed in power measurements. Here we extend an errata workaround from the Core2 EX "Dunnington" to extend to NHM-EX and WSM-EX, to prevent these immediate returns from MWAIT, reducing idle power on these platforms. While only acpi_idle ran on Dunnington, intel_idle may also run on these two newer systems. As of today, there are no other models that are known to need this tweak. Link: http://lkml.kernel.org/r/CAJvTdK=%2BaNN66mYpCGgbHGCHhYQAKx-vB0kJSWjVpsNb_hOAtQ@mail.gmail.com Signed-off-by: Len Brown Link: http://lkml.kernel.org/r/baff264285f6e585df757d58b17788feabc68918.1387403066.git.len.brown@intel.com Cc: # 3.12.x, 3.11.x, 3.10.x Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/cpu/intel.c | 3 ++- drivers/idle/intel_idle.c | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0dff939..ea04b342c026 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_PEBS); } - if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) + if (c->x86 == 6 && cpu_has_clflush && + (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); #ifdef CONFIG_X86_64 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 92d1206482a6..f80b700f821c 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -377,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev, if (!current_set_polling_and_test()) { + if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) + clflush((void *)¤t_thread_info()->flags); + __monitor((void *)¤t_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) From fce7d3bfc0ae70ca2a57868f2a9708b13185fd1f Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 16 Dec 2013 14:39:40 +0000 Subject: [PATCH 273/312] x86/efi: Don't select EFI from certain special ACPI drivers Commit 7ea6c6c1 ("Move cper.c from drivers/acpi/apei to drivers/firmware/efi") results in CONFIG_EFI being enabled even when the user doesn't want this. Since ACPI APEI used to build fine without UEFI (and as far as I know also has no functional depency on it), at least in that case using a reverse dependency is wrong (and a straight one isn't needed). Whether the same is true for ACPI_EXTLOG I don't know - if there is a functional dependency, it should depend on EFI rather than selecting it. It certainly has (currently) no build dependency. Adjust Kconfig and build logic so that the bad dependency gets avoided. 
Signed-off-by: Jan Beulich Acked-by: Tony Luck Cc: Matt Fleming Link: http://lkml.kernel.org/r/52AF1EBC020000780010DBF9@nat28.tlf.novell.com Signed-off-by: Ingo Molnar --- drivers/acpi/Kconfig | 1 - drivers/acpi/apei/Kconfig | 1 - drivers/firmware/Makefile | 1 + drivers/firmware/efi/Kconfig | 6 +++--- drivers/firmware/efi/Makefile | 2 +- 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5d9248526d78..4770de5707b9 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig" config ACPI_EXTLOG tristate "Extended Error Log support" depends on X86_MCE && X86_LOCAL_APIC - select EFI select UEFI_CPER default n help diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 786294bb682c..3650b2183227 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -2,7 +2,6 @@ config ACPI_APEI bool "ACPI Platform Error Interface (APEI)" select MISC_FILESYSTEMS select PSTORE - select EFI select UEFI_CPER depends on X86 help diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 299fad6b5867..5373dc5b6011 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-$(CONFIG_EFI) += efi/ +obj-$(CONFIG_UEFI_CPER) += efi/ diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 3150aa4874e8..6aecbc86ec94 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE backend for pstore by default. This setting can be overridden using the efivars module's pstore_disable parameter. -config UEFI_CPER - def_bool n - endmenu + +config UEFI_CPER + bool diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 9ba156d3c775..6c2a41ec21ba 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -1,7 +1,7 @@ # # Makefile for linux kernel # -obj-y += efi.o vars.o +obj-$(CONFIG_EFI) += efi.o vars.o obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o From de06875f089678f4f9f1e8d5e1421fb0ceab12d0 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 26 Nov 2013 11:49:24 -0800 Subject: [PATCH 274/312] target: Remove extra percpu_ref_init lun->lun_ref is also initialized in core_tpg_post_addlun, so it doesn't need to be done in core_tpg_setup_virtual_lun0. 
(nab: Drop left-over percpu_ref_cancel_init in failure path) Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tpg.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f755712a9a0d..2a573de19a9f 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -656,16 +656,10 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) spin_lock_init(&lun->lun_sep_lock); init_completion(&lun->lun_ref_comp); - ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); + ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); if (ret < 0) return ret; - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); - if (ret < 0) { - percpu_ref_cancel_init(&lun->lun_ref); - return ret; - } - return 0; } From dcd211997ddba3229e875f5990ea14f920934729 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 17 Dec 2013 01:51:22 -0800 Subject: [PATCH 275/312] qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure This patch fixes a possible scsi_host reference leak in qlt_lport_register(), when a non zero return from the passed (*callback) does not call drop the local reference via scsi_host_put() before returning. This currently does not effect existing tcm_qla2xxx code as the passed callback will never fail, but fix this up regardless for future code. Cc: Chad Dupuis Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/qla_target.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 3bb0a1d1622a..38a1257e76e1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4291,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, if (rc != 0) { ha->tgt.tgt_ops = NULL; ha->tgt.target_lport_ptr = NULL; + scsi_host_put(host); } mutex_unlock(&qla_tgt_mutex); return rc; From e2f6c88fb903e123edfd1106b0b8310d5117f774 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Dec 2013 19:41:46 -0500 Subject: [PATCH 276/312] drm/radeon: fix asic gfx values for scrapper asics Fixes gfx corruption on certain TN/RL parts. 
bug: https://bugs.freedesktop.org/show_bug.cgi?id=60389 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/ni.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 11aab2ab54ce..f59a9e9fccf8 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x999C)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; + rdev->config.cayman.max_hw_contexts = 8; + rdev->config.cayman.sx_max_export_size = 256; + rdev->config.cayman.sx_max_export_pos_size = 64; + rdev->config.cayman.sx_max_export_smx_size = 192; } else if ((rdev->pdev->device == 0x9903) || (rdev->pdev->device == 0x9904) || (rdev->pdev->device == 0x990A) || @@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x999D)) { rdev->config.cayman.max_simds_per_se = 4; rdev->config.cayman.max_backends_per_se = 2; + rdev->config.cayman.max_hw_contexts = 8; + rdev->config.cayman.sx_max_export_size = 256; + rdev->config.cayman.sx_max_export_pos_size = 64; + rdev->config.cayman.sx_max_export_smx_size = 192; } else if ((rdev->pdev->device == 0x9919) || (rdev->pdev->device == 0x9990) || (rdev->pdev->device == 0x9991) || @@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; + rdev->config.cayman.max_hw_contexts = 4; + rdev->config.cayman.sx_max_export_size = 128; + rdev->config.cayman.sx_max_export_pos_size = 32; + rdev->config.cayman.sx_max_export_smx_size = 96; } else { rdev->config.cayman.max_simds_per_se = 2; rdev->config.cayman.max_backends_per_se = 1; + rdev->config.cayman.max_hw_contexts = 4; + rdev->config.cayman.sx_max_export_size = 128; + rdev->config.cayman.sx_max_export_pos_size = 32; + rdev->config.cayman.sx_max_export_smx_size = 96; } rdev->config.cayman.max_texture_channel_caches = 2; rdev->config.cayman.max_gprs = 256; @@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) rdev->config.cayman.max_gs_threads = 32; rdev->config.cayman.max_stack_entries = 512; rdev->config.cayman.sx_num_of_sets = 8; - rdev->config.cayman.sx_max_export_size = 256; - rdev->config.cayman.sx_max_export_pos_size = 64; - rdev->config.cayman.sx_max_export_smx_size = 192; - rdev->config.cayman.max_hw_contexts = 8; rdev->config.cayman.sq_num_cf_insts = 2; rdev->config.cayman.sc_prim_fifo_size = 0x40; From f5a44db5d2d677dfbf12deee461f85e9ec633961 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 20 Dec 2013 09:29:35 -0500 Subject: [PATCH 277/312] ext4: add explicit casts when masking cluster sizes The missing casts can cause the high 64-bits of the physical blocks to be lost. Set up new macros which allows us to make sure the right thing happen, even if at some point we end up supporting larger logical block numbers. Thanks to the Emese Revfy and the PaX security team for reporting this issue. 
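A worked example of the truncation (assuming s_cluster_ratio is an unsigned int equal to 16 and a physical block just above 4G):

  ext4_fsblk_t pblk = 0x100000010ULL;
  /* old expression: the mask is computed in 32 bits and zero-extended,
   * so the high bits of pblk are cleared */
  ext4_fsblk_t bad  = pblk & ~(sbi->s_cluster_ratio - 1);   /* 0x10        */
  /* new macro: the cast keeps the mask 64 bits wide */
  ext4_fsblk_t good = EXT4_PBLK_CMASK(sbi, pblk);           /* 0x100000010 */
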
Reported-by: PaX Team Reported-by: Emese Revfy Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/ext4.h | 10 ++++++++++ fs/ext4/extents.c | 26 ++++++++++++-------------- fs/ext4/mballoc.c | 6 +++--- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e6185031c1cc..ece55565b9cd 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -268,6 +268,16 @@ struct ext4_io_submit { /* Translate # of blks to # of clusters */ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ (sbi)->s_cluster_bits) +/* Mask out the low bits to get the starting block of the cluster */ +#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ + ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) +#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ + ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) +/* Get the cluster offset */ +#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ + ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) +#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ + ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) /* * Structure of a blocks group descriptor diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 267c9fb53bf9..4410cc3d6ee2 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1851,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, depth = ext_depth(inode); if (!path[depth].p_ext) goto out; - b2 = le32_to_cpu(path[depth].p_ext->ee_block); - b2 &= ~(sbi->s_cluster_ratio - 1); + b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); /* * get the next allocated block if the extent in the path @@ -1862,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, b2 = ext4_ext_next_allocated_block(path); if (b2 == EXT_MAX_BLOCKS) goto out; - b2 &= ~(sbi->s_cluster_ratio - 1); + b2 = EXT4_LBLK_CMASK(sbi, b2); } /* check for wrap through zero on extent logical start block*/ @@ -2521,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, * extent, we have to mark the cluster as used (store negative * cluster number in partial_cluster). 
*/ - unaligned = pblk & (sbi->s_cluster_ratio - 1); + unaligned = EXT4_PBLK_COFF(sbi, pblk); if (unaligned && (ee_len == num) && (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) *partial_cluster = EXT4_B2C(sbi, pblk); @@ -2615,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, * accidentally freeing it later on */ pblk = ext4_ext_pblock(ex); - if (pblk & (sbi->s_cluster_ratio - 1)) + if (EXT4_PBLK_COFF(sbi, pblk)) *partial_cluster = -((long long)EXT4_B2C(sbi, pblk)); ex--; @@ -3770,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t lblk_start, lblk_end; - lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); + lblk_start = EXT4_LBLK_CMASK(sbi, lblk); lblk_end = lblk_start + sbi->s_cluster_ratio - 1; return ext4_find_delalloc_range(inode, lblk_start, lblk_end); @@ -3829,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); /* Check towards left side */ - c_offset = lblk_start & (sbi->s_cluster_ratio - 1); + c_offset = EXT4_LBLK_COFF(sbi, lblk_start); if (c_offset) { - lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); + lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); lblk_to = lblk_from + c_offset - 1; if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) @@ -3839,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, } /* Now check towards right. */ - c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); + c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); if (allocated_clusters && c_offset) { lblk_from = lblk_start + num_blks; lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; @@ -4047,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, struct ext4_ext_path *path) { struct ext4_sb_info *sbi = EXT4_SB(sb); - ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); + ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); ext4_lblk_t ex_cluster_start, ex_cluster_end; ext4_lblk_t rr_cluster_start; ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); @@ -4065,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, (rr_cluster_start == ex_cluster_start)) { if (rr_cluster_start == ex_cluster_end) ee_start += ee_len - 1; - map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + - c_offset; + map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; map->m_len = min(map->m_len, (unsigned) sbi->s_cluster_ratio - c_offset); /* @@ -4220,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, */ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; newex.ee_block = cpu_to_le32(map->m_lblk); - cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); + cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk); /* * If we are doing bigalloc, check to see if the extent returned @@ -4288,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, * needed so that future calls to get_implied_cluster_alloc() * work correctly. 
*/ - offset = map->m_lblk & (sbi->s_cluster_ratio - 1); + offset = EXT4_LBLK_COFF(sbi, map->m_lblk); ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ar.goal -= offset; ar.logical -= offset; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 04766d9a29cd..04a5c7504be9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4126,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, ext4_get_group_no_and_offset(sb, goal, &group, &block); /* set up allocation goals */ - ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); + ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); ac->ac_status = AC_STATUS_CONTINUE; ac->ac_sb = sb; ac->ac_inode = ar->inode; @@ -4668,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, * blocks at the beginning or the end unless we are explicitly * requested to avoid doing so. */ - overflow = block & (sbi->s_cluster_ratio - 1); + overflow = EXT4_PBLK_COFF(sbi, block); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { overflow = sbi->s_cluster_ratio - overflow; @@ -4682,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, count += overflow; } } - overflow = count & (sbi->s_cluster_ratio - 1); + overflow = EXT4_LBLK_COFF(sbi, count); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { if (count > overflow) From a96e4e2ffe439e45732d820fac6fee486b6412bf Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 19 Dec 2013 08:37:03 -0800 Subject: [PATCH 278/312] IB/uverbs: New macro to set pointers to NULL if length is 0 in INIT_UDATA() Trying to have a ternary operator to choose between NULL (or 0) and the real pointer value in invocations leads to an impossible choice between a sparse error about a literal 0 used as a NULL pointer, and a gcc warning about "pointer/integer type mismatch in conditional expression." Rather than clutter the source with more casts, move the ternary operator into a new INIT_UDATA_BUF_OR_NULL() macro, which makes it easier to use and simplifies its callers. Reported-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs.h | 8 ++++++++ drivers/infiniband/core/uverbs_main.c | 17 +++++++---------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 9879568aed8c..a283274a5a09 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -55,6 +55,14 @@ (udata)->outlen = (olen); \ } while (0) +#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \ + do { \ + (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \ + (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \ + (udata)->inlen = (ilen); \ + (udata)->outlen = (olen); \ + } while (0) + /* * Our lifetime rules for these structs are the following: * diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 34386943ebcf..699463bbfd2d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -676,17 +676,14 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, return -EINVAL; } - INIT_UDATA(&ucore, - (hdr.in_words) ? buf : 0, - (unsigned long)ex_hdr.response, - hdr.in_words * 8, - hdr.out_words * 8); + INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response, + hdr.in_words * 8, hdr.out_words * 8); - INIT_UDATA(&uhw, - (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, - (ex_hdr.provider_out_words) ? 
(unsigned long)ex_hdr.response + ucore.outlen : 0, - ex_hdr.provider_in_words * 8, - ex_hdr.provider_out_words * 8); + INIT_UDATA_BUF_OR_NULL(&uhw, + buf + ucore.inlen, + (unsigned long) ex_hdr.response + ucore.outlen, + ex_hdr.provider_in_words * 8, + ex_hdr.provider_out_words * 8); err = uverbs_ex_cmd_table[command](file, &ucore, From 7efb1b19b3414d7dec792f39e1c1a7db57a23961 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:47 +0100 Subject: [PATCH 279/312] IB/uverbs: Check reserved field in extended command header As noted by Daniel Vetter in its article "Botching up ioctls"[1] "Check *all* unused fields and flags and all the padding for whether it's 0, and reject the ioctl if that's not the case. Otherwise your nice plan for future extensions is going right down the gutters since someone *will* submit an ioctl struct with random stack garbage in the yet unused parts. Which then bakes in the ABI that those fields can never be used for anything else but garbage." It's important to ensure that reserved fields are set to known value, so that it will be possible to use them latter to extend the ABI. The same reasonning apply to comp_mask field present in newer uverbs command: per commit 22878dbc9173 ("IB/core: Better checking of userspace values for receive flow steering"), unsupported values in comp_mask are rejected. [1] http://blog.ffwll.ch/2013/11/botching-up-ioctls.html Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 699463bbfd2d..ac2305d8d7c4 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -668,6 +668,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) return -EINVAL; + if (ex_hdr.cmd_hdr_reserved) + return -EINVAL; + if (ex_hdr.response) { if (!hdr.out_words && !ex_hdr.provider_out_words) return -EINVAL; From 2782c2d302557403e314a43856f681a5385e62c6 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:48 +0100 Subject: [PATCH 280/312] IB/uverbs: Check comp_mask in destroy_flow Just like the check added to create_flow in 22878dbc9173 ("IB/core: Better checking of userspace values for receive flow steering"), comp_mask must be checked in destroy_flow too. Since only empty comp_mask is currently supported, any other value must be rejected. This check was silently added in a previous patch[1] to move comp_mask in extended command header, part of previous patchset[2] against create/destroy_flow uverbs. The idea of moving comp_mask to the header was discarded for the final patchset[3]. Unfortunately the check added in destroy_flow uverb was not integrated in the final patchset. 
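The rule being enforced here and in the following patches is easy to demonstrate outside the kernel: any comp_mask or reserved bits the code does not understand must be rejected, otherwise they can never be reused for extensions. A minimal userspace sketch (hypothetical struct and names, not the uverbs ABI):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct destroy_flow_cmd {
        uint32_t comp_mask;     /* must stay 0 until extensions exist */
        uint32_t flow_handle;
    };

    static int check_cmd(const struct destroy_flow_cmd *cmd,
                         uint32_t supported_mask)
    {
        /* Reject any bit we do not know about instead of ignoring it,
         * so the bit stays available for a future extension. */
        if (cmd->comp_mask & ~supported_mask)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        struct destroy_flow_cmd ok  = { .comp_mask = 0,   .flow_handle = 1 };
        struct destroy_flow_cmd bad = { .comp_mask = 0x4, .flow_handle = 1 };

        printf("ok:  %d\n", check_cmd(&ok, 0));   /* 0 */
        printf("bad: %d\n", check_cmd(&bad, 0));  /* -22 (-EINVAL) */
        return 0;
    }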
[1] http://marc.info/?i=40175eda10d670d098204da6aa4c327a0171ae5f.1381510045.git.ydroneaud@opteya.com [2] http://marc.info/?i=cover.1381510045.git.ydroneaud@opteya.com [3] http://marc.info/?i=cover.1383773832.git.ydroneaud@opteya.com Cc: Matan Barak Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs_cmd.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 65f6e7dc380c..f0ee6b92132a 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2795,6 +2795,9 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, if (ret) return ret; + if (cmd.comp_mask) + return -EINVAL; + uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, file->ucontext); if (!uobj) From c780d82a74cdf247a81f877ecae569b3a248f89b Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:49 +0100 Subject: [PATCH 281/312] IB/uverbs: Check reserved fields in create_flow As noted by Daniel Vetter in its article "Botching up ioctls"[1] "Check *all* unused fields and flags and all the padding for whether it's 0, and reject the ioctl if that's not the case. Otherwise your nice plan for future extensions is going right down the gutters since someone *will* submit an ioctl struct with random stack garbage in the yet unused parts. Which then bakes in the ABI that those fields can never be used for anything else but garbage." It's important to ensure that reserved fields are set to known value, so that it will be possible to use them latter to extend the ABI. The same reasonning apply to comp_mask field present in newer uverbs command: per commit 22878dbc9173 ("IB/core: Better checking of userspace values for receive flow steering"), unsupported values in comp_mask are rejected. [1] http://blog.ffwll.ch/2013/11/botching-up-ioctls.html Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs_cmd.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index f0ee6b92132a..4e8b15c50481 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2593,6 +2593,9 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec) { + if (kern_spec->reserved) + return -EINVAL; + ib_spec->type = kern_spec->type; switch (ib_spec->type) { @@ -2671,6 +2674,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) return -EINVAL; + if (cmd.flow_attr.reserved[0] || + cmd.flow_attr.reserved[1]) + return -EINVAL; + if (cmd.flow_attr.num_of_specs) { kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, GFP_KERNEL); From 98a37510ec1452817600d8ea47cff1d9f8d9bec8 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:50 +0100 Subject: [PATCH 282/312] IB/uverbs: Set error code when fail to consume all flow_spec items If the flow_spec items parsed count does not match the number of items declared in the flow_attr command, or if not all bytes are used for flow_spec items (eg. trailing garbage), a log message is reported and the function leave through the error path. 
Unfortunately the error code is currently not set. This patch set error code to -EINVAL in such cases, so that the error is reported to userspace instead of silently fail. Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs_cmd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4e8b15c50481..45fb80b876b0 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2738,6 +2738,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", i, cmd.flow_attr.size); + err = -EINVAL; goto err_free; } flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); From 6bcca3d4a3bcc9859cf001a0a21c8796edae2dc0 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:52 +0100 Subject: [PATCH 283/312] IB/uverbs: Check input length in flow steering uverbs Since ib_copy_from_udata() doesn't check yet the available input data length before accessing userspace memory, an explicit check of this length is required to prevent: - reading past the user provided buffer, - underflow when subtracting the expected command size from the input length. This will ensure the newly added flow steering uverbs don't try to process truncated commands. Link: http://marc.info/?i=cover.1386798254.git.ydroneaud@opteya.com> Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs_cmd.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 45fb80b876b0..f1cc83855af6 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2649,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, void *ib_spec; int i; + if (ucore->inlen < sizeof(cmd)) + return -EINVAL; + if (ucore->outlen < sizeof(resp)) return -ENOSPC; @@ -2799,6 +2802,9 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, struct ib_uobject *uobj; int ret; + if (ucore->inlen < sizeof(cmd)) + return -EINVAL; + ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); if (ret) return ret; From 6cc3df840a84dc4e8a874e74cd62a138074922ba Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Wed, 11 Dec 2013 23:01:51 +0100 Subject: [PATCH 284/312] IB/uverbs: Check access to userspace response buffer in extended command This patch adds a check on the output buffer with access_ok(VERIFY_WRITE, ...) to ensure the whole buffer is in userspace memory before using the pointer in uverbs functions. If the buffer or a subset of it is not valid, returns -EFAULT to the caller. This will also catch invalid buffer before the final call to copy_to_user() which happen late in most uverb functions. Just like the check in read(2) syscall, it's a sanity check to detect invalid parameters provided by userspace. This particular check was added in vfs_read() by Linus Torvalds for v2.6.12 with following commit message: https://git.kernel.org/cgit/linux/kernel/git/tglx/history.git/commit/?id=fd770e66c9a65b14ce114e171266cf6f393df502 Make read/write always do the full "access_ok()" tests. The actual user copy will do them too, but only for the range that ends up being actually copied. That hides bugs when the range has been clamped by file size or other issues. 
Note: there's no need to check input buffer since vfs_write() already does access_ok(VERIFY_READ, ...) as part of write() syscall. Link: http://marc.info/?i=cover.1387273677.git.ydroneaud@opteya.com Signed-off-by: Yann Droneaud Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index ac2305d8d7c4..08219fb3338b 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -674,6 +674,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (ex_hdr.response) { if (!hdr.out_words && !ex_hdr.provider_out_words) return -EINVAL; + + if (!access_ok(VERIFY_WRITE, + (void __user *) (unsigned long) ex_hdr.response, + (hdr.out_words + ex_hdr.provider_out_words) * 8)) + return -EFAULT; } else { if (hdr.out_words || ex_hdr.provider_out_words) return -EINVAL; From ee53664bda169f519ce3c6a22d378f0b946c8178 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 20 Dec 2013 15:10:03 +0200 Subject: [PATCH 285/312] mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support Sasha Levin found a NULL pointer dereference that is due to a missing page table lock, which in turn is due to the pmd entry in question being a transparent huge-table entry. The code - introduced in commit 1998cc048901 ("mm: make madvise(MADV_WILLNEED) support swap file prefetch") - correctly checks for this situation using pmd_none_or_trans_huge_or_clear_bad(), but it turns out that that function doesn't work correctly. pmd_none_or_trans_huge_or_clear_bad() expected that pmd_bad() would trigger if the transparent hugepage bit was set, but it doesn't do that if pmd_numa() is also set. Note that the NUMA bit only gets set on real NUMA machines, so people trying to reproduce this on most normal development systems would never actually trigger this. Fix it by removing the very subtle (and subtly incorrect) expectation, and instead just checking pmd_trans_huge() explicitly. Reported-by: Sasha Levin Acked-by: Andrea Arcangeli [ Additionally remove the now stale test for pmd_trans_huge() inside the pmd_bad() case - Linus ] Signed-off-by: Linus Torvalds --- include/asm-generic/pgtable.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index b12079afbd5f..db0923458940 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif - if (pmd_none(pmdval)) + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { - if (!pmd_trans_huge(pmdval)) - pmd_clear_bad(pmd); + pmd_clear_bad(pmd); return 1; } return 0; From 8798cee2f90292ca4ca84baccc757e4d06f8797f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 20 Dec 2013 14:54:11 +0000 Subject: [PATCH 286/312] Revert "mm: page_alloc: exclude unreclaimable allocations from zone fairness policy" This reverts commit 73f038b863df. The NUMA behaviour of this patch is less than ideal. An alternative approch is to interleave allocations only within local zones which is implemented in the next patch. 
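For context, the fairness policy that this revert and the next patch adjust amounts to a per-zone allocation batch: the fast path skips zones whose batch is exhausted and, after the next patch, also skips zones on remote nodes. A rough stand-alone sketch of that selection loop (all names illustrative, not the page allocator's API):

    #include <stdio.h>

    struct zone {
        const char *name;
        int         node;
        int         alloc_batch;    /* NR_ALLOC_BATCH stand-in */
    };

    static struct zone *pick_zone(struct zone *zones, int nr, int local_node)
    {
        for (int i = 0; i < nr; i++) {
            struct zone *z = &zones[i];

            if (z->node != local_node)  /* only local zones are interleaved */
                continue;
            if (z->alloc_batch <= 0)    /* batch used up: try the next zone */
                continue;
            z->alloc_batch--;
            return z;
        }
        return NULL;                    /* fast path failed: enter slow path */
    }

    int main(void)
    {
        struct zone zones[] = {
            { "Normal", 0, 2 },
            { "DMA32",  0, 1 },
            { "Normal", 1, 2 },         /* remote zone, never picked here */
        };

        for (int i = 0; i < 4; i++) {
            struct zone *z = pick_zone(zones, 3, 0);
            printf("alloc %d -> %s\n", i, z ? z->name : "(slow path)");
        }
        return 0;
    }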
Cc: stable@vger.kernel.org Signed-off-by: Mel Gorman Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f861d0257e90..580a5f075ed0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1920,8 +1920,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, * back to remote zones that do not partake in the * fairness round-robin cycle of this zonelist. */ - if ((alloc_flags & ALLOC_WMARK_LOW) && - (gfp_mask & GFP_MOVABLE_MASK)) { + if (alloc_flags & ALLOC_WMARK_LOW) { if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) continue; if (zone_reclaim_mode && From fff4068cba484e6b0abe334ed6b15d5a215a3b25 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 20 Dec 2013 14:54:12 +0000 Subject: [PATCH 287/312] mm: page_alloc: revert NUMA aspect of fair allocation policy Commit 81c0a2bb515f ("mm: page_alloc: fair zone allocator policy") meant to bring aging fairness among zones in system, but it was overzealous and badly regressed basic workloads on NUMA systems. Due to the way kswapd and page allocator interacts, we still want to make sure that all zones in any given node are used equally for all allocations to maximize memory utilization and prevent thrashing on the highest zone in the node. While the same principle applies to NUMA nodes - memory utilization is obviously improved by spreading allocations throughout all nodes - remote references can be costly and so many workloads prefer locality over memory utilization. The original change assumed that zone_reclaim_mode would be a good enough predictor for that, but it turned out to be as indicative as a coin flip. Revert the NUMA aspect of the fairness until we can find a proper way to make it configurable and agree on a sane default. Signed-off-by: Johannes Weiner Reviewed-by: Michal Hocko Signed-off-by: Mel Gorman Cc: # 3.12 Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 580a5f075ed0..5248fe070aa4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist) static bool zone_local(struct zone *local_zone, struct zone *zone) { - return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE; + return local_zone->node == zone->node; } static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) @@ -1913,18 +1913,17 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, * page was allocated in should have no effect on the * time the page has in memory before being reclaimed. * - * When zone_reclaim_mode is enabled, try to stay in - * local zones in the fastpath. If that fails, the - * slowpath is entered, which will do another pass - * starting with the local zones, but ultimately fall - * back to remote zones that do not partake in the - * fairness round-robin cycle of this zonelist. + * Try to stay in local zones in the fastpath. If + * that fails, the slowpath is entered, which will do + * another pass starting with the local zones, but + * ultimately fall back to remote zones that do not + * partake in the fairness round-robin cycle of this + * zonelist. 
*/ if (alloc_flags & ALLOC_WMARK_LOW) { if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) continue; - if (zone_reclaim_mode && - !zone_local(preferred_zone, zone)) + if (!zone_local(preferred_zone, zone)) continue; } /* @@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order, * thrash fairness information for zones that are not * actually part of this zonelist's round-robin cycle. */ - if (zone_reclaim_mode && !zone_local(preferred_zone, zone)) + if (!zone_local(preferred_zone, zone)) continue; mod_zone_page_state(zone, NR_ALLOC_BATCH, high_wmark_pages(zone) - From 597d795a2a786d22dd872332428e2b9439ede639 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 20 Dec 2013 13:35:58 +0200 Subject: [PATCH 288/312] mm: do not allocate page->ptl dynamically, if spinlock_t fits to long In struct page we have enough space to fit long-size page->ptl there, but we use dynamically-allocated page->ptl if size(spinlock_t) is larger than sizeof(int). It hurts 64-bit architectures with CONFIG_GENERIC_LOCKBREAK, where sizeof(spinlock_t) == 8, but it easily fits into struct page. Signed-off-by: Kirill A. Shutemov Acked-by: Hugh Dickins Signed-off-by: Linus Torvalds --- include/linux/lockref.h | 2 +- include/linux/mm.h | 6 +++--- include/linux/mm_types.h | 3 ++- kernel/bounds.c | 2 +- mm/memory.c | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c8929c3832db..4bfde0e99ed5 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -19,7 +19,7 @@ #define USE_CMPXCHG_LOCKREF \ (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ - IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) + IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) struct lockref { union { diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd000cf29..35527173cf50 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS extern bool ptlock_alloc(struct page *page); extern void ptlock_free(struct page *page); @@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return page->ptl; } -#else /* BLOATED_SPINLOCKS */ +#else /* ALLOC_SPLIT_PTLOCKS */ static inline bool ptlock_alloc(struct page *page) { return true; @@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page) { return &page->ptl; } -#endif /* BLOATED_SPINLOCKS */ +#endif /* ALLOC_SPLIT_PTLOCKS */ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ad0616f2fe2c..290901a8c1de 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -26,6 +26,7 @@ struct address_space; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) /* * Each physical page in the system has a struct page associated with @@ -155,7 +156,7 @@ struct page { * system if PG_buddy is set. 
*/ #if USE_SPLIT_PTE_PTLOCKS -#if BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS spinlock_t *ptl; #else spinlock_t ptl; diff --git a/kernel/bounds.c b/kernel/bounds.c index 5253204afdca..9fd4246b04b8 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -22,6 +22,6 @@ void foo(void) #ifdef CONFIG_SMP DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); #endif - DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); + DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); /* End of constants */ } diff --git a/mm/memory.c b/mm/memory.c index 5d9025f3b3e1..b6e211b779d0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS +#if ALLOC_SPLIT_PTLOCKS bool ptlock_alloc(struct page *page) { spinlock_t *ptl; From df36ac1bc2a166eef90785d584e4cfed6f52bd32 Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Wed, 18 Dec 2013 15:17:10 -0800 Subject: [PATCH 289/312] pstore: Don't allow high traffic options on fragile devices Some pstore backing devices use on board flash as persistent storage. These have limited numbers of write cycles so it is a poor idea to use them from high frequency operations. Signed-off-by: Tony Luck Signed-off-by: Linus Torvalds --- drivers/acpi/apei/erst.c | 1 + drivers/firmware/efi/efi-pstore.c | 1 + fs/pstore/platform.c | 7 +++++-- include/linux/pstore.h | 3 +++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 26311f23c824..cb1d557fc22c 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count, static struct pstore_info erst_info = { .owner = THIS_MODULE, .name = "erst", + .flags = PSTORE_FLAGS_FRAGILE, .open = erst_open_pstore, .close = erst_close_pstore, .read = erst_reader, diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 743fd426f21b..4b9dc836dcf9 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, static struct pstore_info efi_pstore_info = { .owner = THIS_MODULE, .name = "efi", + .flags = PSTORE_FLAGS_FRAGILE, .open = efi_pstore_open, .close = efi_pstore_close, .read = efi_pstore_read, diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index b8e93a40a5d3..78c3c2097787 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi) pstore_get_records(0); kmsg_dump_register(&pstore_dumper); - pstore_register_console(); - pstore_register_ftrace(); + + if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) { + pstore_register_console(); + pstore_register_ftrace(); + } if (pstore_update_ms >= 0) { pstore_timer.expires = jiffies + diff --git a/include/linux/pstore.h b/include/linux/pstore.h index abd437d0a8a7..ece0c6bbfcc5 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -51,6 +51,7 @@ struct pstore_info { char *buf; size_t bufsize; struct mutex read_mutex; /* serialize open/read/close */ + int flags; int (*open)(struct pstore_info *psi); int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, @@ -70,6 +71,8 @@ struct pstore_info { void *data; }; +#define PSTORE_FLAGS_FRAGILE 1 + #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); extern bool 
pstore_cannot_block_path(enum kmsg_dump_reason reason); From 40b64acd17a2200579db265048b4a51998a84729 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Fri, 20 Dec 2013 14:28:05 -0800 Subject: [PATCH 290/312] mm: fix build of split ptlock code Commit 597d795a2a78 ('mm: do not allocate page->ptl dynamically, if spinlock_t fits to long') restructures some allocators that are compiled even if USE_SPLIT_PTLOCKS arn't used. It results in compilation failure: mm/memory.c:4282:6: error: 'struct page' has no member named 'ptl' mm/memory.c:4288:12: error: 'struct page' has no member named 'ptl' Add in the missing ifdef. Fixes: 597d795a2a78 ('mm: do not allocate page->ptl dynamically, if spinlock_t fits to long') Signed-off-by: Olof Johansson Cc: Kirill A. Shutemov Cc: Hugh Dickins Signed-off-by: Linus Torvalds --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index b6e211b779d0..6768ce9e57d2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -#if ALLOC_SPLIT_PTLOCKS +#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS bool ptlock_alloc(struct page *page) { spinlock_t *ptl; From b7000adef17a5cce85636e40fa2c2d9851a89e28 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 20 Dec 2013 16:52:45 -0800 Subject: [PATCH 291/312] Don't set the INITRD_COMPRESS environment variable automatically Commit 1bf49dd4be0b ("./Makefile: export initial ramdisk compression config option") started setting the INITRD_COMPRESS environment variable depending on which decompression models the kernel had available. That is completely broken. For example, we by default have CONFIG_RD_LZ4 enabled, and are able to decompress such an initrd, but the user tools to *create* such an initrd may not be availble. So trying to tell dracut to generate an lz4-compressed image just because we can decode such an image is completely inappropriate. Cc: J P Cc: Andrew Morton Cc: Jan Beulich Signed-off-by: Linus Torvalds --- Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 89cee8625651..82ea7dd9fc98 100644 --- a/Makefile +++ b/Makefile @@ -738,7 +738,9 @@ INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 -export INITRD_COMPRESS := $(INITRD_COMPRESS-y) +# do not export INITRD_COMPRESS, since we didn't actually +# choose a sane default compression above. +# export INITRD_COMPRESS := $(INITRD_COMPRESS-y) ifdef CONFIG_MODULE_SIG_ALL MODSECKEY = ./signing_key.priv From 89ed05eea093d4c18df5d504d104f29b874fea29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matias=20Bj=C3=B8rling?= Date: Sat, 21 Dec 2013 00:10:59 +0100 Subject: [PATCH 292/312] null_blk: corrections to documentation Randy Dunlap reported a couple of grammar errors and unfortunate usages of socket/node/core. Signed-off-by: Matias Bjorling Acked-by: Randy Dunlap Signed-off-by: Jens Axboe --- Documentation/block/null_blk.txt | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index 9e1b047fd13d..5603dad5534b 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt @@ -18,7 +18,7 @@ The following instances are possible: - Bio-based. IO requests are submitted directly to the device driver. 
- Directly accepts bio data structure and returns them. -All of them has a completion queue for each core in the system. +All of them have a completion queue for each core in the system. II. Module parameters applicable for all instances: @@ -30,7 +30,7 @@ queue_mode=[0-2]: Default: 2-Multi-queue 2: Multi-queue. home_node=[0--nr_nodes]: Default: NUMA_NO_NODE - Selects what socket the data structures is allocated from. + Selects what CPU node the data structures are allocated from. gb=[Size in GB]: Default: 250GB The size of the device reported to the system. @@ -38,34 +38,34 @@ gb=[Size in GB]: Default: 250GB bs=[Block size (in bytes)]: Default: 512 bytes The block size reported to the system. -nr_devices=[Num. devices]: Default: 2 +nr_devices=[Number of devices]: Default: 2 Number of block devices instantiated. They are instantiated as /dev/nullb0, etc. -irq_mode=[0-2]: Default: Soft-irq +irq_mode=[0-2]: Default: 1-Soft-irq The completion mode used for completing IOs to the block-layer. 0: None. - 1: Soft-irq. Uses ipi to complete IOs across sockets. Simulates the overhead - when IOs are issued from another socket than the home the device is + 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead + when IOs are issued from another CPU node than the home the device is connected to. 2: Timer: Waits a specific period (completion_nsec) for each IO before completion. -completion_nsec=[Num. ns]: Default: 10.000ns +completion_nsec=[ns]: Default: 10.000ns Combined with irq_mode=2 (timer). The time each completion event must wait. submit_queues=[0..nr_cpus]: The number of submission queues attached to the device driver. If unset, it defaults to 1 on single-queue and bio-based instances. For multi-queue, - its ignored when use_per_node_hctx module parameter is 1. + it is ignored when use_per_node_hctx module parameter is 1. -hw_queue_depth=[0..qdepth]: Defaults: 64 +hw_queue_depth=[0..qdepth]: Default: 64 The hardware queue depth of the device. III: Multi-queue specific parameters -use_per_node_hctx=[0/1]: Defaults: 1 +use_per_node_hctx=[0/1]: Default: 1 If 1, the multi-queue block layer is instantiated with a hardware dispatch queue for each CPU node in the system. If 0, it is instantiated with the number of queues defined in the submit_queues parameter. From 200052440d3b56f593038a35b7c14bdc780184a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matias=20Bj=C3=B8rling?= Date: Sat, 21 Dec 2013 00:11:00 +0100 Subject: [PATCH 293/312] null_blk: set use_per_node_hctx param to false The defaults for the module is to instantiate itself with blk-mq and a submit queue for each CPU node in the system. To save resources, initialize instead with a single submit queue. Signed-off-by: Matias Bjorling Signed-off-by: Jens Axboe --- Documentation/block/null_blk.txt | 9 +++++---- drivers/block/null_blk.c | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt index 5603dad5534b..b2830b435895 100644 --- a/Documentation/block/null_blk.txt +++ b/Documentation/block/null_blk.txt @@ -65,7 +65,8 @@ hw_queue_depth=[0..qdepth]: Default: 64 III: Multi-queue specific parameters -use_per_node_hctx=[0/1]: Default: 1 - If 1, the multi-queue block layer is instantiated with a hardware dispatch - queue for each CPU node in the system. If 0, it is instantiated with the - number of queues defined in the submit_queues parameter. 
+use_per_node_hctx=[0/1]: Default: 0 + 0: The number of submit queues are set to the value of the submit_queues + parameter. + 1: The multi-queue block layer is instantiated with a hardware dispatch + queue for each CPU node in the system. diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8f2e7c330d6d..9b0311b61fe1 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -101,9 +101,9 @@ static int hw_queue_depth = 64; module_param(hw_queue_depth, int, S_IRUGO); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); -static bool use_per_node_hctx = true; +static bool use_per_node_hctx = false; module_param(use_per_node_hctx, bool, S_IRUGO); -MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); +MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); static void put_tag(struct nullb_queue *nq, unsigned int tag) { From fc1bc35443741e132dd0118e8dbac53f69a6f76e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matias=20Bj=C3=B8rling?= Date: Sat, 21 Dec 2013 00:11:01 +0100 Subject: [PATCH 294/312] null_blk: support submit_queues on use_per_node_hctx In the case of both the submit_queues param and use_per_node_hctx param are used. We limit the number af submit_queues to the number of online nodes. If the submit_queues is a multiple of nr_online_nodes, its trivial. Simply map them to the nodes. For example: 8 submit queues are mapped as node0[0,1], node1[2,3], ... If uneven, we are left with an uneven number of submit_queues that must be mapped. These are mapped toward the first node and onward. E.g. 5 submit queues mapped onto 4 nodes are mapped as node0[0,1], node1[2], ... Signed-off-by: Matias Bjorling Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 9b0311b61fe1..528f4e47f38e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1,4 +1,5 @@ #include + #include #include #include @@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) { - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, - hctx_index); + int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); + int tip = (reg->nr_hw_queues % nr_online_nodes); + int node = 0, i, n; + + /* + * Split submit queues evenly wrt to the number of nodes. If uneven, + * fill the first buckets with one extra, until the rest is filled with + * no extra. + */ + for (i = 0, n = 1; i < hctx_index; i++, n++) { + if (n % b_size == 0) { + n = 0; + node++; + + tip--; + if (!tip) + b_size = reg->nr_hw_queues / nr_online_nodes; + } + } + + /* + * A node might not be online, therefore map the relative node id to the + * real node id. 
+ */ + for_each_online_node(n) { + if (!node) + break; + node--; + } + + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); } static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) @@ -591,10 +621,11 @@ static int __init null_init(void) #endif if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { - if (submit_queues > 0) + if (submit_queues < nr_online_nodes) { pr_warn("null_blk: submit_queues param is set to %u.", nr_online_nodes); - submit_queues = nr_online_nodes; + submit_queues = nr_online_nodes; + } } else if (submit_queues > nr_cpu_ids) submit_queues = nr_cpu_ids; else if (!submit_queues) From 1881686f842065d2f92ec9c6424830ffc17d23b0 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Sat, 21 Dec 2013 15:49:28 -0500 Subject: [PATCH 295/312] aio: fix kioctx leak introduced by "aio: Fix a trinity splat" e34ecee2ae791df674dfb466ce40692ca6218e43 reworked the percpu reference counting to correct a bug trinity found. Unfortunately, the change lead to kioctxes being leaked because there was no final reference count to put. Add that reference count back in to fix things. Signed-off-by: Benjamin LaHaise Cc: stable@vger.kernel.org --- fs/aio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/aio.c b/fs/aio.c index 6efb7f6cb22e..fd1c0baf15bb 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -652,7 +652,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) aio_nr += ctx->max_reqs; spin_unlock(&aio_nr_lock); - percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ + percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ + percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ err = ioctx_add_table(ctx, mm); if (err) From 8e321fefb0e60bae4e2a28d20fc4fa30758d27c6 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Sat, 21 Dec 2013 17:56:08 -0500 Subject: [PATCH 296/312] aio/migratepages: make aio migrate pages sane The arbitrary restriction on page counts offered by the core migrate_page_move_mapping() code results in rather suspicious looking fiddling with page reference counts in the aio_migratepage() operation. To fix this, make migrate_page_move_mapping() take an extra_count parameter that allows aio to tell the code about its own reference count on the page being migrated. While cleaning up aio_migratepage(), make it validate that the old page being passed in is actually what aio_migratepage() expects to prevent misbehaviour in the case of races. 
Signed-off-by: Benjamin LaHaise --- fs/aio.c | 52 ++++++++++++++++++++++++++++++++++------- include/linux/migrate.h | 3 ++- mm/migrate.c | 13 ++++++----- 3 files changed, 53 insertions(+), 15 deletions(-) diff --git a/fs/aio.c b/fs/aio.c index fd1c0baf15bb..efa708b29054 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx) int i; for (i = 0; i < ctx->nr_pages; i++) { + struct page *page; pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, page_count(ctx->ring_pages[i])); - put_page(ctx->ring_pages[i]); + page = ctx->ring_pages[i]; + if (!page) + continue; + ctx->ring_pages[i] = NULL; + put_page(page); } put_aio_ring_file(ctx); @@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, unsigned long flags; int rc; + rc = 0; + + /* Make sure the old page hasn't already been changed */ + spin_lock(&mapping->private_lock); + ctx = mapping->private_data; + if (ctx) { + pgoff_t idx; + spin_lock_irqsave(&ctx->completion_lock, flags); + idx = old->index; + if (idx < (pgoff_t)ctx->nr_pages) { + if (ctx->ring_pages[idx] != old) + rc = -EAGAIN; + } else + rc = -EINVAL; + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else + rc = -EINVAL; + spin_unlock(&mapping->private_lock); + + if (rc != 0) + return rc; + /* Writeback must be complete */ BUG_ON(PageWriteback(old)); - put_page(old); + get_page(new); - rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); + rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); if (rc != MIGRATEPAGE_SUCCESS) { - get_page(old); + put_page(new); return rc; } - get_page(new); - /* We can potentially race against kioctx teardown here. Use the * address_space's private data lock to protect the mapping's * private_data. 
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, spin_lock_irqsave(&ctx->completion_lock, flags); migrate_page_copy(new, old); idx = old->index; - if (idx < (pgoff_t)ctx->nr_pages) - ctx->ring_pages[idx] = new; + if (idx < (pgoff_t)ctx->nr_pages) { + /* And only do the move if things haven't changed */ + if (ctx->ring_pages[idx] == old) + ctx->ring_pages[idx] = new; + else + rc = -EAGAIN; + } else + rc = -EINVAL; spin_unlock_irqrestore(&ctx->completion_lock, flags); } else rc = -EBUSY; spin_unlock(&mapping->private_lock); + if (rc == MIGRATEPAGE_SUCCESS) + put_page(old); + else + put_page(new); + return rc; } #endif diff --git a/include/linux/migrate.h b/include/linux/migrate.h index b7717d74da7f..f015c059e159 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode); + struct buffer_head *head, enum migrate_mode mode, + int extra_count); #else static inline void putback_lru_pages(struct list_head *l) {} diff --git a/mm/migrate.c b/mm/migrate.c index e9b710201335..9194375b2307 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -317,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, */ int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode) + struct buffer_head *head, enum migrate_mode mode, + int extra_count) { - int expected_count = 0; + int expected_count = 1 + extra_count; void **pslot; if (!mapping) { /* Anonymous page without mapping */ - if (page_count(page) != 1) + if (page_count(page) != expected_count) return -EAGAIN; return MIGRATEPAGE_SUCCESS; } @@ -334,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping, pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); - expected_count = 2 + page_has_private(page); + expected_count += 1 + page_has_private(page); if (page_count(page) != expected_count || radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); @@ -584,7 +585,7 @@ int migrate_page(struct address_space *mapping, BUG_ON(PageWriteback(page)); /* Writeback must be complete */ - rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode); + rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); if (rc != MIGRATEPAGE_SUCCESS) return rc; @@ -611,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping, head = page_buffers(page); - rc = migrate_page_move_mapping(mapping, newpage, page, head, mode); + rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); if (rc != MIGRATEPAGE_SUCCESS) return rc; From 42f921a6f10c6c2079b093a115eb7e3c3508357f Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 20 Dec 2013 21:26:02 +0530 Subject: [PATCH 297/312] cpufreq: remove sysfs files for CPUs which failed to come back after resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are cases where cpufreq_add_dev() may fail for some CPUs during system resume. With the current code we will still have sysfs cpufreq files for those CPUs and struct cpufreq_policy would be already freed for them. 
Hence any operation on those sysfs files would result in kernel warnings. Example of problems resulting from resume errors (from Bjørn Mork): WARNING: CPU: 0 PID: 6055 at fs/sysfs/file.c:343 sysfs_open_file+0x77/0x212() missing sysfs attribute operations for kobject: (null) Modules linked in: [stripped as irrelevant] CPU: 0 PID: 6055 Comm: grep Tainted: G D 3.13.0-rc2 #153 Hardware name: LENOVO 2776LEG/2776LEG, BIOS 6EET55WW (3.15 ) 12/19/2011 0000000000000009 ffff8802327ebb78 ffffffff81380b0e 0000000000000006 ffff8802327ebbc8 ffff8802327ebbb8 ffffffff81038635 0000000000000000 ffffffff811823c7 ffff88021a19e688 ffff88021a19e688 ffff8802302f9310 Call Trace: [] dump_stack+0x55/0x76 [] warn_slowpath_common+0x7c/0x96 [] ? sysfs_open_file+0x77/0x212 [] warn_slowpath_fmt+0x41/0x43 [] ? sysfs_get_active+0x6b/0x82 [] ? sysfs_open_file+0x32/0x212 [] sysfs_open_file+0x77/0x212 [] ? sysfs_schedule_callback+0x1ac/0x1ac [] do_dentry_open+0x17c/0x257 [] finish_open+0x41/0x4f [] do_last+0x80c/0x9ba [] ? inode_permission+0x40/0x42 [] path_openat+0x233/0x4a1 [] do_filp_open+0x35/0x85 [] ? __alloc_fd+0x172/0x184 [] do_sys_open+0x6b/0xfa [] SyS_openat+0xf/0x11 [] system_call_fastpath+0x16/0x1b To fix this, remove those sysfs files or put the associated kobject in case of such errors. Also, to make it simple, remove the cpufreq sysfs links from all the CPUs (except for the policy->cpu) during suspend, as that operation won't result in a loss of sysfs file permissions and we can create those links during resume just fine. Fixes: 5302c3fb2e62 ("cpufreq: Perform light-weight init/teardown during suspend/resume") Reported-and-tested-by: Bjørn Mork Signed-off-by: Viresh Kumar Cc: 3.12+ # 3.12+ [rjw: Changelog] Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 63 +++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 02d534da22dd..fab042e1ee90 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -845,8 +845,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) #ifdef CONFIG_HOTPLUG_CPU static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, - unsigned int cpu, struct device *dev, - bool frozen) + unsigned int cpu, struct device *dev) { int ret = 0; unsigned long flags; @@ -877,11 +876,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, } } - /* Don't touch sysfs links during light-weight init */ - if (!frozen) - ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); - - return ret; + return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); } #endif @@ -926,6 +921,27 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) return NULL; } +static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) +{ + struct kobject *kobj; + struct completion *cmp; + + down_read(&policy->rwsem); + kobj = &policy->kobj; + cmp = &policy->kobj_unregister; + up_read(&policy->rwsem); + kobject_put(kobj); + + /* + * We need to make sure that the underlying kobj is + * actually not referenced anymore by anybody before we + * proceed with unloading. 
+ */ + pr_debug("waiting for dropping of refcount\n"); + wait_for_completion(cmp); + pr_debug("wait complete\n"); +} + static void cpufreq_policy_free(struct cpufreq_policy *policy) { free_cpumask_var(policy->related_cpus); @@ -986,7 +1002,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { read_unlock_irqrestore(&cpufreq_driver_lock, flags); - ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); + ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); up_read(&cpufreq_rwsem); return ret; } @@ -1096,7 +1112,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, if (cpufreq_driver->exit) cpufreq_driver->exit(policy); err_set_policy_cpu: + if (frozen) + cpufreq_policy_put_kobj(policy); cpufreq_policy_free(policy); + nomem_out: up_read(&cpufreq_rwsem); @@ -1118,7 +1137,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) } static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, - unsigned int old_cpu, bool frozen) + unsigned int old_cpu) { struct device *cpu_dev; int ret; @@ -1126,10 +1145,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, /* first sibling now owns the new sysfs dir */ cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); - /* Don't touch sysfs files during light-weight tear-down */ - if (frozen) - return cpu_dev->id; - sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); ret = kobject_move(&policy->kobj, &cpu_dev->kobj); if (ret) { @@ -1196,7 +1211,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, if (!frozen) sysfs_remove_link(&dev->kobj, "cpufreq"); } else if (cpus > 1) { - new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); + new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); if (new_cpu >= 0) { update_policy_cpu(policy, new_cpu); @@ -1218,8 +1233,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, int ret; unsigned long flags; struct cpufreq_policy *policy; - struct kobject *kobj; - struct completion *cmp; read_lock_irqsave(&cpufreq_driver_lock, flags); policy = per_cpu(cpufreq_cpu_data, cpu); @@ -1249,22 +1262,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev, } } - if (!frozen) { - down_read(&policy->rwsem); - kobj = &policy->kobj; - cmp = &policy->kobj_unregister; - up_read(&policy->rwsem); - kobject_put(kobj); - - /* - * We need to make sure that the underlying kobj is - * actually not referenced anymore by anybody before we - * proceed with unloading. - */ - pr_debug("waiting for dropping of refcount\n"); - wait_for_completion(cmp); - pr_debug("wait complete\n"); - } + if (!frozen) + cpufreq_policy_put_kobj(policy); /* * Perform the ->exit() even during light-weight tear-down, From a27a9ab706c8f5bb8bbd320d2e9c5d089e380c6a Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 19 Dec 2013 22:50:50 +0000 Subject: [PATCH 298/312] cpufreq: Use CONFIG_CPU_FREQ_DEFAULT_* to set initial policy for setpolicy drivers When configuring a default governor (via CONFIG_CPU_FREQ_DEFAULT_*) with the intel_pstate driver, the desired default policy is not properly set. For example, setting 'CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE' ends up with the 'powersave' policy being set. Fix by configuring the correct default policy, if either 'powersave' or 'performance' are requested. Otherwise, fallback to what the driver originally set via its 'init' routine. 
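The intended selection logic is small enough to sketch in isolation: parse the compile-time default governor name and only override the driver's initial policy when the name maps to a known setpolicy value. Hypothetical names below, not the cpufreq API:

    #include <stdio.h>
    #include <string.h>

    #define POLICY_POWERSAVE   1
    #define POLICY_PERFORMANCE 2

    /* Stand-in for CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE */
    #define DEFAULT_GOVERNOR "performance"

    static int parse_governor(const char *name, unsigned int *policy)
    {
        if (!strcmp(name, "performance")) {
            *policy = POLICY_PERFORMANCE;
            return 0;
        }
        if (!strcmp(name, "powersave")) {
            *policy = POLICY_POWERSAVE;
            return 0;
        }
        return -1;      /* unknown name: keep the driver's init value */
    }

    int main(void)
    {
        unsigned int policy = POLICY_POWERSAVE; /* what the driver's init set */

        /* Only override when the configured default actually parses. */
        parse_governor(DEFAULT_GOVERNOR, &policy);

        printf("policy = %s\n",
               policy == POLICY_PERFORMANCE ? "performance" : "powersave");
        return 0;
    }

If the configured default is a regular governor rather than 'performance' or 'powersave', the parse fails and the driver's own choice is kept, matching the fallback described above.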
Signed-off-by: Jason Baron Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fab042e1ee90..16d7b4ac94be 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) int ret = 0; memcpy(&new_policy, policy, sizeof(*policy)); + + /* Use the default policy if its valid. */ + if (cpufreq_driver->setpolicy) + cpufreq_parse_governor(policy->governor->name, + &new_policy.policy, NULL); + /* assure that the starting sequence is run in cpufreq_set_policy */ policy->governor = NULL; From c606850407d9096415e226c75a871d0650404446 Mon Sep 17 00:00:00 2001 From: Masami Ichikawa Date: Thu, 19 Dec 2013 20:00:47 +0900 Subject: [PATCH 299/312] PM / sleep: Fix memory leak in pm_vt_switch_unregister(). kmemleak reported a memory leak as below. unreferenced object 0xffff880118f14700 (size 32): comm "swapper/0", pid 1, jiffies 4294877401 (age 123.283s) hex dump (first 32 bytes): 00 01 10 00 00 00 ad de 00 02 20 00 00 00 ad de .......... ..... 00 d4 d2 18 01 88 ff ff 01 00 00 00 00 04 00 00 ................ backtrace: [] kmemleak_alloc+0x4e/0xb0 [] kmem_cache_alloc_trace+0x1ec/0x260 [] pm_vt_switch_required+0x76/0xb0 [] register_framebuffer+0x195/0x320 [] efifb_probe+0x718/0x780 [] platform_drv_probe+0x45/0xb0 [] driver_probe_device+0x87/0x3a0 [] __driver_attach+0x93/0xa0 [] bus_for_each_dev+0x63/0xa0 [] driver_attach+0x1e/0x20 [] bus_add_driver+0x180/0x250 [] driver_register+0x64/0xf0 [] __platform_driver_register+0x4a/0x50 [] efifb_driver_init+0x12/0x14 [] do_one_initcall+0xfa/0x1b0 [] kernel_init_freeable+0x17b/0x201 In pm_vt_switch_required(), "entry" variable is allocated via kmalloc(). So, in pm_vt_switch_unregister(), it needs to call kfree() when object is deleted from list. Signed-off-by: Masami Ichikawa Reviewed-by: Pavel Machek Signed-off-by: Rafael J. Wysocki --- kernel/power/console.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/power/console.c b/kernel/power/console.c index 463aa6736751..eacb8bd8cab4 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev) list_for_each_entry(tmp, &pm_vt_switch_list, head) { if (tmp->dev == dev) { list_del(&tmp->head); + kfree(tmp); break; } } From ed93b71492da3464b4798613aa8a99bed914251b Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Wed, 11 Dec 2013 14:39:27 -0800 Subject: [PATCH 300/312] powercap / RAPL: add support for ValleyView Soc This patch adds support for RAPL on Intel ValleyView based SoC platforms, such as Baytrail. Besides adding CPU ID, special energy unit encoding is handled for ValleyView. Signed-off-by: Jacob Pan Signed-off-by: Rafael J. 
Wysocki --- drivers/powercap/intel_rapl.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 2a786c504460..3c6768378a94 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd, return 0; } +static const struct x86_cpu_id energy_unit_quirk_ids[] = { + { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ + {} +}; + static int rapl_check_unit(struct rapl_package *rp, int cpu) { u64 msr_val; @@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu) * time unit: 1/time_unit_divisor Seconds */ value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; - rp->energy_unit_divisor = 1 << value; - + /* some CPUs have different way to calculate energy unit */ + if (x86_match_cpu(energy_unit_quirk_ids)) + rp->energy_unit_divisor = 1000000 / (1 << value); + else + rp->energy_unit_divisor = 1 << value; value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; rp->power_unit_divisor = 1 << value; @@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id) static const struct x86_cpu_id rapl_ids[] = { { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ + { X86_VENDOR_INTEL, 6, 0x37},/* VLV */ { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ /* TODO: Add more CPU IDs after testing */ From 3dc9acb67600393249a795934ccdfc291a200e6b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 20 Dec 2013 05:11:12 +0900 Subject: [PATCH 301/312] aio: clean up and fix aio_setup_ring page mapping Since commit 36bc08cc01709 ("fs/aio: Add support to aio ring pages migration") the aio ring setup code has used a special per-ring backing inode for the page allocations, rather than just using random anonymous pages. However, rather than remembering the pages as it allocated them, it would allocate the pages, insert them into the file mapping (dirty, so that they couldn't be free'd), and then forget about them. And then to look them up again, it would mmap the mapping, and then use "get_user_pages()" to get back an array of the pages we just created. Now, not only is that incredibly inefficient, it also leaked all the pages if the mmap failed (which could happen due to excessive number of mappings, for example). So clean it all up, making it much more straightforward. Also remove some left-overs of the previous (broken) mm_populate() usage that was removed in commit d6c355c7dabc ("aio: fix race in ring buffer page lookup introduced by page migration support") but left the pointless and now misleading MAP_POPULATE flag around. 
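The rework boils down to "remember each page as it is allocated, and free exactly what was remembered on failure", instead of inserting pages into the mapping and reconstructing the list afterwards with get_user_pages(). A minimal userspace sketch of that pattern follows; setup_ring(), RING_PAGES and PAGE_BYTES are illustrative stand-ins, not the kernel's aio structures.

#include <stdio.h>
#include <stdlib.h>

#define RING_PAGES 8
#define PAGE_BYTES 4096

/*
 * Record every page as it is allocated so a partial failure can be
 * unwound precisely, instead of trying to look the pages up again
 * somewhere else afterwards.
 */
static int setup_ring(void **pages, int *nr_pages)
{
        int i;

        for (i = 0; i < RING_PAGES; i++) {
                pages[i] = calloc(1, PAGE_BYTES);
                if (!pages[i])
                        break;
        }
        *nr_pages = i;

        if (i != RING_PAGES) {
                while (i--)             /* free exactly what we recorded */
                        free(pages[i]);
                *nr_pages = 0;
                return -1;
        }
        return 0;
}

int main(void)
{
        void *pages[RING_PAGES];
        int nr = 0;

        if (setup_ring(pages, &nr))
                return 1;
        printf("ring backed by %d pages\n", nr);
        while (nr--)
                free(pages[nr]);
        return 0;
}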
Tested-and-acked-by: Benjamin LaHaise Signed-off-by: Linus Torvalds --- fs/aio.c | 60 +++++++++++++++++++++++--------------------------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/fs/aio.c b/fs/aio.c index 6efb7f6cb22e..643db8fc43c5 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -326,7 +326,7 @@ static int aio_setup_ring(struct kioctx *ctx) struct aio_ring *ring; unsigned nr_events = ctx->max_reqs; struct mm_struct *mm = current->mm; - unsigned long size, populate; + unsigned long size, unused; int nr_pages; int i; struct file *file; @@ -347,18 +347,6 @@ static int aio_setup_ring(struct kioctx *ctx) return -EAGAIN; } - for (i = 0; i < nr_pages; i++) { - struct page *page; - page = find_or_create_page(file->f_inode->i_mapping, - i, GFP_HIGHUSER | __GFP_ZERO); - if (!page) - break; - pr_debug("pid(%d) page[%d]->count=%d\n", - current->pid, i, page_count(page)); - SetPageUptodate(page); - SetPageDirty(page); - unlock_page(page); - } ctx->aio_ring_file = file; nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); @@ -373,15 +361,36 @@ static int aio_setup_ring(struct kioctx *ctx) } } + for (i = 0; i < nr_pages; i++) { + struct page *page; + page = find_or_create_page(file->f_inode->i_mapping, + i, GFP_HIGHUSER | __GFP_ZERO); + if (!page) + break; + pr_debug("pid(%d) page[%d]->count=%d\n", + current->pid, i, page_count(page)); + SetPageUptodate(page); + SetPageDirty(page); + unlock_page(page); + + ctx->ring_pages[i] = page; + } + ctx->nr_pages = i; + + if (unlikely(i != nr_pages)) { + aio_free_ring(ctx); + return -EAGAIN; + } + ctx->mmap_size = nr_pages * PAGE_SIZE; pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); down_write(&mm->mmap_sem); ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_POPULATE, 0, &populate); + MAP_SHARED, 0, &unused); + up_write(&mm->mmap_sem); if (IS_ERR((void *)ctx->mmap_base)) { - up_write(&mm->mmap_sem); ctx->mmap_size = 0; aio_free_ring(ctx); return -EAGAIN; @@ -389,27 +398,6 @@ static int aio_setup_ring(struct kioctx *ctx) pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); - /* We must do this while still holding mmap_sem for write, as we - * need to be protected against userspace attempting to mremap() - * or munmap() the ring buffer. - */ - ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, - 1, 0, ctx->ring_pages, NULL); - - /* Dropping the reference here is safe as the page cache will hold - * onto the pages for us. It is also required so that page migration - * can unmap the pages and get the right reference count. 
- */ - for (i = 0; i < ctx->nr_pages; i++) - put_page(ctx->ring_pages[i]); - - up_write(&mm->mmap_sem); - - if (unlikely(ctx->nr_pages != nr_pages)) { - aio_free_ring(ctx); - return -EAGAIN; - } - ctx->user_id = ctx->mmap_base; ctx->nr_events = nr_events; /* trusted copy */ From 413541dd66d51f791a0b169d9b9014e4f56be13c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 22 Dec 2013 13:08:32 -0800 Subject: [PATCH 302/312] Linux 3.13-rc5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 82ea7dd9fc98..14d592cbbc5f 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = One Giant Leap for Frogkind # *DOCUMENTATION* From 488574dbc47e0f570330c8c2b56ae299c28ade14 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 20 Dec 2013 10:58:15 -0800 Subject: [PATCH 303/312] gpu: fix qxl missing crc32_le Fix build error: qxl uses crc32 functions so it needs to select CRC32. Also use angle quotes around a kernel header file name. drivers/built-in.o: In function `qxl_display_read_client_monitors_config': (.text+0x19d754): undefined reference to `crc32_le' Signed-off-by: Randy Dunlap Signed-off-by: Dave Airlie --- drivers/gpu/drm/qxl/Kconfig | 1 + drivers/gpu/drm/qxl/qxl_display.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 037d324bf58f..66ac0ff95f5a 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -8,5 +8,6 @@ config DRM_QXL select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_TTM + select CRC32 help QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5e827c29d194..d70aafb83307 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -24,7 +24,7 @@ */ -#include "linux/crc32.h" +#include <linux/crc32.h> #include "qxl_drv.h" #include "qxl_object.h" From 2e6d8b469b8000b4aa9b95dfcebd89eafae3e8cf Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 21 Dec 2013 22:23:02 +0100 Subject: [PATCH 304/312] drm/ttm: Fix swapin regression Commit "drm/ttm: Don't move non-existing data" didn't take the swapped-out corner case into account. This patch corrects that. Fixes blank screen after attempted suspend / hibernate on vmwgfx. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo_util.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 15b86a94949d..406152152315 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * Don't move nonexistent data. Clear destination instead. */ if (old_iomap == NULL && - (ttm == NULL || ttm->state == tt_unpopulated)) { + (ttm == NULL || (ttm->state == tt_unpopulated && + !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; } From 46d01d63221c3508421dd72ff9c879f61053cffc Mon Sep 17 00:00:00 2001 From: Chad Hanson Date: Mon, 23 Dec 2013 17:45:01 -0500 Subject: [PATCH 305/312] selinux: fix broken peer recv check Fix a broken networking check. Return an error if peer recv fails.
If secmark is active and the packet recv succeeds the peer recv error is ignored. Signed-off-by: Chad Hanson Cc: stable@vger.kernel.org Signed-off-by: Paul Moore --- security/selinux/hooks.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 419491d8e7d2..5db26468b5c3 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4334,8 +4334,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) } err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, PEER__RECV, &ad); - if (err) + if (err) { selinux_netlbl_err(skb, err, 0); + return err; + } } if (secmark_active) { From c0c1439541f5305b57a83d599af32b74182933fe Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 23 Dec 2013 17:45:01 -0500 Subject: [PATCH 306/312] selinux: selinux_setprocattr()->ptrace_parent() needs rcu_read_lock() selinux_setprocattr() does ptrace_parent(p) under task_lock(p), but task_struct->alloc_lock doesn't pin ->parent or ->ptrace, this looks confusing and triggers the "suspicious RCU usage" warning because ptrace_parent() does rcu_dereference_check(). And in theory this is wrong, spin_lock()->preempt_disable() doesn't necessarily imply rcu_read_lock() we need to access the ->parent. Reported-by: Evan McNabb Signed-off-by: Oleg Nesterov Cc: stable@vger.kernel.org Signed-off-by: Paul Moore --- security/selinux/hooks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 5db26468b5c3..6625699f497c 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -5588,11 +5588,11 @@ static int selinux_setprocattr(struct task_struct *p, /* Check for ptracing, and update the task SID if ok. Otherwise, leave SID unchanged and fail. */ ptsid = 0; - task_lock(p); + rcu_read_lock(); tracer = ptrace_parent(p); if (tracer) ptsid = task_sid(tracer); - task_unlock(p); + rcu_read_unlock(); if (tracer) { error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, From f60900f2609e893c7f8d0bccc7ada4947dac4cd5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 23 Dec 2013 18:49:30 +0100 Subject: [PATCH 307/312] auxvec.h: account for AT_HWCAP2 in AT_VECTOR_SIZE_BASE Commit 2171364d1a92 ("powerpc: Add HWCAP2 aux entry") introduced a new AT_ auxv entry type AT_HWCAP2 but failed to update AT_VECTOR_SIZE_BASE accordingly. 
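The constant matters because the saved auxv array is sized from it; every AT_* entry the kernel may emit needs a slot, so adding AT_HWCAP2 without bumping AT_VECTOR_SIZE_BASE risks leaving the array a pair short. A hedged sketch of that sizing relationship is below; AT_VECTOR_SIZE_ARCH and the exact formula are illustrative assumptions, not copied from the kernel headers.

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative values only; the real ones live in <linux/auxvec.h> and
 * the per-arch headers. The point is that every AT_* entry which can be
 * emitted must be counted here, or the saved auxv array is too small.
 */
#define AT_VECTOR_SIZE_BASE 20          /* was 19 before AT_HWCAP2 */
#define AT_VECTOR_SIZE_ARCH 1           /* assumed arch entry count */

/* Each entry is an (id, value) pair, plus room for the AT_NULL pair. */
#define AT_VECTOR_SIZE (2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

int main(void)
{
        unsigned long saved_auxv[AT_VECTOR_SIZE];

        printf("auxv slots: %zu\n",
               sizeof(saved_auxv) / sizeof(saved_auxv[0]));
        assert(AT_VECTOR_SIZE >= 2 * (AT_VECTOR_SIZE_BASE + 1));
        return 0;
}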
Signed-off-by: Ard Biesheuvel Fixes: 2171364d1a92 (powerpc: Add HWCAP2 aux entry) Cc: stable@vger.kernel.org Acked-by: Michael Neuling Cc: Nishanth Aravamudan Cc: Benjamin Herrenschmidt Signed-off-by: Linus Torvalds --- include/linux/auxvec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 669fef5c745a..3e0fbe441763 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -3,6 +3,6 @@ #include -#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif /* _LINUX_AUXVEC_H */ From 38958c15dc640a9249e4f0cd0dfb0ddc7a23464d Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Thu, 12 Dec 2013 15:22:49 +0530 Subject: [PATCH 308/312] ARM: DRA7: hwmod: Fix boot crash with DEBUG_LL With commit '7dedd34: ARM: OMAP2+: hwmod: Fix a crash in _setup_reset() with DEBUG_LL' we moved from parsing cmdline to identify uart used for earlycon to using the requsite hwmod CONFIG_DEBUG_OMAPxUARTy FLAGS. On DRA7 though, we seem to be missing this flag, and atleast on the DRA7 EVM where we use uart1 for console, boot fails with DEBUG_LL enabled. Reported-by: Lokesh Vutla Tested-by: Lokesh Vutla # on a different base Signed-off-by: Rajendra Nayak Fixes: 7dedd346941d ("ARM: OMAP2+: hwmod: Fix a crash in _setup_reset() with DEBUG_LL") Signed-off-by: Paul Walmsley --- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index db32d5380b11..18f333c440db 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = { .class = &dra7xx_uart_hwmod_class, .clkdm_name = "l4per_clkdm", .main_clk = "uart1_gfclk_mux", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS, .prcm = { .omap4 = { .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, From 6d4c88304794442055eaea1c07f3c7b988b8c924 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Mon, 23 Dec 2013 16:53:11 -0600 Subject: [PATCH 309/312] ARM: OMAP2+: hwmod_data: fix missing OMAP_INTC_START in irq data Commit 7d7e1eb (ARM: OMAP2+: Prepare for irqs.h removal) and commit ec2c082 (ARM: OMAP2+: Remove hardcoded IRQs and enable SPARSE_IRQ) updated the way interrupts for OMAP2/3 devices are defined in the HWMOD data structures to being an index plus a fixed offset (defined by OMAP_INTC_START). Couple of irqs in the OMAP2/3 hwmod data were misconfigured completely as they were missing this OMAP_INTC_START relative offset. 
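With SPARSE_IRQ the hwmod tables must store Linux IRQ numbers, that is the SoC-local INTC line plus the fixed OMAP_INTC_START offset, and the entries fixed here carried only the raw line number. The short sketch below illustrates that mapping; the offset value 16 and the trimmed-down struct are assumptions made for the example, not the mach-omap2 definitions.

#include <stdio.h>

/* Assumed offset for the example; the platform code defines the real one. */
#define OMAP_INTC_START 16

struct hwmod_irq_info {
        const char *name;
        int irq;                /* Linux IRQ, not the raw INTC line */
};

/* GPMC sits on INTC line 20, so the table must carry line + offset. */
static const struct hwmod_irq_info gpmc_irqs[] = {
        { .name = "gpmc", .irq = 20 + OMAP_INTC_START },
        { .irq = -1 },          /* sentinel */
};

int main(void)
{
        printf("%s: INTC line %d -> Linux IRQ %d\n", gpmc_irqs[0].name,
               gpmc_irqs[0].irq - OMAP_INTC_START, gpmc_irqs[0].irq);
        return 0;
}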
Add this offset back to fix the incorrect irq data for the following modules: OMAP2 - GPMC, RNG OMAP3 - GPMC, ISP MMU & IVA MMU Signed-off-by: Suman Anna Fixes: 7d7e1eba7e92 ("ARM: OMAP2+: Prepare for irqs.h removal") Fixes: ec2c0825ca31 ("ARM: OMAP2+: Remove hardcoded IRQs and enable SPARSE_IRQ") Cc: Tony Lindgren Signed-off-by: Paul Walmsley --- arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 4 ++-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 56cebb05509e..d23c77fadb31 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = { /* gpmc */ static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = { - { .irq = 20 }, + { .irq = 20 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = { }; static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = { - { .irq = 52 }, + { .irq = 52 + OMAP_INTC_START, }, { .irq = -1 } }; diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 9e56fabd7fa3..3bfb2db674ae 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -2172,7 +2172,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = { }; static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = { - { .irq = 20 }, + { .irq = 20 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -3006,7 +3006,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = { static struct omap_hwmod omap3xxx_mmu_isp_hwmod; static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = { - { .irq = 24 }, + { .irq = 24 + OMAP_INTC_START, }, { .irq = -1 } }; @@ -3048,7 +3048,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = { static struct omap_hwmod omap3xxx_mmu_iva_hwmod; static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = { - { .irq = 28 }, + { .irq = 28 + OMAP_INTC_START, }, { .irq = -1 } }; From 7e367c18c059c638bf6fb540f1decec18d64cb55 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 27 Dec 2013 09:33:27 -0800 Subject: [PATCH 310/312] ARM: OMAP2+: Fix LCD panel backlight regression for LDP legacy booting Looks like the LCD panel on LDP has been broken quite a while, and recently got fixed by commit 0b2aa8bed3e1 (gpio: twl4030: Fix regression for twl gpio output). However, there's still an issue left where the panel backlight does not come on if the LCD drivers are built into the kernel. Fix the issue by registering the DPI LCD panel only after the twl4030 GPIO has probed. 
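The fix works by moving the LCD device registration out of the static board device list and into the twl4030 GPIO setup callback, the earliest point at which the enable and backlight GPIO numbers are known. Below is a self-contained sketch of that ordering; the structures, helper names and the GPIO base of 192 are simplified assumptions, not the mach-omap2 API.

#include <stdio.h>

struct panel_pdata {
        int enable_gpio;
        int backlight_gpio;
};

static struct panel_pdata lcd_pdata = { -1, -1 };

/* Stand-in for platform_device_register() of the LCD device. */
static int register_lcd(void)
{
        if (lcd_pdata.enable_gpio < 0 || lcd_pdata.backlight_gpio < 0) {
                fprintf(stderr, "LCD GPIOs not ready\n");
                return -1;
        }
        printf("LCD registered: enable=%d backlight=%d\n",
               lcd_pdata.enable_gpio, lcd_pdata.backlight_gpio);
        return 0;
}

/*
 * Stand-in for the twl4030 GPIO setup callback: it runs once the GPIO
 * expander has probed and its base number is known, which is the
 * earliest point the LCD device can be registered with valid pdata.
 */
static int twl_gpio_setup(unsigned int gpio_base)
{
        int res;

        lcd_pdata.enable_gpio = gpio_base + 7;
        lcd_pdata.backlight_gpio = gpio_base + 15;

        res = register_lcd();
        if (res)
                fprintf(stderr, "Unable to register LCD: %d\n", res);
        return 0;
}

int main(void)
{
        register_lcd();                 /* too early: GPIOs unknown */
        return twl_gpio_setup(192);     /* 192 is an assumed GPIO base */
}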
Reported-by: Russell King Acked-by: Tomi Valkeinen [tony@atomide.com: updated per Tomi's comments] Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-ldp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 4ec8d82b0492..44a59c3abfb0 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -242,12 +242,18 @@ static void __init ldp_display_init(void) static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int res; + /* LCD enable GPIO */ ldp_lcd_pdata.enable_gpio = gpio + 7; /* Backlight enable GPIO */ ldp_lcd_pdata.backlight_gpio = gpio + 15; + res = platform_device_register(&ldp_lcd_device); + if (res) + pr_err("Unable to register LCD: %d\n", res); + return 0; } @@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { static struct platform_device *ldp_devices[] __initdata = { &ldp_gpio_keys_device, - &ldp_lcd_device, }; #ifdef CONFIG_OMAP_MUX From 9928422fef6f9aafc09d3c2fed4f803f67f5240b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Dec 2013 09:48:58 +0100 Subject: [PATCH 311/312] ARM: pxa: fix USB gadget driver compilation regression After commit 88f718e3fa4d67f3a8dbe79a2f97d722323e4051 "ARM: pxa: delete the custom GPIO header" a compilation error was introduced in the PXA25x gadget driver. An attempt to fix the problem was made in commit b144e4ab1ef130e8bf30bcd3e529b7f35112c503 "usb: gadget: fix pxa25x compilation problems" by explictly stating the driver needs the header, which solved the compilation for a few boards, such as the pxa255-idp and its defconfig. However the Lubbock board has this special clause in drivers/usb/gadget/pxa25x_udc.c: This include file has an implicit dependency on having been included before was included. Before commit 88f718e3fa4d67f3a8dbe79a2f97d722323e4051 "ARM: pxa: delete the custom GPIO header" this implicit dependency for the pxa25x_udc compile on the Lubbock was satisfied by implicitly including which was in turn including , apart from the earlier added . Fix this by having the PXA25x explicitly include . Reported-by: Russell King Cc: Greg Kroah-Hartmann Cc: Felipe Balbi Signed-off-by: Linus Walleij Signed-off-by: Haojian Zhuang Signed-off-by: Olof Johansson --- arch/arm/mach-pxa/include/mach/lubbock.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 2a086e8373eb..958cd6af9384 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h @@ -10,6 +10,8 @@ * published by the Free Software Foundation. */ +#include + #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS From 802eee95bde72fd0cd0f3a5b2098375a487d1eda Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 29 Dec 2013 16:01:33 -0800 Subject: [PATCH 312/312] Linux 3.13-rc6 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 14d592cbbc5f..ab80be7a38bc 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = One Giant Leap for Frogkind # *DOCUMENTATION*
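The Lubbock header change above is the usual cure for an implicit header dependency: the board header is made to pull in the definitions it uses instead of relying on every includer to have done so first. A tiny standalone illustration of the idea follows; the macro names are invented for the example, and the stand-in only gestures at the #include the patch adds rather than the real mach-pxa code.

#include <stdio.h>

/*
 * irqs.h stand-in: the definition a board header silently relied on.
 * In the real tree this lives in its own header that must be included
 * explicitly.
 */
#define PLATFORM_IRQ(x) (16 + (x))

/*
 * lubbock.h stand-in: before the fix this macro only compiled when the
 * includer happened to pull in PLATFORM_IRQ() first; adding the missing
 * #include to the board header (as the patch does) removes that
 * ordering requirement.
 */
#define BOARD_ETH_IRQ PLATFORM_IRQ(3)

int main(void)
{
        printf("ethernet IRQ = %d\n", BOARD_ETH_IRQ);
        return 0;
}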