Linux 4.3-rc6

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJWJCaDAAoJEHm+PkMAQRiGvJQH/jGlxawu+mnNFjIRXmk8Zucq
 cxDVTPnQ4S1DJMyRSQTqe6ccDgDM/TEISJPZ0Gwc2SQLda27zdQiz05upbqHSPSH
 /FUHMKiMRhhIBOzdfEGR4Ry2YZID+DlG5EWCgRKf/GlgY3STZSpjIBPYd3Rl3zsU
 qragNjtbCE6eUpnLwkv40Q2mzzR3/fZxJ0drgE/QiSF8P0VmvVbrcQMF58vnUQgy
 9URp48mLhmKj+IrElSnvXMG0XLrKn1tRYXGXd3w+x1Nlr//SHYsIuQHUcp6SAVQh
 p2HXLL7eoy8rs45MCiOfXG7VgNWF6HFjVMtrNUcNfgBsLCx0qFMVrKIILrGtKh0=
 =ETze
 -----END PGP SIGNATURE-----

Merge tag 'v4.3-rc6' into locking/core, to pick up fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed 2015-10-20 10:16:46 +02:00
commit a1a2ab2ff7
291 changed files with 2089 additions and 1026 deletions

View File

@ -41,9 +41,13 @@ useless and be disabled, returning errors. So it is important to monitor
the amount of free space and expand the <COW device> before it fills up.
<persistent?> is P (Persistent) or N (Not persistent - will not survive
after reboot).
The difference is that for transient snapshots less metadata must be
saved on disk - they can be kept in memory by the kernel.
after reboot). O (Overflow) can be added as a persistent store option
to allow userspace to advertise its support for seeing "Overflow" in the
snapshot status. So supported store types are "P", "PO" and "N".
The difference between persistent and transient is with transient
snapshots less metadata must be saved on disk - they can be kept in
memory by the kernel.
* snapshot-merge <origin> <COW device> <persistent> <chunksize>

View File

@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
(default is 64)
- renesas,rx-fifo-size : Overrides the default rx fifo size given in words
(default is 64, or 256 on R-Car Gen2)
(default is 64)
Pinctrl properties might be needed, too. See
Documentation/devicetree/bindings/pinctrl/renesas,*.

View File

@ -5,6 +5,7 @@ Required properties:
- "renesas,usbhs-r8a7790"
- "renesas,usbhs-r8a7791"
- "renesas,usbhs-r8a7794"
- "renesas,usbhs-r8a7795"
- reg: Base address and length of the register for the USBHS
- interrupts: Interrupt specifier for the USBHS
- clocks: A list of phandle + clock specifier pairs

View File

@ -3591,6 +3591,13 @@ F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915*
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/atmel-hlcdc/
F: Documentation/devicetree/bindings/drm/atmel/
DRM DRIVERS FOR EXYNOS
M: Inki Dae <inki.dae@samsung.com>
M: Joonyoung Shim <jy0922.shim@samsung.com>
@ -3619,6 +3626,14 @@ S: Maintained
F: drivers/gpu/drm/imx/
F: Documentation/devicetree/bindings/drm/imx/
DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
L: dri-devel@lists.freedesktop.org
T: git git://github.com/patjak/drm-gma500
S: Maintained
F: drivers/gpu/drm/gma500
F: include/drm/gma500*
DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
M: Terje Bergström <tbergstrom@nvidia.com>
@ -4003,7 +4018,7 @@ S: Maintained
F: sound/usb/misc/ua101.c
EXTENSIBLE FIRMWARE INTERFACE (EFI)
M: Matt Fleming <matt.fleming@intel.com>
M: Matt Fleming <matt@codeblueprint.co.uk>
L: linux-efi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
S: Maintained
@ -4018,7 +4033,7 @@ F: include/linux/efi*.h
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
M: Jeremy Kerr <jk@ozlabs.org>
M: Matt Fleming <matt.fleming@intel.com>
M: Matt Fleming <matt@codeblueprint.co.uk>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
L: linux-efi@vger.kernel.org
S: Maintained
@ -9101,6 +9116,15 @@ S: Supported
F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
SYNOPSYS DESIGNWARE I2C DRIVER
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-designware-*
F: include/linux/platform_data/i2c-designware.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Seungwon Jeon <tgih.jun@samsung.com>
M: Jaehoon Chung <jh80.chung@samsung.com>
@ -9914,7 +9938,6 @@ S: Maintained
F: drivers/staging/lustre
STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
M: Julian Andres Klode <jak@jak-linux.org>
M: Marc Dietrich <marvin24@gmx.de>
L: ac100@lists.launchpad.net (moderated for non-subscribers)
L: linux-tegra@vger.kernel.org
@ -11378,15 +11401,6 @@ W: http://oops.ghostprotocols.net:81/blog
S: Maintained
F: drivers/net/wireless/wl3501*
WM97XX TOUCHSCREEN DRIVERS
M: Mark Brown <broonie@kernel.org>
M: Liam Girdwood <lrg@slimlogic.co.uk>
L: linux-input@vger.kernel.org
W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
F: drivers/input/touchscreen/*wm97*
F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
L: patches@opensource.wolfsonmicro.com
T: git https://github.com/CirrusLogic/linux-drivers.git

View File

@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 3
SUBLEVEL = 0
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma sheep
EXTRAVERSION = -rc6
NAME = Blurry Fish Butt
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

View File

@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
#endif
}
#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
#endif /* _ASM_WORD_AT_A_TIME_H */

View File

@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-hackberry.dtb \
sun4i-a10-hyundai-a7hd.dtb \
sun4i-a10-inet97fv2.dtb \
sun4i-a10-itead-iteaduino-plus.dts \
sun4i-a10-itead-iteaduino-plus.dtb \
sun4i-a10-jesurun-q5.dtb \
sun4i-a10-marsboard.dtb \
sun4i-a10-mini-xplus.dtb \

View File

@ -98,6 +98,7 @@ opp06 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <200000>;
opp-suspend;
};
opp07 {
opp-hz = /bits/ 64 <900000000>;

View File

@ -197,6 +197,7 @@ ldo10_reg: LDO10 {
regulator-name = "P1.8V_LDO_OUT10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
ldo11_reg: LDO11 {

View File

@ -1117,7 +1117,7 @@ sysmmu_fimd1_1: sysmmu@0x14680000 {
interrupt-parent = <&combiner>;
interrupts = <3 0>;
clock-names = "sysmmu", "master";
clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
power-domains = <&disp_pd>;
#iommu-cells = <0>;
};

View File

@ -472,7 +472,6 @@ &pwm {
*/
pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
pinctrl-names = "default";
samsung,pwm-outputs = <0>;
status = "okay";
};

View File

@ -36,7 +36,7 @@ pmic: mc34708@8 {
pinctrl-0 = <&pinctrl_pmic>;
reg = <0x08>;
interrupt-parent = <&gpio5>;
interrupts = <23 0x8>;
interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
regulators {
sw1_reg: sw1a {
regulator-name = "SW1";

View File

@ -15,6 +15,7 @@
#include <dt-bindings/clock/imx5-clock.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
aliases {

View File

@ -35,7 +35,6 @@ reg_usbh1_vbus: regulator@1 {
compatible = "regulator-fixed";
reg = <1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh1>;
regulator-name = "usbh1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@ -47,7 +46,6 @@ reg_usb_otg_vbus: regulator@2 {
compatible = "regulator-fixed";
reg = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotg>;
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;

View File

@ -1627,6 +1627,7 @@ rcar_sound: sound@ec500000 {
"mix.0", "mix.1",
"dvc.0", "dvc.1",
"clk_a", "clk_b", "clk_c", "clk_i";
power-domains = <&cpg_clocks>;
status = "disabled";

View File

@ -1677,6 +1677,7 @@ rcar_sound: sound@ec500000 {
"mix.0", "mix.1",
"dvc.0", "dvc.1",
"clk_a", "clk_b", "clk_c", "clk_i";
power-domains = <&cpg_clocks>;
status = "disabled";

View File

@ -107,7 +107,7 @@ cpu0: cpu@0 {
720000 1200000
528000 1100000
312000 1000000
144000 900000
144000 1000000
>;
#cooling-cells = <2>;
cooling-min-level = <0>;

View File

@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
#include "regs-pmu.h"
#include "common.h"
@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
exynos_cpu_power_up(cpunr);
if (!exynos_cpu_power_state(cpunr)) {
exynos_cpu_power_up(cpunr);
/*
* This assumes the cluster number of the big cores (Cortex-A15)
* is 0 and that of the little cores (Cortex-A7) is 1.
* When the system was booted from a little core, the little
* cores must be reset during CPU power-up.
*/
if (cluster &&
cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
/*
* Before we reset the little cores, wait until the
* SPARE2 register reads 1: the iROM init code sets
* that register once its initialization is done.
*/
while (!pmu_raw_readl(S5P_PMU_SPARE2))
udelay(10);
pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
EXYNOS_SWRESET);
}
}
return 0;
}

View File

@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
#define SPREAD_ENABLE 0xF
#define SPREAD_USE_STANDWFI 0xF
#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
#define EXYNOS5420_KFC_CORE_RESET(_nr) \
((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
#define EXYNOS5420_BB_CON1 0x0784
#define EXYNOS5420_BB_SEL_EN BIT(31)
#define EXYNOS5420_BB_PMOS_EN BIT(7)

View File

@ -42,7 +42,7 @@ endif
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
CFLAGS_MODULE += -mcmodel=large
KBUILD_CFLAGS_MODULE += -mcmodel=large
endif
# Default value

View File

@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
#define __NR_compat_syscalls 388
#define __NR_compat_syscalls 390
#endif
#define __ARCH_WANT_SYS_CLONE

View File

@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
__SYSCALL(__NR_bpf, sys_bpf)
#define __NR_execveat 387
__SYSCALL(__NR_execveat, compat_sys_execveat)
#define __NR_userfaultfd 388
__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
#define __NR_membarrier 389
__SYSCALL(__NR_membarrier, sys_membarrier)
/*
* Please add new compat syscalls above this comment and update
* __NR_compat_syscalls in asm/unistd.h.
*/
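
The comment added here states the invariant this very hunk follows: the table in unistd32.h and the count in asm/unistd.h must move together, with __NR_compat_syscalls always equal to the highest syscall number plus one. A minimal sketch of the pairing, using a purely hypothetical syscall 390 for illustration (not from the tree):

/* asm/unistd.h -- count is always highest number + 1 */
#define __NR_compat_syscalls		391

/* asm/unistd32.h -- hypothetical entry for illustration only */
#define __NR_hypothetical 390
__SYSCALL(__NR_hypothetical, sys_hypothetical)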

View File

@ -19,6 +19,9 @@
/* Required for AArch32 compatibility. */
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 5120
#define SIGSTKSZ 16384
#include <asm-generic/signal.h>
#endif

View File

@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
}
/*
* Call registered single step handers
* Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
* So we call all the registered handlers, until the right handler is
* found which returns zero.
@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
static DEFINE_RWLOCK(break_hook_lock);
static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{
write_lock(&break_hook_lock);
list_add(&hook->node, &break_hook);
write_unlock(&break_hook_lock);
spin_lock(&break_hook_lock);
list_add_rcu(&hook->node, &break_hook);
spin_unlock(&break_hook_lock);
}
void unregister_break_hook(struct break_hook *hook)
{
write_lock(&break_hook_lock);
list_del(&hook->node);
write_unlock(&break_hook_lock);
spin_lock(&break_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&break_hook_lock);
synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
read_lock(&break_hook_lock);
list_for_each_entry(hook, &break_hook, node)
rcu_read_lock();
list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
read_unlock(&break_hook_lock);
rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
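
This hunk is the classic rwlock-to-RCU conversion for a read-mostly hook list: writers still serialize on a plain spinlock but use the _rcu list helpers, the reader walks the list locklessly under rcu_read_lock() (safe even from the debug-exception path that calls it), and unregister ends with synchronize_rcu() so a hook is never freed while a reader might still see it. A self-contained sketch of the same pattern, with illustrative names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct hook {
	struct list_head node;
	int (*fn)(void *arg);
};

static LIST_HEAD(hooks);
static DEFINE_SPINLOCK(hooks_lock);	/* serializes writers only */

void hook_register(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_add_rcu(&h->node, &hooks);
	spin_unlock(&hooks_lock);
}

void hook_unregister(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_del_rcu(&h->node);
	spin_unlock(&hooks_lock);
	synchronize_rcu();	/* all readers done; h may now be freed */
}

int hook_call(void *arg)
{
	struct hook *h;
	int ret = -1;

	rcu_read_lock();	/* readers never block, never spin */
	list_for_each_entry_rcu(h, &hooks, node)
		ret = h->fn(arg);
	rcu_read_unlock();
	return ret;
}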

View File

@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn));
}
static DEFINE_SPINLOCK(patch_lock);
static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0;
int ret;
spin_lock_irqsave(&patch_lock, flags);
raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0);
spin_unlock_irqrestore(&patch_lock, flags);
raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}

View File

@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
to_free = ram_end - orig_start;
size = orig_end - orig_start;
if (!size)
return;
/* initrd needs to be relocated completely inside linear mapping */
new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),

View File

@ -287,6 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}

View File

@ -73,4 +73,5 @@ generic-y += uaccess.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h

View File

@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
*/
#define ioremap_nocache(offset, size) \
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache
/*
* ioremap_cachable - map bus memory into CPU space

View File

@ -13,16 +13,15 @@
#define __SWAB_64_THRU_32__
#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
defined(_MIPS_ARCH_LOONGSON3A)
#if !defined(__mips16) && \
((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
defined(_MIPS_ARCH_LOONGSON3A))
static inline __attribute__((nomips16)) __attribute_const__
__u16 __arch_swab16(__u16 x)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
" .set push \n"
" .set arch=mips32r2 \n"
" .set nomips16 \n"
" wsbh %0, %1 \n"
" .set pop \n"
: "=r" (x)
@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__
}
#define __arch_swab16 __arch_swab16
static inline __attribute__((nomips16)) __attribute_const__
__u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
" .set push \n"
" .set arch=mips32r2 \n"
" .set nomips16 \n"
" wsbh %0, %1 \n"
" rotr %0, %0, 16 \n"
" .set pop \n"
@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__
* 64-bit kernel on r2 CPUs.
*/
#ifdef __mips64
static inline __attribute__((nomips16)) __attribute_const__
__u64 __arch_swab64(__u64 x)
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
{
__asm__(
" .set push \n"
" .set arch=mips64r2 \n"
" .set nomips16 \n"
" dsbh %0, %1 \n"
" dshd %0, %0 \n"
" .set pop \n"
@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__
}
#define __arch_swab64 __arch_swab64
#endif /* __mips64 */
#endif /* MIPS R2 or newer or Loongson 3A */
#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
#endif /* _ASM_SWAB_H */

View File

@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y

View File

@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y

View File

@ -7,4 +7,3 @@ generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += vtime.h
generic-y += word-at-a-time.h

View File

@ -61,8 +61,13 @@ struct machdep_calls {
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local);
/* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
/*
* Special for kexec.
* To be called in real mode with interrupts disabled. No locks are
* taken as such, concurrent access on pre POWER5 hardware could result
* in a deadlock.
* The linear mapping is destroyed as well.
*/
void (*hpte_clear_all)(void);
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,

View File

@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
return (val + c->high_bits) & ~rhs;
}
static inline unsigned long zero_bytemask(unsigned long mask)
{
return ~1ul << __fls(mask);
}
#else
#ifdef CONFIG_64BIT

View File

@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
* be when they isi), and we are the only one left. We rely on our kernel
* mapping being 0xC0's and the hardware ignoring those two real bits.
*
* This must be called with interrupts disabled.
*
* Taking the native_tlbie_lock is unsafe here due to the possibility of
* lockdep being on. On pre POWER5 hardware, not taking the lock could
* cause deadlock. POWER5 and newer not taking the lock is fine. This only
* gets called during boot before secondary CPUs have come up and during
* crashdump and all bets are off anyway.
*
* TODO: add batching support when enabled. Remember, no dynamic memory here,
* although there is the control page available...
*/
static void native_hpte_clear(void)
{
unsigned long vpn = 0;
unsigned long slot, slots, flags;
unsigned long slot, slots;
struct hash_pte *hptep = htab_address;
unsigned long hpte_v;
unsigned long pteg_count;
@ -596,13 +604,6 @@ static void native_hpte_clear(void)
pteg_count = htab_hash_mask + 1;
local_irq_save(flags);
/* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once.
*/
raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP;
for (slot = 0; slot < slots; slot++, hptep++) {
@ -614,8 +615,8 @@ static void native_hpte_clear(void)
hpte_v = be64_to_cpu(hptep->v);
/*
* Call __tlbie() here rather than tlbie() since we
* already hold the native_tlbie_lock.
* Call __tlbie() here rather than tlbie() since we can't take the
* native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@ -625,8 +626,6 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
/*

View File

@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs)
* PRD component would have already got notified about this
* error through other channels.
*
* In any case, let us just fall through. We anyway heading
* down to panic path.
* If hardware marked this as an unrecoverable MCE, we are
* going to panic anyway. Even if it didn't, it's not safe to
* continue at this point, so we should explicitly panic.
*/
panic("PowerNV Unrecovered Machine Check");
return 0;
}

View File

@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
.key = OS_AREA_DB_KEY_RTC_DIFF
};
static const struct os_area_db_id os_area_db_id_video_mode = {
.owner = OS_AREA_DB_OWNER_LINUX,
.key = OS_AREA_DB_KEY_VIDEO_MODE
};
#define SECONDS_FROM_1970_TO_2000 946684800LL
/**

View File

@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)

View File

@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m

View File

@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m

View File

@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m

View File

@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
int __node_distance(int a, int b);
void numa_update_cpu_topology(void);
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
extern int numa_debug_enabled;
#else

View File

@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
#define cpumask_of_node cpumask_of_node
static inline const struct cpumask *cpumask_of_node(int node)
{
return node_to_cpumask_map[node];
return &node_to_cpumask_map[node];
}
/*

View File

@ -176,6 +176,7 @@ int main(void)
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));

View File

@ -733,6 +733,14 @@ ENTRY(psw_idle)
stg %r3,__SF_EMPTY(%r15)
larl %r1,.Lpsw_idle_lpsw+4
stg %r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mtid
llgf %r1,0(%r1)
ltgr %r1,%r1
jz .Lpsw_idle_stcctm
.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
@ -1159,7 +1167,27 @@ cleanup_critical:
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1: # account system time going idle
1: # calculate idle cycles
#ifdef CONFIG_SMP
clg %r9,BASED(.Lcleanup_idle_insn)
jl 3f
larl %r1,smp_cpu_mtid
llgf %r1,0(%r1)
ltgr %r1,%r1
jz 3f
.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
larl %r3,mt_cycles
ag %r3,__LC_PERCPU_OFFSET
la %r4,__SF_EMPTY+16(%r15)
2: lg %r0,0(%r3)
slg %r0,0(%r4)
alg %r0,64(%r4)
stg %r0,0(%r3)
la %r3,8(%r3)
la %r4,8(%r4)
brct %r1,2b
#endif
3: # account system time going idle
lg %r9,__LC_STEAL_TIMER
alg %r9,__CLOCK_IDLE_ENTER(%r2)
slg %r9,__LC_LAST_UPDATE_CLOCK

View File

@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
static DEFINE_PER_CPU(u64, mt_cycles[32]);
DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
return elapsed >= atomic64_read(&virt_timer_current);
}
static void update_mt_scaling(void)
{
u64 cycles_new[8], *cycles_old;
u64 delta, fac, mult, div;
int i;
stcctm5(smp_cpu_mtid + 1, cycles_new);
cycles_old = this_cpu_ptr(mt_cycles);
fac = 1;
mult = div = 0;
for (i = 0; i <= smp_cpu_mtid; i++) {
delta = cycles_new[i] - cycles_old[i];
div += delta;
mult *= i + 1;
mult += delta * fac;
fac *= i + 1;
}
div *= fac;
if (div > 0) {
/* Update scaling factor */
__this_cpu_write(mt_scaling_mult, mult);
__this_cpu_write(mt_scaling_div, div);
memcpy(cycles_old, cycles_new,
sizeof(u64) * (smp_cpu_mtid + 1));
}
__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;
int i;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
/* Do MT utilization calculation */
/* Update MT utilization calculation */
if (smp_cpu_mtid &&
time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
u64 cycles_new[32], *cycles_old;
u64 delta, fac, mult, div;
cycles_old = this_cpu_ptr(mt_cycles);
if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
fac = 1;
mult = div = 0;
for (i = 0; i <= smp_cpu_mtid; i++) {
delta = cycles_new[i] - cycles_old[i];
div += delta;
mult *= i + 1;
mult += delta * fac;
fac *= i + 1;
}
div *= fac;
if (div > 0) {
/* Update scaling factor */
__this_cpu_write(mt_scaling_mult, mult);
__this_cpu_write(mt_scaling_div, div);
memcpy(cycles_old, cycles_new,
sizeof(u64) * (smp_cpu_mtid + 1));
}
}
__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
user = S390_lowcore.user_timer - ti->user_timer;
S390_lowcore.steal_timer -= user;
@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
/* Update MT utilization calculation */
if (smp_cpu_mtid &&
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;

View File

@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
top->node_id = core_node(core)->id;
}
}
@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
/* Clear all node masks */
for (i = 0; i < MAX_NUMNODES; i++)
cpumask_clear(node_to_cpumask_map[i]);
cpumask_clear(&node_to_cpumask_map[i]);
/* Rebuild all masks */
toptree_for_each(core, numa, CORE)

View File

@ -23,7 +23,7 @@
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
const struct numa_mode numa_mode_plain = {
@ -144,7 +144,7 @@ void __init numa_setup(void)
static int __init numa_init_early(void)
{
/* Attach all possible CPUs to node 0 for now. */
cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
return 0;
}
early_initcall(numa_init_early);

View File

@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
#define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE)
struct page;
struct vm_area_struct;

View File

@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,

View File

@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.ivsize = CAMELLIA_BLOCK_SIZE,
.setkey = camellia_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,

View File

@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_set_key,
.encrypt = cbc3_encrypt,
.decrypt = cbc3_decrypt,
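
All four hunks in this file are the same one-line fix: CBC and CTR are IV-driven modes, and a blkcipher that leaves .ivsize at its implicit default of 0 tells the crypto API that no IV exists, so IVs supplied by callers are silently dropped. A hedged sketch of a declaration with the field present (names illustrative; setkey/encrypt/decrypt callbacks omitted):

#include <linux/crypto.h>
#include <crypto/aes.h>

static struct crypto_alg cbc_sketch = {
	.cra_name	= "cbc(aes)",
	.cra_flags	= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_u.blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,	/* the field these hunks add */
	},
};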

View File

@ -40,5 +40,4 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h

View File

@ -6,7 +6,7 @@
struct word_at_a_time { /* unused */ };
#define WORD_AT_A_TIME_CONSTANTS {}
/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
static inline unsigned long has_zero(unsigned long val, unsigned long *data,
const struct word_at_a_time *c)
{
@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
#endif
}
#ifdef __BIG_ENDIAN
#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
#else
#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
#endif
#endif /* _ASM_WORD_AT_A_TIME_H */
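
Per the corrected comment above, the mask handed to these macros carries a 0x01 marker byte at each zero byte of the word, so zero_bytemask() only has to extend a mask up to the first marker: from the least significant set bit on little-endian, from the most significant one on big-endian. Stray bits inside the terminating byte itself don't matter, since that byte is zero anyway. A standalone check of the little-endian form (plain userspace C, not kernel code):

#include <stdio.h>

#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)

int main(void)
{
	/* word holds "ab\0z" in little-endian byte order */
	unsigned long word = 0x000000007a006261ul;
	/* marker 0x01 sits at byte 2, the first zero byte */
	unsigned long mask = 0x0000000000010000ul;

	/* keeps only the bytes before the terminator: prints ...6261 */
	printf("%016lx\n", word & zero_bytemask(mask));
	return 0;
}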

View File

@ -1308,6 +1308,7 @@ config HIGHMEM
config X86_PAE
bool "PAE (Physical Address Extension) Support"
depends on X86_32 && !HIGHMEM4G
select SWIOTLB
---help---
PAE is required for NX support, and furthermore enables
larger swapspace support for non-overcommit purposes. It

View File

@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
{
const char *feature_name;
if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
pr_info("AVX or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;

View File

@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
int __x86_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
int x86_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

View File

@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}
static inline int
static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
return _hypercall2(int, memory_op, cmd, arg);
return _hypercall2(long, memory_op, cmd, arg);
}
static inline int
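
Widening memory_op from int to long matters because several XENMEM_* commands return a quantity (a frame number or page count) rather than an error code, and on large 64-bit hosts that value no longer fits in int; a later hunk in this series makes a caller's ret variable long to match. A two-line illustration of the truncation hazard (plain C, not from the tree):

#include <stdio.h>

int main(void)
{
	long pages = 0x100000000L;	/* a count above 2^32 */
	int truncated = (int)pages;	/* what the old int prototype kept */

	printf("%ld -> %d\n", pages, truncated);	/* 4294967296 -> 0 */
	return 0;
}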

View File

@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
u64 val, cr0, cr4;
u32 base3;
u16 selector;
int i;
int i, r;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_64(ctxt, smbase, i);
r = rsm_load_seg_64(ctxt, smbase, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return rsm_enter_protected_mode(ctxt, cr0, cr4);
return X86EMUL_CONTINUE;
}
static int em_rsm(struct x86_emulate_ctxt *ctxt)

View File

@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page_done)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
kvm_userspace_mem.flags = 0;
kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
kvm_userspace_mem.memory_size = PAGE_SIZE;
r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
if (r)
goto out;
@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
{
/* Called with kvm->slots_lock held. */
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
BUG_ON(kvm->arch.ept_identity_pagetable_done);
kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
kvm_userspace_mem.flags = 0;
kvm_userspace_mem.guest_phys_addr =
kvm->arch.ept_identity_map_addr;
kvm_userspace_mem.memory_size = PAGE_SIZE;
r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
kvm->arch.ept_identity_map_addr, PAGE_SIZE);
return r;
}
@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
int ret;
struct kvm_userspace_memory_region tss_mem = {
.slot = TSS_PRIVATE_MEMSLOT,
.guest_phys_addr = addr,
.memory_size = PAGE_SIZE * 3,
.flags = 0,
};
ret = x86_set_memory_region(kvm, &tss_mem);
ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
PAGE_SIZE * 3);
if (ret)
return ret;
kvm->arch.tss_addr = addr;

View File

@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
return 1;
}
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);
}
static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
for (;;) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
if (kvm_vcpu_running(vcpu))
r = vcpu_enter_guest(vcpu);
else
r = vcpu_block(kvm, vcpu);
@ -7474,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
kvm_free_pit(kvm);
}
int __x86_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem)
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
unsigned long hva;
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *slot, old;
/* Called with kvm->slots_lock held. */
BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
return -EINVAL;
slot = id_to_memslot(slots, id);
if (size) {
if (WARN_ON(slot->npages))
return -EEXIST;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
if (IS_ERR((void *)hva))
return PTR_ERR((void *)hva);
} else {
if (!slot->npages)
return 0;
hva = 0;
}
old = *slot;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_userspace_memory_region m = *mem;
struct kvm_userspace_memory_region m;
m.slot |= i << 16;
m.slot = id | (i << 16);
m.flags = 0;
m.guest_phys_addr = gpa;
m.userspace_addr = hva;
m.memory_size = size;
r = __kvm_set_memory_region(kvm, &m);
if (r < 0)
return r;
}
if (!size) {
r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
WARN_ON(r < 0);
}
return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
int x86_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem)
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int r;
mutex_lock(&kvm->slots_lock);
r = __x86_set_memory_region(kvm, mem);
r = __x86_set_memory_region(kvm, id, gpa, size);
mutex_unlock(&kvm->slots_lock);
return r;
@ -7516,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
* unless the memory map has changed due to process exit
* or fd copying.
*/
struct kvm_userspace_memory_region mem;
memset(&mem, 0, sizeof(mem));
mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
x86_set_memory_region(kvm, &mem);
mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
x86_set_memory_region(kvm, &mem);
mem.slot = TSS_PRIVATE_MEMSLOT;
x86_set_memory_region(kvm, &mem);
x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
}
kvm_iommu_unmap_guest(kvm);
kfree(kvm->arch.vpic);
@ -7628,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
/*
* Only private memory slots need to be mapped here since
* KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
unsigned long userspace_addr;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
memslot->userspace_addr = userspace_addr;
}
return 0;
}
@ -7710,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
int nr_mmu_pages = 0;
if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
int ret;
ret = vm_munmap(old->userspace_addr,
old->npages * PAGE_SIZE);
if (ret < 0)
printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: "
"failed to munmap memory\n");
}
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@ -7769,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm_mmu_invalidate_zap_all_pages(kvm);
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
if (kvm_apic_has_events(vcpu))
return true;
if (vcpu->arch.pv.pv_unhalted)
return true;
if (atomic_read(&vcpu->arch.nmi_queued))
return true;
if (test_bit(KVM_REQ_SMI, &vcpu->requests))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu))
return true;
return false;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
kvm_x86_ops->check_nested_events(vcpu, false);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
|| !list_empty_careful(&vcpu->async_pf.done)
|| kvm_apic_has_events(vcpu)
|| vcpu->arch.pv.pv_unhalted
|| atomic_read(&vcpu->arch.nmi_queued) ||
(kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu));
return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
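
The net effect of this refactor: the private-memslot helpers now take (id, gpa, size) and own the backing vm_mmap()/vm_munmap() themselves, which is why the special cases disappear from the prepare/commit hooks above. A hedged sketch of the resulting call pattern, mirroring the TSS slot usage in this diff:

/* create a 3-page private slot backed by an anonymous shared mapping */
r = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3);

/* size == 0 deletes the slot and munmaps whatever it had mapped */
x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);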

View File

@ -33,6 +33,10 @@
#include <linux/memblock.h>
#include <linux/edd.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
/* Fast syscall setup is all done in hypercalls, so
these are all ignored. Stub them out here to stop
Xen console noise. */
break;
default:
if (!pmu_msr_write(msr, low, high, &ret))
@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
.notifier_call = xen_hvm_cpu_notify,
};
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
native_machine_shutdown();
if (kexec_in_progress)
xen_reboot(SHUTDOWN_soft_reset);
}
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
native_machine_crash_shutdown(regs);
xen_reboot(SHUTDOWN_soft_reset);
}
#endif
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
#ifdef CONFIG_KEXEC_CORE
machine_ops.shutdown = xen_hvm_shutdown;
machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
#endif

View File

@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;
/*
* Hint at last populated PFN.
*
* Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
* can avoid scanning the whole P2M (which may be sized to account for
* hotplugged memory).
*/
static unsigned long xen_p2m_last_pfn;
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
else
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
virt_to_mfn(p2m_top_mfn);
HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
HYPERVISOR_shared_info->arch.p2m_generation = 0;
HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
HYPERVISOR_shared_info->arch.p2m_cr3 =
@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
static struct vm_struct vm;
unsigned long p2m_limit;
xen_p2m_last_pfn = xen_max_p2m_pfn;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
free_p2m_page(p2m);
}
/* Expanded the p2m? */
if (pfn > xen_p2m_last_pfn) {
xen_p2m_last_pfn = pfn;
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
}
return true;
}

View File

@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages, limit;
domid_t domid = DOMID_SELF;
int ret;
long ret;
limit = xen_get_pages_limit();
max_pages = limit;
@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
xen_ignore_unusable();
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
&xen_e820_map_entries);
max_pages = xen_get_max_pages();

View File

@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
struct crypto_alg *base = &alg->halg.base;
if (alg->halg.digestsize > PAGE_SIZE / 8 ||
alg->halg.statesize > PAGE_SIZE / 8)
alg->halg.statesize > PAGE_SIZE / 8 ||
alg->halg.statesize == 0)
return -EINVAL;
base->cra_type = &crypto_ahash_type;

View File

@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
#if (!ACPI_REDUCED_HARDWARE)
ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);

View File

@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
/*
* tbfadt - FADT parse/convert/validate
*/
void acpi_tb_parse_fadt(u32 table_index);
void acpi_tb_parse_fadt(void);
void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
*/
acpi_status acpi_tb_initialize_facs(void);
u8 acpi_tb_tables_loaded(void);
void
acpi_tb_print_table_header(acpi_physical_address address,
struct acpi_table_header *header);

View File

@ -71,7 +71,7 @@ acpi_status acpi_enable(void)
/* ACPI tables must be present */
if (!acpi_tb_tables_loaded()) {
if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) {
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}

View File

@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
*
* FUNCTION: acpi_tb_parse_fadt
*
* PARAMETERS: table_index - Index for the FADT
* PARAMETERS: None
*
* RETURN: None
*
@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
*
******************************************************************************/
void acpi_tb_parse_fadt(u32 table_index)
void acpi_tb_parse_fadt(void)
{
u32 length;
struct acpi_table_header *table;
@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index)
* Get a local copy of the FADT and convert it to a common format
* Map entire FADT, assumed to be smaller than one page.
*/
length = acpi_gbl_root_table_list.tables[table_index].length;
length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
table =
acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
address, length);
acpi_os_map_memory(acpi_gbl_root_table_list.
tables[acpi_gbl_fadt_index].address, length);
if (!table) {
return;
}

View File

@ -97,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void)
}
#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
* FUNCTION: acpi_tb_tables_loaded
*
* PARAMETERS: None
*
* RETURN: TRUE if required ACPI tables are loaded
*
* DESCRIPTION: Determine if the minimum required ACPI tables are present
* (FADT, FACS, DSDT)
*
******************************************************************************/
u8 acpi_tb_tables_loaded(void)
{
if (acpi_gbl_root_table_list.current_table_count >= 4) {
return (TRUE);
}
return (FALSE);
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_check_dsdt_header
@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
tables[table_index].signature,
ACPI_SIG_FADT)) {
acpi_tb_parse_fadt(table_index);
acpi_gbl_fadt_index = table_index;
acpi_tb_parse_fadt();
}
next_table:

View File

@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev)
dev_update_qos_constraint);
if (constraint_ns > 0) {
constraint_ns -= td->start_latency_ns;
constraint_ns -= td->save_state_latency_ns +
td->stop_latency_ns +
td->start_latency_ns +
td->restore_state_latency_ns;
if (constraint_ns == 0)
return false;
}
td->effective_constraint_ns = constraint_ns;
td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
constraint_ns == 0;
td->cached_stop_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
* their stop latencies into account here.
@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
off_on_time_ns = genpd->power_off_latency_ns +
genpd->power_on_latency_ns;
/*
* It doesn't make sense to remove power from the domain if saving
* the state of all devices in it and the power off/power on operations
* take too much time.
*
* All devices in this domain have been stopped already at this point.
*/
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
if (pdd->dev->driver)
off_on_time_ns +=
to_gpd_data(pdd)->td.save_state_latency_ns;
}
min_off_time_ns = -1;
/*
@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
* constraint_ns cannot be negative here, because the device has
* been suspended.
*/
constraint_ns -= td->restore_state_latency_ns;
if (constraint_ns <= off_on_time_ns)
return false;
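
Read together, these two hunks move the per-device latency accounting: default_stop_ok() now charges the whole save/stop/start/restore round trip against the device's QoS constraint (so cached_stop_ok reduces to "some budget left"), and default_power_down_ok() stops charging the save and restore latencies a second time. A worked example of the new budget arithmetic, with purely illustrative numbers:

/* illustrative numbers only */
long long qos_ns = 500000;	/* device PM QoS constraint */
long long save_ns = 50000, stop_ns = 20000;
long long start_ns = 30000, restore_ns = 60000;

long long budget = qos_ns - (save_ns + stop_ns + start_ns + restore_ns);
/* budget = 340000 ns >= 0: stopping is allowed, and 340000 ns is the
 * effective constraint left for the domain power off/on decision */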

View File

@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
snprintf(buf, buf_size, "%x", max_val);
return strlen(buf);
return snprintf(NULL, 0, "%x", max_val);
}
static ssize_t regmap_name_read_file(struct file *file,
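
snprintf() called with a NULL buffer and size 0 is standard C for "just tell me the formatted length", which lets regmap_calc_reg_len() drop the scratch buffer and any risk of measuring a truncated string. A standalone illustration of the idiom:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int max_reg = 0x3fff;
	/* length-only pass: writes nothing, returns the would-be length */
	int len = snprintf(NULL, 0, "%x", max_reg);
	char *buf = malloc(len + 1);

	if (!buf)
		return 1;
	snprintf(buf, len + 1, "%x", max_reg);
	printf("%d chars: %s\n", len, buf);	/* 4 chars: 3fff */
	free(buf);
	return 0;
}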
@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
if (buf_pos >= count - 1 - tot_len)
if (buf_pos + tot_len + 1 >= count)
break;
/* Format the register */

View File

@ -1863,9 +1863,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_read_callback(obj_request);
break;
case CEPH_OSD_OP_SETALLOCHINT:
rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
/* fall through */
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
rbd_osd_write_callback(obj_request);
break;
case CEPH_OSD_OP_STAT:
@ -2401,7 +2403,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
opcode = CEPH_OSD_OP_ZERO;
}
} else if (op_type == OBJ_OP_WRITE) {
opcode = CEPH_OSD_OP_WRITE;
if (!offset && length == object_size)
opcode = CEPH_OSD_OP_WRITEFULL;
else
opcode = CEPH_OSD_OP_WRITE;
osd_req_op_alloc_hint_init(osd_request, num_ops,
object_size, object_size);
num_ops++;
@ -3760,6 +3765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
/* set io sizes to object size */
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);

View File

@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL
config ARM_CCI500_PMU
bool "ARM CCI500 PMU support"
default y
depends on (ARM && CPU_V7) || ARM64
depends on PERF_EVENTS
select ARM_CCI_PMU

View File

@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
for_each_node_by_type(dn, "cpu") {
struct clk_init_data init;
struct clk *clk;
struct clk *parent_clk;
char *clk_name = kzalloc(5, GFP_KERNEL);
int cpu, err;
@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node)
goto bail_out;
sprintf(clk_name, "cpu%d", cpu);
parent_clk = of_clk_get(node, 0);
cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
cpuclk[cpu].clk_name = clk_name;
cpuclk[cpu].cpu = cpu;
cpuclk[cpu].reg_base = clock_complex_base;

View File

@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
* the values for DIV_COPY and DIV_HPM dividers need not be set.
*/
div0 = cfg_data->div0;
if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
div1 = cfg_data->div1;
if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
div1 = readl(base + E4210_DIV_CPU1) &
@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
WARN_ON(alt_div >= MAX_DIV);
if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
/*
* In Exynos4210, ATB clock parent is also mout_core. So
* ATB clock also needs to be maintained at safe speed.
@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
writel(div0, base + E4210_DIV_CPU0);
wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
writel(div1, base + E4210_DIV_CPU1);
wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
DIV_MASK_ALL);
@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
unsigned long mux_reg;
/* find out the divider values to use for clock data */
if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
while ((cfg_data->prate * 1000) != ndata->new_rate) {
if (cfg_data->prate == 0)
return -EINVAL;
@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
div_mask |= E4210_DIV0_ATB_MASK;
}
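
The repeated change in this file fixes an API mismatch: test_bit() takes a bit number, while the CLK_CPU_* values are used here as bit masks, so test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags) examined the wrong bit entirely; a plain bitwise AND is the right test for mask-style flags. A tiny userspace illustration of the mismatch (macro values assumed, as their use with '&' in these hunks implies):

#include <stdio.h>

#define FLAG_A (1 << 0)	/* mask value 1 */
#define FLAG_B (1 << 1)	/* mask value 2 */

int main(void)
{
	unsigned long flags = FLAG_A;	/* only bit 0 set */

	/* mask-style test: correct, prints 1 */
	printf("%d\n", !!(flags & FLAG_A));
	/* test_bit-style test with a mask: checks bit number 1
	 * (FLAG_B's bit), prints 0 even though FLAG_A is set */
	printf("%ld\n", (flags >> FLAG_A) & 1);
	return 0;
}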

View File

@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
DT_CLK(NULL, "uart3_ick", "uart3_ick"),
DT_CLK(NULL, "uart4_ick", "uart4_ick"),
DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = {
static struct ti_dt_clk omap36xx_clks[] = {
DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
DT_CLK(NULL, "uart4_fck", "uart4_fck"),
DT_CLK(NULL, "uart4_ick", "uart4_ick"),
{ .node_name = NULL },
};

View File

@ -18,7 +18,6 @@
#include "clock.h"
#define DRA7_DPLL_ABE_DEFFREQ 180633600
#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
#define DRA7_DPLL_USB_DEFFREQ 960000000
@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
int __init dra7xx_dt_clk_init(void)
{
int rc;
struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
struct clk *dpll_ck, *hdcp_ck;
ti_dt_clocks_register(dra7xx_clks);
omap2_clk_disable_autoidle_all();
abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
if (!rc)
rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
if (rc)
pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
if (rc)

View File

@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
}
}
if (unlikely(!clk->enable_reg)) {
if (unlikely(IS_ERR(clk->enable_reg))) {
pr_err("%s: %s missing enable_reg\n", __func__,
clk_hw_get_name(hw));
ret = -EINVAL;
@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
u32 v;
clk = to_clk_hw_omap(hw);
if (!clk->enable_reg) {
if (IS_ERR(clk->enable_reg)) {
/*
* 'independent' here refers to a clock which is not
* controlled by its parent.

View File

@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
struct acpi_cpufreq_data *data = policy->driver_data;
if (unlikely(!data))
return -ENODEV;
return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

View File

@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu)
* since this is a core component, and is essential for the
* subsequent light-weight ->init() to succeed.
*/
if (cpufreq_driver->exit)
if (cpufreq_driver->exit) {
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
}
/**

View File

@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
if (cpu->prev_mperf == mperf) {
local_irq_restore(flags);
return;
}
tsc = rdtsc();
local_irq_restore(flags);

View File

@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (err) {
put_device(&devfreq->dev);
mutex_unlock(&devfreq->lock);
goto err_dev;
goto err_out;
}
mutex_unlock(&devfreq->lock);
@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
err_init:
list_del(&devfreq->node);
device_unregister(&devfreq->dev);
err_dev:
kfree(devfreq);
err_out:
return ERR_PTR(err);
@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
ret = PTR_ERR(governor);
goto out;
}
if (df->governor == governor)
if (df->governor == governor) {
ret = 0;
goto out;
}
if (df->governor) {
ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);

View File

@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
/* disp clock */
adev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (adev->clock.default_dispclk == 0)
adev->clock.default_dispclk = 54000; /* 540 Mhz */
/* set a reasonable default for DP */
if (adev->clock.default_dispclk < 53900) {
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
adev->clock.current_dispclk = adev->clock.default_dispclk;

View File

@@ -177,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		ret = -EFAULT;
@@ -197,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)chunk_array[i];
+		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				   sizeof(struct drm_amdgpu_cs_chunk))) {
 			ret = -EFAULT;
@@ -208,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)user_chunk.chunk_data;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
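(Not part of the diff: a note on the three one-line changes above.) Each inserts an intermediate cast through unsigned long when turning a 64-bit value from userspace into a pointer. On 32-bit builds a direct u64-to-pointer cast truncates and triggers a "cast to pointer from integer of different size" warning; unsigned long is pointer-sized on both 32- and 64-bit Linux, so the two-step cast is the portable idiom. A standalone sketch with a hypothetical helper name:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper, not amdgpu code: convert a 64-bit handle passed
 * by userspace in an ioctl argument struct into a usable pointer. The
 * intermediate unsigned long keeps the cast width-correct on 32-bit. */
static inline void *u64_to_ptr(uint64_t handle)
{
	return (void *)(unsigned long)handle;
}

int main(void)
{
	int x = 42;
	/* Userspace would store the pointer as a fixed-width u64. */
	uint64_t handle = (uint64_t)(unsigned long)&x;

	assert(*(int *)u64_to_ptr(handle) == 42);
	return 0;
}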

View File

@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* set the proper interrupt */
-	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */

View File

@@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
 	/* tonga */
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},

View File

@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 		return true;
 	return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!afbdev)
+		return;
+
+	fb_helper = &afbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+}

View File

@@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_fbdev_restore_mode(adev);
 	vga_switcheroo_process_delayed_switch();
 }

View File

@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);

View File

@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-	if (r)
+	if (r) {
+		kfree(ib);
 		return r;
+	}
 
 	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */

View File

@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
 	if (!amdgpu_dpm)
 		return 0;
 
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
+
 	ret = ci_set_temperature_range(adev);
 	if (ret)
 		return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");

View File

@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 	int ret, i;
 	u16 tmp16;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 		return;

View File

@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (amdgpu_dpm) {
+		int ret;
+
+		/* init the sysfs and debugfs files late */
+		ret = amdgpu_pm_sysfs_init(adev);
+		if (ret)
+			return ret;
+
 		/* powerdown unused blocks for now */
 		cz_dpm_powergate_uvd(adev, true);
 		cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
 
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_init_failed;
-
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");

View File

@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v10_0_page_flip - pageflip callback.
  *
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v10_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v10_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v10_0_crtc_load_lut(crtc);
 		break;
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle)
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v10_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle)
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v10_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle)
 
 	dce_v10_0_hpd_fini(adev);
 
+	dce_v10_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle)
 	/* initialize hpd */
 	dce_v10_0_hpd_init(adev);
 
+	dce_v10_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 	return 0;

View File

@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v11_0_page_flip - pageflip callback.
  *
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v11_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v11_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v11_0_crtc_load_lut(crtc);
 		break;
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
-		adev->mode_info.num_crtc = 4;
+		adev->mode_info.num_crtc = 3;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v11_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v11_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
 
 	dce_v11_0_hpd_fini(adev);
 
+	dce_v11_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
 	/* initialize hpd */
 	dce_v11_0_hpd_init(adev);
 
+	dce_v11_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 	return 0;

View File

@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v8_0_page_flip - pageflip callback.
  *
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v8_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v8_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v8_0_crtc_load_lut(crtc);
 		break;
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v8_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v8_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
 
 	dce_v8_0_hpd_fini(adev);
 
+	dce_v8_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
 	/* initialize hpd */
 	dce_v8_0_hpd_init(adev);
 
+	dce_v8_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 	return 0;

View File

@@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle)
 {
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
 
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle)
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");

View File

@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
 	u32 mask;
 	int ret;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 		return;

Some files were not shown because too many files have changed in this diff.