Merge 5.6-rc7 into tty-next

We need the tty fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Committed by: Greg Kroah-Hartman, 2020-03-23 08:02:55 +01:00
commit cbf580ff09
414 changed files with 3494 additions and 1510 deletions

@@ -86,6 +86,8 @@ ForEachMacros:
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- 'bitmap_for_each_clear_region'
- 'bitmap_for_each_set_region'
- 'blkg_for_each_descendant_post'
- 'blkg_for_each_descendant_pre'
- 'blk_queue_for_each_rl'
@@ -115,6 +117,7 @@ ForEachMacros:
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
@@ -136,9 +139,10 @@ ForEachMacros:
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
- 'for_each_card_links'
- 'for_each_card_links_safe'
- 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
@@ -166,6 +170,7 @@ ForEachMacros:
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_handle'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
@@ -190,6 +195,7 @@ ForEachMacros:
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- 'for_each_memblock'
- 'for_each_memblock_type'
- 'for_each_memcg_cache_index'
@@ -200,9 +206,11 @@ ForEachMacros:
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
- 'for_each_netdev_continue_reverse'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
@@ -254,10 +262,10 @@ ForEachMacros:
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dai'
- 'for_each_rtd_codec_dai_rollback'
- 'for_each_rtdcom'
- 'for_each_rtdcom_safe'
- 'for_each_rtd_components'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
@@ -267,6 +275,7 @@ ForEachMacros:
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
@@ -330,6 +339,7 @@ ForEachMacros:
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
@@ -351,6 +361,7 @@ ForEachMacros:
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
@@ -444,10 +455,16 @@ ForEachMacros:
- 'virtio_device_for_each_vq'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'xbc_array_for_each_value'
- 'xbc_for_each_key_value'
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
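
For context, the ForEachMacros list tells clang-format to treat the named macros as control-flow statements rather than function calls. A minimal illustration (a hypothetical kernel snippet, not from this commit), which clang-format lays out like an ordinary for loop because 'xa_for_each' is on the list:

#include <linux/printk.h>
#include <linux/xarray.h>

/* Illustrative only: with 'xa_for_each' in ForEachMacros, the loop
 * below is indented and braced like a for statement.
 */
static void dump_entries(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		pr_info("index %lu -> entry %p\n", index, entry);
}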

@@ -110,6 +110,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX GICv3 | #38539 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX Core | #30115 | CAVIUM_ERRATUM_30115 |

@@ -110,6 +110,13 @@ PROPERTIES
Usage: required
Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
- fsl,erratum-a050385
Usage: optional
Value type: boolean
Definition: A boolean property. Indicates the presence of
erratum A050385, in which DMA transactions that are split
can result in an FMan lock.
=============================================================================
FMan MURAM Node

@@ -850,3 +850,11 @@ business doing so.
d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
very suspect (and won't work in modules). Such uses are very likely to
be misspelled d_alloc_anon().
---
**mandatory**
[should've been added in 2016] stale comment in finish_open() notwithstanding,
failure exits in ->atomic_open() instances should *NOT* fput() the file,
no matter what. Everything is handled by the caller.

@@ -237,7 +237,7 @@ This is solely useful to speed up test compiles.
KBUILD_EXTRA_SYMBOLS
--------------------
For modules that use symbols from other modules.
See more details in modules.txt.
See more details in modules.rst.
ALLSOURCE_ARCHS
---------------

@@ -44,7 +44,7 @@ intermediate::
def_bool y
Then, Kconfig moves onto the evaluation stage to resolve inter-symbol
dependency as explained in kconfig-language.txt.
dependency as explained in kconfig-language.rst.
Variables

@@ -924,7 +924,7 @@ When kbuild executes, the following steps are followed (roughly):
$(KBUILD_AFLAGS_MODULE) is used to add arch-specific options that
are used for assembler.
From commandline AFLAGS_MODULE shall be used (see kbuild.txt).
From commandline AFLAGS_MODULE shall be used (see kbuild.rst).
KBUILD_CFLAGS_KERNEL
$(CC) options specific for built-in
@@ -937,7 +937,7 @@ When kbuild executes, the following steps are followed (roughly):
$(KBUILD_CFLAGS_MODULE) is used to add arch-specific options that
are used for $(CC).
From commandline CFLAGS_MODULE shall be used (see kbuild.txt).
From commandline CFLAGS_MODULE shall be used (see kbuild.rst).
KBUILD_LDFLAGS_MODULE
Options for $(LD) when linking modules
@@ -945,7 +945,7 @@ When kbuild executes, the following steps are followed (roughly):
$(KBUILD_LDFLAGS_MODULE) is used to add arch-specific options
used when linking modules. This is often a linker script.
From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
From commandline LDFLAGS_MODULE shall be used (see kbuild.rst).
KBUILD_LDS

@@ -470,9 +470,9 @@ build.
The syntax of the Module.symvers file is::
<CRC> <Symbol> <Namespace> <Module> <Export Type>
<CRC> <Symbol> <Module> <Export Type> <Namespace>
0xe1cc2a05 usb_stor_suspend USB_STORAGE drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE
The fields are separated by tabs and values may be empty (e.g.
if no namespace is defined for an exported symbol).
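
Out-of-tree tooling that parses Module.symvers must be updated for the reordered columns. As a rough illustration (a hypothetical stand-alone reader, not part of the kernel tree), the tab-separated fields can be consumed like this::

	#include <stdio.h>
	#include <string.h>

	/* Field order per the new layout: CRC, symbol, module,
	 * export type, namespace (the namespace may be empty).
	 */
	int main(void)
	{
		char line[1024];

		while (fgets(line, sizeof(line), stdin)) {
			char *crc  = strtok(line, "\t\n");
			char *sym  = strtok(NULL, "\t\n");
			char *mod  = strtok(NULL, "\t\n");
			char *type = strtok(NULL, "\t\n");
			char *ns   = strtok(NULL, "\t\n"); /* NULL when empty */

			if (crc && sym && mod && type)
				printf("%s %s ns=%s\n", sym, type, ns ? ns : "-");
		}
		return 0;
	}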

@@ -40,9 +40,6 @@ example usage
# Delete a snapshot using:
$ devlink region del pci/0000:00:05.0/cr-space snapshot 1
# Trigger (request) a snapshot be taken:
$ devlink region trigger pci/0000:00:05.0/cr-space
# Dump a snapshot:
$ devlink region dump pci/0000:00:05.0/fw-health snapshot 1
0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30

@@ -8,9 +8,9 @@ Overview
========
The net_failover driver provides an automated failover mechanism via APIs
to create and destroy a failover master netdev and mananges a primary and
to create and destroy a failover master netdev and manages a primary and
standby slave netdevs that get registered via the generic failover
infrastructrure.
infrastructure.
The failover netdev acts as a master device and controls 2 slave devices. The
original paravirtual interface is registered as 'standby' slave netdev and
@@ -29,7 +29,7 @@ virtio-net accelerated datapath: STANDBY mode
=============================================
net_failover enables hypervisor controlled accelerated datapath to virtio-net
enabled VMs in a transparent manner with no/minimal guest userspace chanages.
enabled VMs in a transparent manner with no/minimal guest userspace changes.
To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY
feature on the virtio-net interface and assign the same MAC address to both

@@ -159,7 +159,7 @@ Socket Interface
set SO_RDS_TRANSPORT on a socket for which the transport has
been previously attached explicitly (by SO_RDS_TRANSPORT) or
implicitly (via bind(2)) will return an error of EOPNOTSUPP.
An attempt to set SO_RDS_TRANSPPORT to RDS_TRANS_NONE will
An attempt to set SO_RDS_TRANSPORT to RDS_TRANS_NONE will
always return EINVAL.
RDMA for RDS

@@ -4073,7 +4073,6 @@ F: drivers/scsi/snic/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Govindarajulu Varadarajan <_govind@gmx.com>
M: Parvi Kaustubhi <pkaustub@cisco.com>
S: Supported
F: drivers/net/ethernet/cisco/enic/
@@ -4572,7 +4571,7 @@ F: drivers/infiniband/hw/cxgb4/
F: include/uapi/rdma/cxgb4-abi.h
CXGB4VF ETHERNET DRIVER (CXGB4VF)
M: Casey Leedom <leedom@chelsio.com>
M: Vishal Kulkarni <vishal@gmail.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -6198,7 +6197,6 @@ S: Supported
F: drivers/scsi/be2iscsi/
Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
M: Sathya Perla <sathya.perla@broadcom.com>
M: Ajit Khaparde <ajit.khaparde@broadcom.com>
M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
M: Somnath Kotur <somnath.kotur@broadcom.com>
@@ -11119,7 +11117,7 @@ M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
L: linux-mips@vger.kernel.org
W: http://www.linux-mips.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
Q: http://patchwork.linux-mips.org/project/linux-mips/list/
Q: https://patchwork.kernel.org/project/linux-mips/list/
S: Maintained
F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -1804,7 +1804,7 @@ existing-targets := $(wildcard $(sort $(targets)))
-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
endif # config-targets
endif # config-build
endif # mixed-build
endif # need-sub-make

@@ -154,7 +154,7 @@ config ARC_CPU_HS
help
Support for ARC HS38x Cores based on ARCv2 ISA
The notable features are:
- SMP configurations of upto 4 core with coherency
- SMP configurations of up to 4 cores with coherency
- Optional L2 Cache and IO-Coherency
- Revised Interrupt Architecture (multiple priorities, reg banks,
auto stack switch, auto regfile save/restore)
@@ -192,7 +192,7 @@ config ARC_SMP_HALT_ON_RESET
help
In SMP configuration cores can be configured as Halt-on-reset
or they could all start at same time. For Halt-on-reset, non
masters are parked until Master kicks them so they can start of
masters are parked until Master kicks them so they can start off
at designated entry point. For other case, all jump to common
entry point and spin wait for Master's signal.

@@ -21,8 +21,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_EZNPS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4096

@@ -20,8 +20,6 @@ CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
# CONFIG_COMPACTION is not set
CONFIG_NET=y

@@ -19,8 +19,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ISA_ARCV2=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
# CONFIG_COMPACTION is not set

@@ -14,8 +14,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set

@@ -43,6 +43,8 @@ extern void fpu_init_task(struct pt_regs *regs);
#endif /* !CONFIG_ISA_ARCOMPACT */
struct task_struct;
extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
#else /* !CONFIG_ARC_FPU_SAVE_RESTORE */

@@ -29,6 +29,8 @@
.endm
#define ASM_NL ` /* use '`' to mark new line in macro */
#define __ALIGN .align 4
#define __ALIGN_STR __stringify(__ALIGN)
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm

@@ -8,11 +8,11 @@
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/cache.h>

@@ -104,8 +104,7 @@ static void show_faulting_vma(unsigned long address)
if (IS_ERR(nm))
nm = "?";
}
pr_info(" @off 0x%lx in [%s]\n"
" VMA: 0x%08lx to 0x%08lx\n",
pr_info(" @off 0x%lx in [%s] VMA: 0x%08lx to 0x%08lx\n",
vma->vm_start < TASK_UNMAPPED_BASE ?
address : address - vma->vm_start,
nm, vma->vm_start, vma->vm_end);
@@ -120,8 +119,6 @@ static void show_ecr_verbose(struct pt_regs *regs)
unsigned int vec, cause_code;
unsigned long address;
pr_info("\n[ECR ]: 0x%08lx => ", regs->event);
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
@@ -130,10 +127,10 @@ static void show_ecr_verbose(struct pt_regs *regs)
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"),
address, regs->ret);
address, (void *)regs->ret);
} else if (vec == ECR_V_ITLB_MISS) {
pr_cont("Insn could not be fetched\n");
} else if (vec == ECR_V_MACH_CHK) {
@@ -191,31 +188,31 @@ void show_regs(struct pt_regs *regs)
show_ecr_verbose(regs);
pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n",
current->thread.fault_address,
(void *)regs->blink, (void *)regs->ret);
if (user_mode(regs))
show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32);
pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
regs->event, current->thread.fault_address, regs->ret);
pr_info("STAT32: 0x%08lx", regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
#ifdef CONFIG_ISA_ARCOMPACT
pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE),
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
pr_cont(" : %2s%2s%2s%2s\n",
pr_cont(" [%2s%2s%2s%2s]",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
regs->bta, regs->sp, regs->fp);
pr_cont(" BTA: 0x%08lx\n", regs->bta);
pr_info("BLK: %pS\n SP: 0x%08lx FP: 0x%08lx\n",
(void *)regs->blink, regs->sp, regs->fp);
pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
regs->lp_start, regs->lp_end, regs->lp_count);

@@ -307,13 +307,15 @@ endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
$(eval KBUILD_CFLAGS += \
$(eval SSP_PLUGIN_CFLAGS := \
-fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \
awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
include/generated/asm-offsets.h) \
-fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
include/generated/asm-offsets.h))
$(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
$(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
endif
all: $(notdir $(KBUILD_IMAGE))

@@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
$(libfdt) $(libfdt_hdrs) hyp-stub.S
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp-flags-y)
CFLAGS_fdt_rw.o := $(nossp-flags-y)
CFLAGS_fdt_wip.o := $(nossp-flags-y)
ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
-I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.

@@ -94,6 +94,8 @@ static bool __init cntvct_functional(void)
* this.
*/
np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (!np)
np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
if (!np)
goto out_put;

@@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user)
ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax"
.pushsection .text.fixup,"ax"
.align 0
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}

@@ -20,6 +20,8 @@ &soc {
};
&fman0 {
fsl,erratum-a050385;
/* these aliases provide the FMan ports mapping */
enet0: ethernet@e0000 {
};

@@ -29,11 +29,9 @@ typedef struct {
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
extern bool arm64_use_ng_mappings;
static inline bool arm64_kernel_unmapped_at_el0(void)
{
return arm64_use_ng_mappings;
return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
typedef void (*bp_hardening_cb_t)(void);

@@ -23,11 +23,13 @@
#include <asm/pgtable-types.h>
extern bool arm64_use_ng_mappings;
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
#define PTE_MAYBE_NG (arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_use_ng_mappings ? PMD_SECT_NG : 0)
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)

@@ -25,8 +25,8 @@
#define __NR_compat_gettimeofday 78
#define __NR_compat_sigreturn 119
#define __NR_compat_rt_sigreturn 173
#define __NR_compat_clock_getres 247
#define __NR_compat_clock_gettime 263
#define __NR_compat_clock_getres 264
#define __NR_compat_clock_gettime64 403
#define __NR_compat_clock_getres_time64 406

@@ -958,11 +958,22 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
/*
* The number of CPUs online, not counting this CPU (which may not be
* fully online and so not counted in num_online_cpus()).
*/
static inline unsigned int num_other_online_cpus(void)
{
unsigned int this_cpu_online = cpu_online(smp_processor_id());
return num_online_cpus() - this_cpu_online;
}
void smp_send_stop(void)
{
unsigned long timeout;
if (num_online_cpus() > 1) {
if (num_other_online_cpus()) {
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
@@ -975,10 +986,10 @@ void smp_send_stop(void)
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while (num_online_cpus() > 1 && timeout--)
while (num_other_online_cpus() && timeout--)
udelay(1);
if (num_online_cpus() > 1)
if (num_other_online_cpus())
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
@@ -1001,7 +1012,11 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
if (num_online_cpus() == 1) {
/*
* If this cpu is the only one alive at this point in time, online or
* not, there are no stop messages to be sent around, so just back out.
*/
if (num_other_online_cpus() == 0) {
sdei_mask_local_cpu();
return;
}
@@ -1009,7 +1024,7 @@ void crash_smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

@@ -4,6 +4,8 @@
#include "jz4780.dtsi"
#include <dt-bindings/clock/ingenic,tcu.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/regulator/active-semi,8865-regulator.h>
/ {
compatible = "img,ci20", "ingenic,jz4780";
@@ -163,63 +165,71 @@ act8600: act8600@5a {
regulators {
vddcore: SUDCDC1 {
regulator-name = "VDDCORE";
regulator-name = "DCDC_REG1";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1100000>;
regulator-always-on;
};
vddmem: SUDCDC2 {
regulator-name = "VDDMEM";
regulator-name = "DCDC_REG2";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
};
vcc_33: SUDCDC3 {
regulator-name = "VCC33";
regulator-name = "DCDC_REG3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vcc_50: SUDCDC4 {
regulator-name = "VCC50";
regulator-name = "SUDCDC_REG4";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
};
vcc_25: LDO_REG5 {
regulator-name = "VCC25";
regulator-name = "LDO_REG5";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
};
wifi_io: LDO_REG6 {
regulator-name = "WIFIIO";
regulator-name = "LDO_REG6";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
};
vcc_28: LDO_REG7 {
regulator-name = "VCC28";
regulator-name = "LDO_REG7";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-always-on;
};
vcc_15: LDO_REG8 {
regulator-name = "VCC15";
regulator-name = "LDO_REG8";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
};
vcc_18: LDO_REG9 {
regulator-name = "VCC18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
vrtc_18: LDO_REG9 {
regulator-name = "LDO_REG9";
/* Despite the datasheet stating 3.3V
* for REG9 and the driver expecting that,
* REG9 outputs 1.8V.
* Likely the CI20 uses a proprietary
* factory programmed chip variant.
* Since this is a simple on/off LDO the
* exact values do not matter.
*/
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vcc_11: LDO_REG10 {
regulator-name = "VCC11";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1100000>;
regulator-name = "LDO_REG10";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
};
};
@@ -261,7 +271,9 @@ &i2c4 {
rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
interrupts = <110>;
interrupt-parent = <&gpf>;
interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
};
};

@@ -605,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p)
* If we're configured to take boot arguments from DT, look for those
* now.
*/
if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

@@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
kvmppc_mmu_destroy_pr(vcpu);
free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kfree(vcpu->arch.shadow_vcpu);

@@ -759,7 +759,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
return 0;
out_vcpu_uninit:
kvmppc_mmu_destroy(vcpu);
kvmppc_subarch_vcpu_uninit(vcpu);
return err;
}
@@ -792,7 +791,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvmppc_core_vcpu_free(vcpu);
kvmppc_mmu_destroy(vcpu);
kvmppc_subarch_vcpu_uninit(vcpu);
}

@@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
unsigned long k_cur;
phys_addr_t pa = __pa(kasan_early_shadow_page);
if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
int ret = kasan_init_shadow_page_tables(k_start, k_end);
if (ret)
panic("kasan: kasan_init_shadow_page_tables() failed");
}
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
pte_t *ptep = pte_offset_kernel(pmd, k_cur);
@@ -143,7 +137,8 @@ void __init kasan_mmu_init(void)
int ret;
struct memblock_region *reg;
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
if (ret)

@@ -3268,7 +3268,10 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
/* Initial reset is a superset of the normal reset */
kvm_arch_vcpu_ioctl_normal_reset(vcpu);
/* this equals initial cpu reset in pop, but we don't switch to ESA */
/*
* This equals initial cpu reset in pop, but we don't switch to ESA.
* We do not only reset the internal data, but also ...
*/
vcpu->arch.sie_block->gpsw.mask = 0;
vcpu->arch.sie_block->gpsw.addr = 0;
kvm_s390_set_prefix(vcpu, 0);
@@ -3278,6 +3281,19 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
/* ... the data in sync regs */
memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
vcpu->run->s.regs.ckc = 0;
vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
vcpu->run->psw_addr = 0;
vcpu->run->psw_mask = 0;
vcpu->run->s.regs.todpr = 0;
vcpu->run->s.regs.cputm = 0;
vcpu->run->s.regs.ckc = 0;
vcpu->run->s.regs.pp = 0;
vcpu->run->s.regs.gbea = 1;
vcpu->run->s.regs.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;

@@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1)
KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)

@@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)
obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
@@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
# These modules require the assembler to support ADX.
ifeq ($(adx_supported),yes)
obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
endif
# These modules require assembler to support AVX.
ifeq ($(avx_supported),yes)

@@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
/*
* NB and Last level cache counters (MSRs) are shared across all cores
* that share the same NB / Last level cache. Interrupts can be directed
* to a single target core, however, event counts generated by processes
* running on other cores cannot be masked out. So we do not support
* sampling and per-thread events.
* that share the same NB / Last level cache. On family 16h and below,
* interrupts can be directed to a single target core, however, event
* counts generated by processes running on other cores cannot be masked
* out. So we do not support sampling and per-thread events via
* CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
*/
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
/* and we do not enable counter overflow interrupts */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
@@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
static struct pmu amd_llc_pmu = {
@@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)

@@ -360,7 +360,6 @@ struct x86_emulate_ctxt {
u64 d;
unsigned long _eip;
struct operand memop;
/* Fields above regs are cleared together. */
unsigned long _regs[NR_VCPU_REGS];
struct operand *memopp;
struct fetch_cache fetch;

@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
bool managed = apicd->is_managed;
/*
* This should never happen. Managed interrupts are not
* migrated except on CPU down, which does not involve the
* cleanup vector. But try to keep the accounting correct
* nevertheless.
* Managed interrupts are usually not migrated away
* from an online CPU, but CPU isolation 'managed_irq'
* can make that happen.
* 1) Activation does not take the isolation into account
* to keep the code simple
* 2) Migration away from an isolated CPU can happen when
* a non-isolated CPU which is in the calculated
* affinity mask comes online.
*/
WARN_ON_ONCE(managed);
trace_vector_free_moved(apicd->irq, cpu, vector, managed);
irq_matrix_free(vector_matrix, cpu, vector, managed);
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

@@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
return;
if ((val & 3UL) == 1UL) {
/* PPIN available but disabled: */
/* PPIN locked in disabled mode */
return;
}
/* If PPIN is disabled, but not locked, try to enable: */
if (!(val & 3UL)) {
/* If PPIN is disabled, try to enable */
if (!(val & 2UL)) {
wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
rdmsrl_safe(MSR_PPIN_CTL, &val);
}
if ((val & 3UL) == 2UL)
/* Is the enable bit set? */
if (val & 2UL)
set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
}
}
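
To make the control flow above easier to check, here is the decision table it implements as a stand-alone sketch (illustrative user-space C, not kernel code; bit 0 of MSR_PPIN_CTL is the lock bit, bit 1 the enable bit):

#include <stdbool.h>
#include <stdint.h>

#define PPIN_LOCK	(1ULL << 0)	/* bit 0: lock */
#define PPIN_ENABLE	(1ULL << 1)	/* bit 1: enable */

/* Mirrors intel_ppin_init(): bail out only when the MSR is locked
 * with enable clear; otherwise an enable write may be attempted,
 * and PPIN is usable iff the enable bit reads back set.
 */
static bool ppin_usable(uint64_t ctl)
{
	if ((ctl & (PPIN_LOCK | PPIN_ENABLE)) == PPIN_LOCK)
		return false;		/* locked in disabled mode */
	return ctl & PPIN_ENABLE;	/* after any enable attempt */
}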

@@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu)
{
struct thermal_state *state = &per_cpu(thermal_state, cpu);
struct device *dev = get_cpu_device(cpu);
u32 l;
cancel_delayed_work(&state->package_throttle.therm_work);
cancel_delayed_work(&state->core_throttle.therm_work);
/* Mask the thermal vector before draining evtl. pending work */
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
cancel_delayed_work_sync(&state->package_throttle.therm_work);
cancel_delayed_work_sync(&state->core_throttle.therm_work);
state->package_throttle.rate_control_active = false;
state->core_throttle.rate_control_active = false;

@@ -68,7 +68,7 @@ config KVM_WERROR
depends on (X86_64 && !KASAN) || !COMPILE_TEST
depends on EXPERT
help
Add -Werror to the build flags for (and only for) i915.ko.
Add -Werror to the build flags for KVM.
If in doubt, say "N".

@@ -5173,6 +5173,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
ctxt->intercept = x86_intercept_none;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {

@@ -378,12 +378,15 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
if (e->fields.delivery_mode == APIC_DM_FIXED) {
struct kvm_lapic_irq irq;
irq.shorthand = APIC_DEST_NOSHORT;
irq.vector = e->fields.vector;
irq.delivery_mode = e->fields.delivery_mode << 8;
irq.dest_id = e->fields.dest_id;
irq.dest_mode =
kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
irq.level = false;
irq.trig_mode = e->fields.trig_mode;
irq.shorthand = APIC_DEST_NOSHORT;
irq.dest_id = e->fields.dest_id;
irq.msi_redir_hint = false;
bitmap_zero(&vcpu_bitmap, 16);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
&vcpu_bitmap);

@@ -6312,7 +6312,8 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion *exit_fastpath)
{
if (!is_guest_mode(vcpu) &&
to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE)
to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
to_svm(vcpu)->vmcb->control.exit_info_1)
*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
}

@@ -224,7 +224,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
return;
kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
vmx->nested.hv_evmcs_vmptr = -1ull;
vmx->nested.hv_evmcs_vmptr = 0;
vmx->nested.hv_evmcs = NULL;
}
@@ -1923,7 +1923,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
return 1;
if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
if (unlikely(!vmx->nested.hv_evmcs ||
evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
if (!vmx->nested.hv_evmcs)
vmx->nested.current_vmptr = -1ull;

@@ -2338,6 +2338,17 @@ static void hardware_disable(void)
kvm_cpu_vmxoff();
}
/*
* There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
* directly instead of going through cpu_has(), to ensure KVM is trapping
* ENCLS whenever it's supported in hardware. It does not matter whether
* the host OS supports or has enabled SGX.
*/
static bool cpu_has_sgx(void)
{
return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
u32 msr, u32 *result)
{
@@ -2418,8 +2429,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_PT_USE_GPA |
SECONDARY_EXEC_PT_CONCEAL_VMX |
SECONDARY_EXEC_ENABLE_VMFUNC |
SECONDARY_EXEC_ENCLS_EXITING;
SECONDARY_EXEC_ENABLE_VMFUNC;
if (cpu_has_sgx())
opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
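
As an aside, the CPUID probe performed by cpu_has_sgx() above can be reproduced outside the kernel; a minimal sketch (hypothetical demo code assuming GCC's cpuid.h, not part of this patch):

#include <cpuid.h>
#include <stdbool.h>

/* Leaf 0x12 is meaningful only when the maximum basic leaf reaches
 * it; EAX bit 0 of leaf 0x12, subleaf 0, advertises SGX1.
 */
static bool has_sgx(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 0x12)
		return false;
	__cpuid_count(0x12, 0, eax, ebx, ecx, edx);
	return eax & 1;
}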

@@ -7195,10 +7195,12 @@ static void kvm_timer_init(void)
cpu = get_cpu();
policy = cpufreq_cpu_get(cpu);
if (policy && policy->cpuinfo.max_freq)
max_tsc_khz = policy->cpuinfo.max_freq;
if (policy) {
if (policy->cpuinfo.max_freq)
max_tsc_khz = policy->cpuinfo.max_freq;
cpufreq_cpu_put(policy);
}
put_cpu();
cpufreq_cpu_put(policy);
#endif
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);

@@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}
void vmalloc_sync_all(void)
static void vmalloc_sync(void)
{
unsigned long address;
@@ -217,6 +217,16 @@ }
}
}
void vmalloc_sync_mappings(void)
{
vmalloc_sync();
}
void vmalloc_sync_unmappings(void)
{
vmalloc_sync();
}
/*
* 32-bit:
*
@@ -319,11 +329,23 @@ static void dump_pagetable(unsigned long address)
#else /* CONFIG_X86_64: */
void vmalloc_sync_all(void)
void vmalloc_sync_mappings(void)
{
/*
* 64-bit mappings might allocate new p4d/pud pages
* that need to be propagated to all tasks' PGDs.
*/
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}
void vmalloc_sync_unmappings(void)
{
/*
* Unmappings never allocate or free p4d/pud pages.
* No work is required here.
*/
}
/*
* 64-bit:
*

@@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
return 0;
}
/*
* The EFI runtime services data area is not covered by walk_mem_res(), but must
* be mapped encrypted when SEV is active.
*/
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
if (!sev_active())
return;
if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
desc->flags |= IORES_MAP_ENCRYPTED;
}
static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
struct ioremap_desc *desc = arg;
@@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
* To avoid multiple resource walks, this function walks resources marked as
* IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
* resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
*
* After that, deal with misc other ranges in __ioremap_check_other() which do
* not fall into the above category.
*/
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
struct ioremap_desc *desc)
@@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
memset(desc, 0, sizeof(struct ioremap_desc));
walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
__ioremap_check_other(addr, desc);
}
/*

@@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
return false;
/* is something in flight? */
if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
return false;
return true;

@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
WARN_ON(e && (rq->tag != -1));
if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
/*
* First, a normal IO request is inserted into the scheduler queue
* or a sw queue, while a flush request is added directly to the
* dispatch queue (hctx->dispatch); there is at most one in-flight
* flush request per hw queue, so it does not matter whether the
* flush request goes to the tail or the front of the dispatch
* queue.
*
* Second, in the NCQ case a flush request is a non-NCQ command,
* and queueing it will fail while any normal IO request (an NCQ
* command) is in flight. Adding the flush rq to the front of
* hctx->dispatch tends to add extra time to its latency because
* of S_SCHED_RESTART, compared with adding it to the tail; this
* increases the chance of flush merging, so fewer flush requests
* are issued to the controller. It is observed that ~10% of time
* is saved in blktests block/004 on a disk attached to an
* AHCI/NCQ drive when the flush rq is added to the front of
* hctx->dispatch.
*
* So simply queue the flush rq at the front of hctx->dispatch so
* that flush-intensive workloads benefit on NCQ hardware.
*/
at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
blk_mq_request_bypass_insert(rq, at_head, false);
goto run;
}

@@ -301,6 +301,42 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
}
EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
/**
* disk_has_partitions
* @disk: gendisk of interest
*
* Walk through the partition table and check if valid partition exists.
*
* CONTEXT:
* Don't care.
*
* RETURNS:
* True if the gendisk has at least one valid non-zero size partition.
* Otherwise false.
*/
bool disk_has_partitions(struct gendisk *disk)
{
struct disk_part_tbl *ptbl;
int i;
bool ret = false;
rcu_read_lock();
ptbl = rcu_dereference(disk->part_tbl);
/* Iterate partitions skipping the whole device at index 0 */
for (i = 1; i < ptbl->len; i++) {
if (rcu_dereference(ptbl->part[i])) {
ret = true;
break;
}
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(disk_has_partitions);
/*
* Can be deleted altogether. Later.
*

@@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
* New allocation must be visible in all pgd before it can be found by
* an NMI allocating from the pool.
*/
vmalloc_sync_all();
vmalloc_sync_mappings();
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
if (rc)

@@ -448,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_uid = info->root_uid;
inode->i_gid = info->root_gid;
refcount_set(&device->ref, 1);
device->binderfs_inode = inode;
device->miscdev.minor = minor;

@@ -91,7 +91,7 @@
#ifdef GENERAL_DEBUG
#define PRINTK(args...) printk(args)
#else
#define PRINTK(args...)
#define PRINTK(args...) do {} while (0)
#endif /* GENERAL_DEBUG */
#ifdef EXTRA_DEBUG

@@ -111,7 +111,7 @@ config CFAG12864B
If unsure, say N.
config CFAG12864B_RATE
int "Refresh rate (hertz)"
int "Refresh rate (hertz)"
depends on CFAG12864B
default "20"
---help---
@@ -329,7 +329,7 @@ config PANEL_LCD_PROTO
config PANEL_LCD_PIN_E
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
range -17 17
default 14
---help---
@@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E
config PANEL_LCD_PIN_RS
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
range -17 17
default 17
---help---
@@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS
config PANEL_LCD_PIN_RW
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
range -17 17
default 16
---help---
@@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW
config PANEL_LCD_PIN_SCL
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
range -17 17
default 1
---help---
@@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL
config PANEL_LCD_PIN_SDA
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
range -17 17
default 2
---help---
@@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA
config PANEL_LCD_PIN_BL
depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
range -17 17
default 0
---help---
This describes the number of the parallel port pin to which the LCD 'BL' signal
has been connected. It can be :
has been connected. It can be :
0 : no connection (eg: connected to ground)
1..17 : directly connected to any of these pins on the DB25 plug

@@ -88,7 +88,7 @@ struct charlcd_priv {
int len;
} esc_seq;
unsigned long long drvdata[0];
unsigned long long drvdata[];
};
#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd)
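
The one-line change above replaces the old GNU zero-length-array idiom with a C99 flexible array member. The general pattern, as a stand-alone sketch (hypothetical struct and allocator, not driver code):

#include <stdlib.h>

struct blob {
	size_t len;
	unsigned long long data[];	/* flexible array member: must be last */
};

/* The trailing array contributes nothing to sizeof(struct blob);
 * the caller sizes it explicitly at allocation time.
 */
static struct blob *blob_alloc(size_t n)
{
	struct blob *b = calloc(1, sizeof(*b) + n * sizeof(b->data[0]));

	if (b)
		b->len = n;
	return b;
}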

@@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
const struct of_device_id *match;
const struct img_ascii_lcd_config *cfg;
struct img_ascii_lcd_ctx *ctx;
struct resource *res;
int err;
match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
@@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
&ctx->offset))
return -EINVAL;
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->base = devm_ioremap_resource(&pdev->dev, res);
ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
}

@@ -363,10 +363,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
{
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (!pdev->dma_mask)
pdev->dma_mask = DMA_BIT_MASK(32);
if (!pdev->dev.dma_mask)
pdev->dev.dma_mask = &pdev->dma_mask;
if (!pdev->dev.dma_mask) {
pdev->platform_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->platform_dma_mask;
}
};
/**
@@ -662,20 +662,8 @@ struct platform_device *platform_device_register_full(
pdev->dev.of_node_reused = pdevinfo->of_node_reused;
if (pdevinfo->dma_mask) {
/*
* This memory isn't freed when the device is put,
* I don't have a nice idea for that though. Conceptually
* dma_mask in struct device should not be a pointer.
* See http://thread.gmane.org/gmane.linux.kernel.pci/9081
*/
pdev->dev.dma_mask =
kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
if (!pdev->dev.dma_mask)
goto err;
kmemleak_ignore(pdev->dev.dma_mask);
*pdev->dev.dma_mask = pdevinfo->dma_mask;
pdev->platform_dma_mask = pdevinfo->dma_mask;
pdev->dev.dma_mask = &pdev->platform_dma_mask;
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
@@ -700,7 +688,6 @@ struct platform_device *platform_device_register_full(
if (ret) {
err:
ACPI_COMPANION_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
platform_device_put(pdev);
return ERR_PTR(ret);
}

@@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
/* Don't stop the queue if -ENOMEM: we may have failed to
* bounce the buffer due to global resource outage.
*/
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
switch (err) {
case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
return BLK_STS_IOERR;
case -ENOMEM:
return BLK_STS_RESOURCE;
default:
return BLK_STS_IOERR;
}
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))

@@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
else
io.slave_addr = slave_addr;
io.irq = platform_get_irq(pdev, 0);
io.irq = platform_get_irq_optional(pdev, 0);
if (io.irq > 0)
io.irq_setup = ipmi_std_irq_setup;
else
@@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
io.irq = tmp;
io.irq_setup = acpi_gpe_irq_setup;
} else {
int irq = platform_get_irq(pdev, 0);
int irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
io.irq = irq;

@@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name);
*
* Returns: The number of clocks that are possible parents of this node
*/
unsigned int of_clk_get_parent_count(struct device_node *np)
unsigned int of_clk_get_parent_count(const struct device_node *np)
{
int count;
@@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(struct device_node *np, int index)
const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
struct of_phandle_args clkspec;
struct property *prop;

@@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
},
};
static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
.halt_reg = 0x400c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x400c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_rscc_ahb_clk",
.parent_data = &(const struct clk_parent_data){
.hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
.halt_reg = 0x4008,
.halt_check = BRANCH_HALT,
@@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = {
[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
[DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,

@@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = {
static struct clk_branch video_cc_vcodec0_core_clk = {
.halt_reg = 0x890,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x890,
.enable_mask = BIT(0),

@@ -83,13 +83,16 @@ static ssize_t
efivar_attr_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
char *str = buf;
int ret;
if (!entry || !buf)
return -EINVAL;
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
return -EIO;
if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@@ -116,13 +119,16 @@ static ssize_t
efivar_size_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
char *str = buf;
int ret;
if (!entry || !buf)
return -EINVAL;
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
return -EIO;
str += sprintf(str, "0x%lx\n", var->DataSize);
@@ -133,12 +139,15 @@ static ssize_t
efivar_data_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
int ret;
if (!entry || !buf)
return -EINVAL;
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
return -EIO;
memcpy(buf, var->Data, var->DataSize);
@@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
u8 *data;
int err;
if (!entry || !buf)
return -EINVAL;
if (in_compat_syscall()) {
struct compat_efi_variable *compat;
@@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
struct compat_efi_variable *compat;
unsigned long datasize = sizeof(var->Data);
size_t size;
int ret;
if (!entry || !buf)
return 0;
var->DataSize = 1024;
if (efivar_entry_get(entry, &entry->var.Attributes,
&entry->var.DataSize, entry->var.Data))
ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
var->DataSize = datasize;
if (ret)
return -EIO;
if (in_compat_syscall()) {

@@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
if (size & 3 || *pos & 3)
if (size > 4096 || size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
offset = *pos & GENMASK_ULL(11, 0);
offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
se = (*pos & GENMASK_ULL(19, 12)) >> 12;
sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
value = data[offset++];
value = data[result >> 2];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;

@@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
if (r)
goto out;
amdgpu_fbdev_set_suspend(tmp_adev, 0);
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
@@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
amdgpu_fbdev_set_suspend(adev, 1);
/* disable ras on ALL IPs */
if (!(in_ras_intr && !use_baco) &&
amdgpu_device_ip_need_full_reset(tmp_adev))

@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
if (jpeg_v2_0_is_idle(handle))
if (!jpeg_v2_0_is_idle(handle))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {

@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
continue;
if (enable) {
if (jpeg_v2_5_is_idle(handle))
if (!jpeg_v2_5_is_idle(handle))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {

@@ -89,6 +89,13 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
/* for Vega20/Arcturus register offset change */
#define mmROM_INDEX_VG20 0x00e4
#define mmROM_INDEX_VG20_BASE_IDX 0
#define mmROM_DATA_VG20 0x00e5
#define mmROM_DATA_VG20_BASE_IDX 0
/*
* Indirect registers accessor
*/
@@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
uint32_t rom_index_offset;
uint32_t rom_data_offset;
if (bios == NULL)
return false;
@@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
break;
default:
rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
break;
}
/* set rom index to 0 */
WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
dw_ptr[i] = RREG32(rom_data_offset);
return true;
}

@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
if (vcn_v1_0_is_idle(handle))
if (!vcn_v1_0_is_idle(handle))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {

@@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
if (vcn_v2_0_is_idle(handle))
if (!vcn_v2_0_is_idle(handle))
return -EBUSY;
vcn_v2_0_enable_clock_gating(adev);
} else {

@@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
return 0;
if (enable) {
if (vcn_v2_5_is_idle(handle))
if (!vcn_v2_5_is_idle(handle))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {

@@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
acrtc_state = to_dm_crtc_state(acrtc->base.state);
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state));
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state),
acrtc_state->active_planes);
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
drm_crtc_handle_vblank(&acrtc->base);
@@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
&acrtc_state->vrr_params.adjust);
}
if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
/*
* If there aren't any active_planes then DCH HUBP may be clock-gated.
* In that case, pageflip completion interrupts won't fire and pageflip
* completion events won't get delivered. Prevent this by sending
* pending pageflip events from here if a flip is still pending.
*
* If any planes are enabled, use dm_pflip_high_irq() instead, to
* avoid race conditions between flip programming and completion,
* which could cause too early flip completion events.
*/
if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc_state->active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
acrtc->event = NULL;

@@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
sink_id.ieee_device_id,
sizeof(sink_id.ieee_device_id));
/* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
{
uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
!memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
sizeof(str_mbp_2017))) {
link->reported_link_cap.link_rate = 0x0c;
}
}
core_link_read_dpcd(
link,
DP_SINK_HW_REVISION_START,

@@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
.dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,

@@ -335,6 +335,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 560.0,
.fabricclk_mhz = 560.0,
.dispclk_mhz = 513.0,
.dppclk_mhz = 513.0,
.phyclk_mhz = 540.0,
.socclk_mhz = 560.0,
.dscclk_mhz = 171.0,
.dram_speed_mts = 8960.0,
},
{
.state = 1,
.dcfclk_mhz = 694.0,
.fabricclk_mhz = 694.0,
.dispclk_mhz = 642.0,
.dppclk_mhz = 642.0,
.phyclk_mhz = 600.0,
.socclk_mhz = 694.0,
.dscclk_mhz = 214.0,
.dram_speed_mts = 11104.0,
},
{
.state = 2,
.dcfclk_mhz = 875.0,
.fabricclk_mhz = 875.0,
.dispclk_mhz = 734.0,
.dppclk_mhz = 734.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 875.0,
.dscclk_mhz = 245.0,
.dram_speed_mts = 14000.0,
},
{
.state = 3,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 1000.0,
.dispclk_mhz = 1100.0,
.dppclk_mhz = 1100.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1000.0,
.dscclk_mhz = 367.0,
.dram_speed_mts = 16000.0,
},
{
.state = 4,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
/*Extra state, no dispclk ramping*/
{
.state = 5,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
},
.num_states = 5,
.sr_exit_time_us = 8.6,
.sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 40.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 40.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 131,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 8,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404.0,
.dummy_pstate_latency_us = 5.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3850,
.xfc_bus_transport_time_us = 20,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 0
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@ -3291,6 +3402,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
uint32_t hw_internal_rev)
{
if (ASICREV_IS_NAVI14_M(hw_internal_rev))
return &dcn2_0_nv14_soc;
if (ASICREV_IS_NAVI12_P(hw_internal_rev))
return &dcn2_0_nv12_soc;

View File

@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
.dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,

View File

@ -2006,8 +2006,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_set_watermarks_table(smu, table, clock_ranges);
smu->watermarks_bitmap |= WATERMARKS_EXIST;
smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
smu->watermarks_bitmap |= WATERMARKS_EXIST;
smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
}
}
mutex_unlock(&smu->mutex);
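WATERMARKS_EXIST and WATERMARKS_LOADED work as a two-flag handshake: EXIST records that display code has handed over a table, LOADED that the firmware copy is current, and the guard added above stops repeated calls from needlessly re-arming an upload. A compilable sketch of the protocol, with write_table() as a hypothetical uploader in place of smu_write_watermarks_table():

#define WATERMARKS_EXIST  (1u << 0)
#define WATERMARKS_LOADED (1u << 1)

static unsigned int watermarks_bitmap;

/* display side: hand a table over (first time only) */
static void set_watermarks(void)
{
	if (!(watermarks_bitmap & WATERMARKS_EXIST)) {
		watermarks_bitmap |= WATERMARKS_EXIST;
		watermarks_bitmap &= ~WATERMARKS_LOADED; /* force one upload */
	}
}

/* SMU side: upload once, then remember the copy is current */
static int write_watermarks(int (*write_table)(void))
{
	int ret = 0;

	if ((watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = write_table();
		if (!ret)
			watermarks_bitmap |= WATERMARKS_LOADED;
	}
	return ret;
}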

View File

@ -1062,15 +1062,6 @@ static int navi10_display_config_changed(struct smu_context *smu)
{
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret)
return ret;
smu->watermarks_bitmap |= WATERMARKS_LOADED;
}
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
*clock_ranges)
{
int i;
int ret = 0;
Watermarks_t *table = watermarks;
if (!table || !clock_ranges)
@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
smu->watermarks_bitmap |= WATERMARKS_EXIST;
/* pass data to smu controller */
if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret) {
pr_err("Failed to update WMTABLE!");
return ret;
}
smu->watermarks_bitmap |= WATERMARKS_LOADED;
}
return 0;
}

View File

@ -806,9 +806,10 @@ static int renoir_set_watermarks_table(
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
smu->watermarks_bitmap |= WATERMARKS_EXIST;
/* pass data to smu controller */
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret) {
pr_err("Failed to update WMTABLE!");

View File

@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
MODULE_DEVICE_TABLE(of, komeda_of_match);
static int komeda_rt_pm_suspend(struct device *dev)
static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
return komeda_dev_suspend(mdrv->mdev);
}
static int komeda_rt_pm_resume(struct device *dev)
static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);

View File

@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
size = min(size, mem);
}
if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
DRM_ERROR("Cannot request framebuffer\n");
return -EBUSY;
}
if (pci_request_region(pdev, 0, "bochs-drm") != 0)
DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
bochs->fb_map = ioremap(addr, size);
if (bochs->fb_map == NULL) {

View File

@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
frame.colorspace = HDMI_COLORSPACE_RGB;
/* Set up colorimetry */
switch (hdmi->hdmi_data.enc_out_encoding) {
case V4L2_YCBCR_ENC_601:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
case V4L2_YCBCR_ENC_709:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
break;
default: /* Carries no data */
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
}
if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
switch (hdmi->hdmi_data.enc_out_encoding) {
case V4L2_YCBCR_ENC_601:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
case V4L2_YCBCR_ENC_709:
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
else
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
break;
default: /* Carries no data */
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
break;
}
} else {
frame.colorimetry = HDMI_COLORIMETRY_NONE;
frame.extended_colorimetry =
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
frame.scan_mode = HDMI_SCAN_MODE_NONE;

View File

@ -1935,7 +1935,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
switch (pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
@ -1965,13 +1965,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
/* Teardown the old pdt, if there is one */
if (port->pdt != DP_PEER_DEVICE_NONE) {
if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/*
* If the new PDT would also have an i2c bus,
* don't bother with reregistering it
*/
if (new_pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
port->pdt = new_pdt;
port->mcs = new_mcs;
return 0;
@ -1991,7 +1991,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
port->mcs = new_mcs;
if (port->pdt != DP_PEER_DEVICE_NONE) {
if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
} else {
@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
}
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
mutex_unlock(&mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
if (!port->input) {
drm_dp_send_enum_path_resources(mgr, mstb,
port);
}
/*
* Reprobe PBN caps on both hotplug, and when re-probing the link
* for our parent mstb
*/
if (old_ddps != port->ddps || !created) {
if (port->ddps && !port->input) {
ret = drm_dp_send_enum_path_resources(mgr, mstb,
port);
if (ret == 1)
changed = true;
} else {
port->available_pbn = 0;
port->full_pbn = 0;
}
}
@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port->ddps = conn_stat->displayport_device_plug_status;
if (old_ddps != port->ddps) {
if (port->ddps) {
dowork = true;
} else {
port->available_pbn = 0;
}
if (port->ddps && !port->input)
drm_dp_send_enum_path_resources(mgr, mstb, port);
else
port->full_pbn = 0;
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
if (port->input || !port->ddps)
continue;
if (!port->available_pbn) {
drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port);
drm_modeset_unlock(&mgr->base.lock);
changed = true;
}
if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
@ -2990,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
ret = 0;
path_res = &txmsg->reply.u.path_resources;
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@ -3002,14 +2999,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
path_res->port_number,
path_res->full_payload_bw_number,
path_res->avail_payload_bw_number);
port->available_pbn =
path_res->avail_payload_bw_number;
/*
* If something changed, make sure we send a
* hotplug
*/
if (port->full_pbn != path_res->full_payload_bw_number ||
port->fec_capable != path_res->fec_capable)
ret = 1;
port->full_pbn = path_res->full_payload_bw_number;
port->fec_capable = path_res->fec_capable;
}
}
kfree(txmsg);
return 0;
return ret;
}
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
@ -3596,13 +3601,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
/* The link address will need to be re-sent on resume */
mstb->link_address_sent = false;
list_for_each_entry(port, &mstb->ports, next) {
/* The PBN for each port will also need to be re-probed */
port->available_pbn = 0;
list_for_each_entry(port, &mstb->ports, next)
if (port->mstb)
drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
}
}
/**
@ -4829,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
return false;
}
static inline
int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
struct drm_dp_mst_topology_state *mst_state)
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
struct drm_dp_mst_topology_state *state);
static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_topology_state *state)
{
struct drm_dp_mst_port *port;
struct drm_dp_vcpi_allocation *vcpi;
int pbn_limit = 0, pbn_used = 0;
struct drm_dp_mst_port *port;
int pbn_used = 0, ret;
bool found = false;
list_for_each_entry(port, &branch->ports, next) {
if (port->mstb)
if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
return -ENOSPC;
if (port->available_pbn > 0)
pbn_limit = port->available_pbn;
}
DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
branch, pbn_limit);
list_for_each_entry(vcpi, &mst_state->vcpis, next) {
if (!vcpi->pbn)
/* Check that we have at least one port in our state that's downstream
* of this branch, otherwise we can skip this branch
*/
list_for_each_entry(vcpi, &state->vcpis, next) {
if (!vcpi->pbn ||
!drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
continue;
if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
pbn_used += vcpi->pbn;
found = true;
break;
}
DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
branch, pbn_used);
if (!found)
return 0;
if (pbn_used > pbn_limit) {
DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
branch);
if (mstb->port_parent)
DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
mstb->port_parent->parent, mstb->port_parent,
mstb);
else
DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
mstb);
list_for_each_entry(port, &mstb->ports, next) {
ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
if (ret < 0)
return ret;
pbn_used += ret;
}
return pbn_used;
}
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
struct drm_dp_mst_topology_state *state)
{
struct drm_dp_vcpi_allocation *vcpi;
int pbn_used = 0;
if (port->pdt == DP_PEER_DEVICE_NONE)
return 0;
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
bool found = false;
list_for_each_entry(vcpi, &state->vcpis, next) {
if (vcpi->port != port)
continue;
if (!vcpi->pbn)
return 0;
found = true;
break;
}
if (!found)
return 0;
/* This should never happen, as it means we tried to
* set a mode before querying the full_pbn
*/
if (WARN_ON(!port->full_pbn))
return -EINVAL;
pbn_used = vcpi->pbn;
} else {
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
state);
if (pbn_used <= 0)
return pbn_used;
}
if (pbn_used > port->full_pbn) {
DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
port->parent, port, pbn_used,
port->full_pbn);
return -ENOSPC;
}
return 0;
DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
port->parent, port, pbn_used, port->full_pbn);
return pbn_used;
}
static inline int
@ -5061,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
if (ret)
break;
ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
if (ret)
mutex_lock(&mgr->lock);
ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
mst_state);
mutex_unlock(&mgr->lock);
if (ret < 0)
break;
else
ret = 0;
}
return ret;
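For intuition on the full_pbn accounting these checks enforce: PBN expresses link bandwidth in units of 54/64 MBps, so (ignoring coding margins) a 4-lane HBR2 link carries 4 x 5.4 Gbps x 8/10 = 2160 MBps, i.e. 2560 PBN. Two streams of 1500 PBN each would pass a per-port check in isolation yet overload a shared branch link, which is exactly what the recursive walk above rejects. A small arithmetic sketch using those example figures:

#include <stdio.h>

/* PBN granularity is 54/64 MBps (DP MST); margins ignored here. */
static int link_pbn(int lanes, double gbps_per_lane)
{
	/* 8b/10b coding, then bits -> bytes, then MBps -> PBN */
	double mbps = lanes * gbps_per_lane * 1000.0 * 8.0 / 10.0 / 8.0;

	return (int)(mbps * 64.0 / 54.0);
}

int main(void)
{
	int full_pbn = link_pbn(4, 5.4);	/* 4-lane HBR2: 2560 PBN */
	int stream_a = 1500, stream_b = 1500;	/* example allocations */

	printf("link capacity: %d PBN\n", full_pbn);
	printf("a+b fit: %s\n",
	       stream_a + stream_b <= full_pbn ? "yes" : "no (-ENOSPC)");
	return 0;
}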

View File

@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
DRM_DEBUG_LEASE("Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
if (IS_ERR(lessee)) {
ret = PTR_ERR(lessee);
idr_destroy(&leases);
goto out_leases;
}
@ -580,7 +582,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
out_leases:
put_unused_fd(fd);
idr_destroy(&leases);
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;
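The lease fix is an ownership-transfer rule: once drm_lease_create() succeeds, the lessee owns the leases idr and its release path destroys it, so the unconditional idr_destroy() under out_leases became a double destroy; only the one failure path where the transfer never happened may clean up locally. A toy model of that rule (create_lessee() and the malloc'd buffer are illustrative stand-ins, not the DRM API):

#include <stdlib.h>

struct lessee { int *leases; };

/* Illustrative stand-in for drm_lease_create(): on success the lessee
 * takes ownership of 'leases'; on failure ownership stays put. */
static struct lessee *create_lessee(int *leases, int fail)
{
	struct lessee *l = fail ? NULL : malloc(sizeof(*l));

	if (!l)
		return NULL;	/* caller still owns 'leases' */
	l->leases = leases;	/* lessee owns it from here on */
	return l;
}

int main(void)
{
	int *leases = malloc(16 * sizeof(*leases));
	struct lessee *l = create_lessee(leases, 1);

	if (!l) {
		free(leases);	/* the only spot where we may free it */
		return 1;
	}
	free(l->leases);	/* freed once, by the owner */
	free(l);
	return 0;
}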

View File

@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
return exynos_drm_register_dma(drm_dev, dev);
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static const struct component_ops decon_component_ops = {

View File

@ -40,6 +40,7 @@
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
return exynos_drm_register_dma(drm_dev, ctx->dev);
return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,

View File

@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
* mapping.
*/
static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
return ret;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
if (to_dma_iommu_mapping(subdrv_dev))
/*
* Keep the original DMA mapping of the sub-device and
* restore it on Exynos DRM detach, otherwise the DMA
* framework considers it as IOMMU-less during the next
* probe (in case of deferred probe or modular build)
*/
*dma_priv = to_dma_iommu_mapping(subdrv_dev);
if (*dma_priv)
arm_iommu_detach_device(subdrv_dev);
ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
* mapping
*/
static void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
arm_iommu_detach_device(subdrv_dev);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
arm_iommu_attach_device(subdrv_dev, *dma_priv);
} else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);
clear_dma_max_seg_size(subdrv_dev);
}
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
void **dma_priv)
{
struct exynos_drm_private *priv = drm->dev_private;
@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
priv->mapping = mapping;
}
return drm_iommu_attach_device(drm, dev);
return drm_iommu_attach_device(drm, dev, dma_priv);
}
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
void **dma_priv)
{
if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
drm_iommu_detach_device(drm, dev);
drm_iommu_detach_device(drm, dev, dma_priv);
}
void exynos_drm_cleanup_dma(struct drm_device *drm)

View File

@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
return priv->mapping ? true : false;
}
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
void **dma_priv);
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
void **dma_priv);
void exynos_drm_cleanup_dma(struct drm_device *drm);
#ifdef CONFIG_DRM_EXYNOS_DPI

View File

@ -97,6 +97,7 @@ struct fimc_scaler {
struct fimc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops fimc_component_ops = {

View File

@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
return exynos_drm_register_dma(drm_dev, dev);
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void fimd_unbind(struct device *dev, struct device *master,
@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_atomic_disable(ctx->crtc);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);

View File

@ -232,6 +232,7 @@ struct g2d_runqueue_node {
struct g2d_data {
struct device *dev;
void *dma_priv;
struct clk *gate_clk;
void __iomem *regs;
int irq;
@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
ret = exynos_drm_register_dma(drm_dev, dev);
ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
exynos_drm_unregister_dma(g2d->drm_dev, dev);
exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
static const struct component_ops g2d_component_ops = {

View File

@ -97,6 +97,7 @@ struct gsc_scaler {
struct gsc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
ctx->drm_dev = drm_dev;
ctx->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops gsc_component_ops = {

View File

@ -56,6 +56,7 @@ struct rot_variant {
struct rot_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock;
@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
rot->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
}
static const struct component_ops rotator_component_ops = {

View File

@ -39,6 +39,7 @@ struct scaler_data {
struct scaler_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock[SCALER_MAX_CLK];
@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
scaler->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
&scaler->dma_priv);
}
static const struct component_ops scaler_component_ops = {

Some files were not shown because too many files have changed in this diff.