Merge 5.15.35 into android13-5.15

Changes in 5.15.35
	drm/amd/display: Add pstate verification and recovery for DCN31
	drm/amd/display: Fix p-state allow debug index on dcn31
	hamradio: defer 6pack kfree after unregister_netdev
	hamradio: remove needs_free_netdev to avoid UAF
	cpuidle: PSCI: Move the `has_lpi` check to the beginning of the function
	ACPI: processor idle: Check for architectural support for LPI
	ACPI: processor idle: Allow playing dead in C3 state
	ACPI: processor: idle: fix lockup regression on 32-bit ThinkPad T40
	btrfs: remove unused parameter nr_pages in add_ra_bio_pages()
	btrfs: remove no longer used counter when reading data page
	btrfs: remove unused variable in btrfs_{start,write}_dirty_block_groups()
	soc: qcom: aoss: Expose send for generic usecase
	dt-bindings: net: qcom,ipa: add optional qcom,qmp property
	net: ipa: request IPA register values be retained
	btrfs: release correct delalloc amount in direct IO write path
	ALSA: core: Add snd_card_free_on_error() helper
	ALSA: sis7019: Fix the missing error handling
	ALSA: ali5451: Fix the missing snd_card_free() call at probe error
	ALSA: als300: Fix the missing snd_card_free() call at probe error
	ALSA: als4000: Fix the missing snd_card_free() call at probe error
	ALSA: atiixp: Fix the missing snd_card_free() call at probe error
	ALSA: au88x0: Fix the missing snd_card_free() call at probe error
	ALSA: aw2: Fix the missing snd_card_free() call at probe error
	ALSA: azt3328: Fix the missing snd_card_free() call at probe error
	ALSA: bt87x: Fix the missing snd_card_free() call at probe error
	ALSA: ca0106: Fix the missing snd_card_free() call at probe error
	ALSA: cmipci: Fix the missing snd_card_free() call at probe error
	ALSA: cs4281: Fix the missing snd_card_free() call at probe error
	ALSA: cs5535audio: Fix the missing snd_card_free() call at probe error
	ALSA: echoaudio: Fix the missing snd_card_free() call at probe error
	ALSA: emu10k1x: Fix the missing snd_card_free() call at probe error
	ALSA: ens137x: Fix the missing snd_card_free() call at probe error
	ALSA: es1938: Fix the missing snd_card_free() call at probe error
	ALSA: es1968: Fix the missing snd_card_free() call at probe error
	ALSA: fm801: Fix the missing snd_card_free() call at probe error
	ALSA: galaxy: Fix the missing snd_card_free() call at probe error
	ALSA: hdsp: Fix the missing snd_card_free() call at probe error
	ALSA: hdspm: Fix the missing snd_card_free() call at probe error
	ALSA: ice1724: Fix the missing snd_card_free() call at probe error
	ALSA: intel8x0: Fix the missing snd_card_free() call at probe error
	ALSA: intel_hdmi: Fix the missing snd_card_free() call at probe error
	ALSA: korg1212: Fix the missing snd_card_free() call at probe error
	ALSA: lola: Fix the missing snd_card_free() call at probe error
	ALSA: lx6464es: Fix the missing snd_card_free() call at probe error
	ALSA: maestro3: Fix the missing snd_card_free() call at probe error
	ALSA: oxygen: Fix the missing snd_card_free() call at probe error
	ALSA: riptide: Fix the missing snd_card_free() call at probe error
	ALSA: rme32: Fix the missing snd_card_free() call at probe error
	ALSA: rme9652: Fix the missing snd_card_free() call at probe error
	ALSA: rme96: Fix the missing snd_card_free() call at probe error
	ALSA: sc6000: Fix the missing snd_card_free() call at probe error
	ALSA: sonicvibes: Fix the missing snd_card_free() call at probe error
	ALSA: via82xx: Fix the missing snd_card_free() call at probe error
	ALSA: usb-audio: Cap upper limits of buffer/period bytes for implicit fb
	ALSA: nm256: Don't call card private_free at probe error path
	drm/msm: Add missing put_task_struct() in debugfs path
	firmware: arm_scmi: Remove clear channel call on the TX channel
	memory: atmel-ebi: Fix missing of_node_put in atmel_ebi_probe
	Revert "ath11k: mesh: add support for 256 bitmap in blockack frames in 11ax"
	firmware: arm_scmi: Fix sorting of retrieved clock rates
	media: rockchip/rga: do proper error checking in probe
	SUNRPC: Fix the svc_deferred_event trace class
	net/sched: flower: fix parsing of ethertype following VLAN header
	veth: Ensure eth header is in skb's linear part
	gpiolib: acpi: use correct format characters
	cifs: release cached dentries only if mount is complete
	net: mdio: don't defer probe forever if PHY IRQ provider is missing
	mlxsw: i2c: Fix initialization error flow
	net/sched: fix initialization order when updating chain 0 head
	net: dsa: felix: suppress -EPROBE_DEFER errors
	net: ethernet: stmmac: fix altr_tse_pcs function when using a fixed-link
	net/sched: taprio: Check if socket flags are valid
	cfg80211: hold bss_lock while updating nontrans_list
	netfilter: nft_socket: make cgroup match work in input too
	drm/msm: Fix range size vs end confusion
	drm/msm/dsi: Use connector directly in msm_dsi_manager_connector_init()
	drm/msm/dp: add fail safe mode outside of event_mutex context
	net/smc: Fix NULL pointer dereference in smc_pnet_find_ib()
	scsi: pm80xx: Mask and unmask upper interrupt vectors 32-63
	scsi: pm80xx: Enable upper inbound, outbound queues
	scsi: iscsi: Move iscsi_ep_disconnect()
	scsi: iscsi: Fix offload conn cleanup when iscsid restarts
	scsi: iscsi: Fix endpoint reuse regression
	scsi: iscsi: Fix conn cleanup and stop race during iscsid restart
	scsi: iscsi: Fix unbound endpoint error handling
	sctp: Initialize daddr on peeled off socket
	netfilter: nf_tables: nft_parse_register can return a negative value
	ALSA: ad1889: Fix the missing snd_card_free() call at probe error
	ALSA: mtpav: Don't call card private_free at probe error path
	io_uring: move io_uring_rsrc_update2 validation
	io_uring: verify that resv2 is 0 in io_uring_rsrc_update2
	io_uring: verify pad field is 0 in io_get_ext_arg
	testing/selftests/mqueue: Fix mq_perf_tests to free the allocated cpu set
	ALSA: usb-audio: Increase max buffer size
	ALSA: usb-audio: Limit max buffer and period sizes per time
	perf tools: Fix misleading add event PMU debug message
	macvlan: Fix leaking skb in source mode with nodst option
	net: ftgmac100: access hardware register after clock ready
	nfc: nci: add flush_workqueue to prevent uaf
	cifs: potential buffer overflow in handling symlinks
	dm mpath: only use ktime_get_ns() in historical selector
	vfio/pci: Fix vf_token mechanism when device-specific VF drivers are used
	net: bcmgenet: Revert "Use stronger register read/writes to assure ordering"
	block: fix offset/size check in bio_trim()
	drm/amd: Add USBC connector ID
	btrfs: fix fallocate to use file_modified to update permissions consistently
	btrfs: do not warn for free space inode in cow_file_range
	drm/amdgpu: conduct a proper cleanup of PDB bo
	drm/amdgpu/gmc: use PCI BARs for APUs in passthrough
	drm/amd/display: fix audio format not updated after edid updated
	drm/amd/display: FEC check in timing validation
	drm/amd/display: Update VTEM Infopacket definition
	drm/amdkfd: Fix Incorrect VMIDs passed to HWS
	drm/amdgpu/vcn: improve vcn dpg stop procedure
	drm/amdkfd: Check for potential null return of kmalloc_array()
	Drivers: hv: vmbus: Deactivate sysctl_record_panic_msg by default in isolated guests
	PCI: hv: Propagate coherence from VMbus device to PCI device
	Drivers: hv: vmbus: Prevent load re-ordering when reading ring buffer
	scsi: target: tcmu: Fix possible page UAF
	scsi: lpfc: Fix queue failures when recovering from PCI parity error
	scsi: ibmvscsis: Increase INITIAL_SRP_LIMIT to 1024
	net: micrel: fix KS8851_MLL Kconfig
	ata: libata-core: Disable READ LOG DMA EXT for Samsung 840 EVOs
	gpu: ipu-v3: Fix dev_dbg frequency output
	regulator: wm8994: Add an off-on delay for WM8994 variant
	arm64: alternatives: mark patch_alternative() as `noinstr`
	tlb: hugetlb: Add more sizes to tlb_remove_huge_tlb_entry
	net: axienet: setup mdio unconditionally
	Drivers: hv: balloon: Disable balloon and hot-add accordingly
	net: usb: aqc111: Fix out-of-bounds accesses in RX fixup
	myri10ge: fix an incorrect free for skb in myri10ge_sw_tso
	spi: cadence-quadspi: fix protocol setup for non-1-1-X operations
	drm/amd/display: Enable power gating before init_pipes
	drm/amd/display: Revert FEC check in validation
	drm/amd/display: Fix allocate_mst_payload assert on resume
	drbd: set QUEUE_FLAG_STABLE_WRITES
	scsi: mpt3sas: Fail reset operation if config request timed out
	scsi: mvsas: Add PCI ID of RocketRaid 2640
	scsi: megaraid_sas: Target with invalid LUN ID is deleted during scan
	drivers: net: slip: fix NPD bug in sl_tx_timeout()
	io_uring: zero tag on rsrc removal
	io_uring: use nospec annotation for more indexes
	perf/imx_ddr: Fix undefined behavior due to shift overflowing the constant
	mm/secretmem: fix panic when growing a memfd_secret
	mm, page_alloc: fix build_zonerefs_node()
	mm: fix unexpected zeroed page mapping with zram swap
	mm: kmemleak: take a full lowmem check in kmemleak_*_phys()
	KVM: x86/mmu: Resolve nx_huge_pages when kvm.ko is loaded
	SUNRPC: Fix NFSD's request deferral on RDMA transports
	memory: renesas-rpc-if: fix platform-device leak in error path
	gcc-plugins: latent_entropy: use /dev/urandom
	cifs: verify that tcon is valid before dereference in cifs_kill_sb
	ath9k: Properly clear TX status area before reporting to mac80211
	ath9k: Fix usage of driver-private space in tx_info
	btrfs: fix root ref counts in error handling in btrfs_get_root_ref
	btrfs: mark resumed async balance as writing
	ALSA: hda/realtek: Add quirk for Clevo PD50PNT
	ALSA: hda/realtek: add quirk for Lenovo Thinkpad X12 speakers
	ALSA: pcm: Test for "silence" field in struct "pcm_format_data"
	nl80211: correctly check NL80211_ATTR_REG_ALPHA2 size
	ipv6: fix panic when forwarding a pkt with no in6 dev
	drm/amd/display: don't ignore alpha property on pre-multiplied mode
	drm/amdgpu: Enable gfxoff quirk on MacBook Pro
	x86/tsx: Use MSR_TSX_CTRL to clear CPUID bits
	x86/tsx: Disable TSX development mode at boot
	genirq/affinity: Consider that CPUs on nodes can be unbalanced
	tick/nohz: Use WARN_ON_ONCE() to prevent console saturation
	ARM: davinci: da850-evm: Avoid NULL pointer dereference
	dm integrity: fix memory corruption when tag_size is less than digest size
	i2c: dev: check return value when calling dev_set_name()
	smp: Fix offline cpu check in flush_smp_call_function_queue()
	i2c: pasemi: Wait for write xfers to finish
	dt-bindings: net: snps: remove duplicate name
	timers: Fix warning condition in __run_timers()
	dma-direct: avoid redundant memory sync for swiotlb
	drm/i915: Sunset igpu legacy mmap support based on GRAPHICS_VER_FULL
	cpu/hotplug: Remove the 'cpu' member of cpuhp_cpu_state
	soc: qcom: aoss: Fix missing put_device call in qmp_get
	net: ipa: fix a build dependency
	cpufreq: intel_pstate: ITMT support for overclocked system
	ax25: add refcount in ax25_dev to avoid UAF bugs
	ax25: fix reference count leaks of ax25_dev
	ax25: fix UAF bugs of net_device caused by rebinding operation
	ax25: Fix refcount leaks caused by ax25_cb_del()
	ax25: fix UAF bug in ax25_send_control()
	ax25: fix NPD bug in ax25_disconnect
	ax25: Fix NULL pointer dereferences in ax25 timers
	ax25: Fix UAF bugs in ax25 timers
	Linux 5.15.35

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0dd9eaea7f977df42b0a5b9cb9043c879f62718b


@ -106,6 +106,10 @@ properties:
- const: imem
- const: config
qcom,qmp:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the AOSS side-channel message RAM
qcom,smem-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: State bits used in by the AP to signal the modem.
@ -221,6 +225,8 @@ examples:
"imem",
"config";
qcom,qmp = <&aoss_qmp>;
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;
qcom,smem-state-names = "ipa-clock-enabled-valid",


@ -53,20 +53,18 @@ properties:
- allwinner,sun8i-r40-emac
- allwinner,sun8i-v3s-emac
- allwinner,sun50i-a64-emac
- loongson,ls2k-dwmac
- loongson,ls7a-dwmac
- amlogic,meson6-dwmac
- amlogic,meson8b-dwmac
- amlogic,meson8m2-dwmac
- amlogic,meson-gxbb-dwmac
- amlogic,meson-axg-dwmac
- loongson,ls2k-dwmac
- loongson,ls7a-dwmac
- ingenic,jz4775-mac
- ingenic,x1000-mac
- ingenic,x1600-mac
- ingenic,x1830-mac
- ingenic,x2000-mac
- loongson,ls2k-dwmac
- loongson,ls7a-dwmac
- rockchip,px30-gmac
- rockchip,rk3128-gmac
- rockchip,rk3228-gmac


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 34
SUBLEVEL = 35
EXTRAVERSION =
NAME = Trick or Treat


@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
int ret;
u32 val;
struct davinci_soc_info *soc_info = &davinci_soc_info;
u8 rmii_en = soc_info->emac_pdata->rmii_en;
u8 rmii_en;
if (!machine_is_davinci_da850_evm())
return 0;
rmii_en = soc_info->emac_pdata->rmii_en;
cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
val = __raw_readl(cfg_chip3_base);


@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature)
/*
* Check if the target PC is within an alternative block.
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
return !(pc >= replptr && pc <= (replptr + alt->alt_len));
@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
u32 insn;
@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
return insn;
}
static void patch_alternative(struct alt_instr *alt,
static noinstr void patch_alternative(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
__le32 *replptr;


@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
struct acpi_lpi_state *lpi;
struct acpi_processor *pr = per_cpu(processors, cpu);
if (unlikely(!pr || !pr->flags.has_lpi))
return -EINVAL;
/*
* If the PSCI cpu_suspend function hook has not been initialized
* idle states must not be enabled, so bail out
@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
if (!psci_ops.cpu_suspend)
return -EOPNOTSUPP;
if (unlikely(!pr || !pr->flags.has_lpi))
return -EINVAL;
count = pr->power.count - 1;
if (count <= 0)
return -ENODEV;


@ -1559,8 +1559,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
return -ENOTSUPP;
}
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
void kvm_mmu_x86_module_init(void);
int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);


@ -128,9 +128,9 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
/* SRBDS support */
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
#define RNGDS_MITG_DIS BIT(0)
#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
#define RTM_ALLOW BIT(1) /* TSX development mode */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175


@ -1714,6 +1714,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
update_srbds_msr();
tsx_ap_init();
}
static __init int setup_noclflush(char *arg)


@ -55,11 +55,10 @@ enum tsx_ctrl_states {
extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
extern void tsx_enable(void);
extern void tsx_disable(void);
extern void tsx_clear_cpuid(void);
void tsx_ap_init(void);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
extern void get_cpu_cap(struct cpuinfo_x86 *c);


@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_intel_misc_features(c);
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
tsx_enable();
else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
tsx_disable();
else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
tsx_clear_cpuid();
split_lock_init();
bus_lock_init();


@ -19,7 +19,7 @@
enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
void tsx_disable(void)
static void tsx_disable(void)
{
u64 tsx;
@ -39,7 +39,7 @@ void tsx_disable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
void tsx_enable(void)
static void tsx_enable(void)
{
u64 tsx;
@ -58,7 +58,7 @@ void tsx_enable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
static bool __init tsx_ctrl_is_supported(void)
static bool tsx_ctrl_is_supported(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();
@ -84,7 +84,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
return TSX_CTRL_ENABLE;
}
void tsx_clear_cpuid(void)
/*
* Disabling TSX is not a trivial business.
*
* First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT
* which says that TSX is practically disabled (all transactions are
* aborted by default). When that bit is set, the kernel unconditionally
* disables TSX.
*
* In order to do that, however, it needs to dance a bit:
*
* 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and
* the MSR is present only when *two* CPUID bits are set:
*
* - X86_FEATURE_RTM_ALWAYS_ABORT
* - X86_FEATURE_TSX_FORCE_ABORT
*
* 2. The second method is for CPUs which do not have the above-mentioned
* MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX
* through that one. Those CPUs can also have the initially mentioned
* CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy
* applies: TSX gets disabled unconditionally.
*
* When either of the two methods are present, the kernel disables TSX and
* clears the respective RTM and HLE feature flags.
*
* An additional twist in the whole thing presents late microcode loading
* which, when done, may cause for the X86_FEATURE_RTM_ALWAYS_ABORT CPUID
* bit to be set after the update.
*
* A subsequent hotplug operation on any logical CPU except the BSP will
* cause for the supported CPUID feature bits to get re-detected and, if
* RTM and HLE get cleared all of a sudden, but, userspace did consult
* them before the update, then funny explosions will happen. Long story
* short: the kernel doesn't modify CPUID feature bits after booting.
*
* That's why, this function's call in init_intel() doesn't clear the
* feature flags.
*/
static void tsx_clear_cpuid(void)
{
u64 msr;
@ -97,6 +135,39 @@ void tsx_clear_cpuid(void)
rdmsrl(MSR_TSX_FORCE_ABORT, msr);
msr |= MSR_TFA_TSX_CPUID_CLEAR;
wrmsrl(MSR_TSX_FORCE_ABORT, msr);
} else if (tsx_ctrl_is_supported()) {
rdmsrl(MSR_IA32_TSX_CTRL, msr);
msr |= TSX_CTRL_CPUID_CLEAR;
wrmsrl(MSR_IA32_TSX_CTRL, msr);
}
}
/*
* Disable TSX development mode
*
* When the microcode released in Feb 2022 is applied, TSX will be disabled by
* default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123
* (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, doing so is
* not recommended for production deployments. In particular, applying MD_CLEAR
* flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient
* execution attack may not be effective on these processors when Intel TSX is
* enabled with updated microcode.
*/
static void tsx_dev_mode_disable(void)
{
u64 mcu_opt_ctrl;
/* Check if RTM_ALLOW exists */
if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
!cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
return;
rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
if (mcu_opt_ctrl & RTM_ALLOW) {
mcu_opt_ctrl &= ~RTM_ALLOW;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
}
}
@ -105,14 +176,14 @@ void __init tsx_init(void)
char arg[5] = {};
int ret;
tsx_dev_mode_disable();
/*
* Hardware will always abort a TSX transaction if both CPUID bits
* RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
* better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
* here.
* Hardware will always abort a TSX transaction when the CPUID bit
* RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate
* CPUID.RTM and CPUID.HLE bits. Clear them here.
*/
if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
tsx_clear_cpuid();
setup_clear_cpu_cap(X86_FEATURE_RTM);
@ -175,3 +246,16 @@ void __init tsx_init(void)
setup_force_cpu_cap(X86_FEATURE_HLE);
}
}
void tsx_ap_init(void)
{
tsx_dev_mode_disable();
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
tsx_enable();
else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
tsx_disable();
else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
/* See comment over that function for more details. */
tsx_clear_cpuid();
}
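
For readers tracing the two disable paths described in the long comment above, here is a compact userspace sketch of the same decision logic. It is illustrative only: rdmsr()/wrmsr() are fake accessors over an array rather than privileged MSR instructions, the feature checks are plain booleans instead of cpufeature queries, and the constants mirror the kernel's msr-index.h definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_TSX_FORCE_ABORT      0x10f
#define MSR_TFA_TSX_CPUID_CLEAR  (1ULL << 1)
#define MSR_IA32_TSX_CTRL        0x122
#define TSX_CTRL_CPUID_CLEAR     (1ULL << 1)

static uint64_t fake_msr[0x200];                       /* stand-in for real MSRs */
static uint64_t rdmsr(uint32_t m)             { return fake_msr[m]; }
static void     wrmsr(uint32_t m, uint64_t v) { fake_msr[m] = v; }

/* Mirror of tsx_clear_cpuid(): method 1 needs both CPUID bits,
 * method 2 only needs MSR_IA32_TSX_CTRL to be enumerated. */
static void clear_tsx_cpuid(bool rtm_always_abort, bool tsx_force_abort,
                            bool tsx_ctrl_supported)
{
        if (rtm_always_abort && tsx_force_abort)
                wrmsr(MSR_TSX_FORCE_ABORT,
                      rdmsr(MSR_TSX_FORCE_ABORT) | MSR_TFA_TSX_CPUID_CLEAR);
        else if (tsx_ctrl_supported)
                wrmsr(MSR_IA32_TSX_CTRL,
                      rdmsr(MSR_IA32_TSX_CTRL) | TSX_CTRL_CPUID_CLEAR);
}

int main(void)
{
        clear_tsx_cpuid(true, false, true);            /* falls back to method 2 */
        printf("TSX_CTRL = %#llx\n",
               (unsigned long long)rdmsr(MSR_IA32_TSX_CTRL));
        return 0;
}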


@ -6105,12 +6105,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
return 0;
}
int kvm_mmu_module_init(void)
/*
* nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
* its default value of -1 is technically undefined behavior for a boolean.
*/
void kvm_mmu_x86_module_init(void)
{
int ret = -ENOMEM;
if (nx_huge_pages == -1)
__set_nx_huge_pages(get_nx_auto_mode());
}
/*
* The bulk of the MMU initialization is deferred until the vendor module is
* loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
* to be reset when a potentially different vendor module is loaded.
*/
int kvm_mmu_vendor_module_init(void)
{
int ret = -ENOMEM;
/*
* MMU roles use union aliasing which is, generally speaking, an
@ -6182,7 +6194,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu);
}
void kvm_mmu_module_exit(void)
void kvm_mmu_vendor_module_exit(void)
{
mmu_destroy_caches();
percpu_counter_destroy(&kvm_total_used_mmu_pages);
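
The tri-state resolution described in the first comment is a common module-parameter pattern: -1 means auto, and it must collapse to a real boolean exactly once, when kvm.ko loads, so no later reader ever sees the undefined value. A minimal standalone sketch under that assumption (names and the auto-mode heuristic are hypothetical):

#include <stdbool.h>
#include <stdio.h>

static int nx_huge_pages = -1;          /* -1 = auto, 0 = off, 1 = on */

static bool get_nx_auto_mode(void)
{
        return true;                    /* stand-in for the real CPU-bug check */
}

/* Runs once at module load, before any vendor module can read the value. */
static void resolve_nx_huge_pages(void)
{
        if (nx_huge_pages == -1)
                nx_huge_pages = get_nx_auto_mode();
}

int main(void)
{
        resolve_nx_huge_pages();
        printf("nx_huge_pages = %d\n", nx_huge_pages);
        return 0;
}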


@ -8562,7 +8562,7 @@ int kvm_arch_init(void *opaque)
}
kvm_nr_uret_msrs = 0;
r = kvm_mmu_module_init();
r = kvm_mmu_vendor_module_init();
if (r)
goto out_free_percpu;
@ -8612,7 +8612,7 @@ void kvm_arch_exit(void)
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
kvm_mmu_vendor_module_exit();
free_percpu(user_return_msrs);
kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
@ -12618,3 +12618,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
static int __init kvm_x86_init(void)
{
kvm_mmu_x86_module_init();
return 0;
}
module_init(kvm_x86_init);
static void __exit kvm_x86_exit(void)
{
/*
* If module_init() is implemented, module_exit() must also be
* implemented to allow module unload.
*/
}
module_exit(kvm_x86_exit);
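
The empty kvm_x86_exit() above encodes the rule its in-code comment states: a module that registers module_init() without a matching module_exit() cannot be unloaded. A minimal sketch of the paired skeleton (module name hypothetical):

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        return 0;                       /* nothing to set up in this sketch */
}

/* Present only so the module can still be unloaded with rmmod. */
static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");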


@ -1555,7 +1555,7 @@ EXPORT_SYMBOL(bio_split);
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
offset + size > bio->bi_iter.bi_size))
offset + size > bio_sectors(bio)))
return;
size <<= 9;
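
The one-line change fixes a unit mismatch: offset and size are given in 512-byte sectors, while bi_iter.bi_size counts bytes, so the old bound was far too permissive. A standalone sketch of the corrected invariant, with the bio struct reduced to the one field that matters:

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct bio {
        uint32_t bi_size;                     /* remaining I/O, in bytes */
};

#define bio_sectors(b) ((b)->bi_size >> 9)    /* bytes -> 512-byte sectors */

static int trim_in_bounds(const struct bio *bio, sector_t offset, sector_t size)
{
        return offset + size <= bio_sectors(bio);   /* sectors vs. sectors */
}

int main(void)
{
        struct bio bio = { .bi_size = 8 << 9 };     /* 8 sectors */

        assert(trim_in_bounds(&bio, 4, 4));
        assert(!trim_in_bounds(&bio, 4, 5));  /* the old byte-based check let this pass */
        return 0;
}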


@ -95,6 +95,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
/* T40 can not handle C3 idle state */
{ set_max_cstate, "IBM ThinkPad T40", {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
(void *)2},
{},
};
@ -789,7 +794,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state->enter = acpi_idle_enter;
state->flags = 0;
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
}
@ -1075,6 +1081,11 @@ static int flatten_lpi_states(struct acpi_processor *pr,
return 0;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return -EOPNOTSUPP;
}
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
int ret, i;
@ -1083,6 +1094,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
if (ret == -EOPNOTSUPP)
return ret;
if (!osc_pc_lpi_support_confirmed)
return -EOPNOTSUPP;
@ -1134,11 +1150,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
return 0;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return -ENODEV;
}
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
return -ENODEV;
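
The probe hook above relies on weak linkage: the ACPI core supplies an -EOPNOTSUPP default, and an architecture that implements FFH low-power idle overrides it at link time, which is what lets the core bail out early. A small userspace sketch of the pattern (helper names hypothetical):

#include <errno.h>
#include <stdio.h>

/* Generic default: no architectural LPI support unless overridden. */
__attribute__((weak)) int arch_lpi_probe(unsigned int cpu)
{
        (void)cpu;
        return -EOPNOTSUPP;
}

static int get_lpi_info(unsigned int cpu)
{
        int ret = arch_lpi_probe(cpu);  /* check support first, as the fix does */

        if (ret == -EOPNOTSUPP)
                return ret;
        /* ... parse _LPI states here ... */
        return 0;
}

int main(void)
{
        printf("get_lpi_info(0) = %d\n", get_lpi_info(0));
        return 0;
}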


@ -3999,6 +3999,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_NO_DMA_LOG |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |


@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
return -EPROBE_DEFER;
}
EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
static void deferred_probe_timeout_work_func(struct work_struct *work)
{


@ -2737,6 +2737,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */


@ -335,6 +335,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
#define CPPC_MAX_PERF U8_MAX
static void intel_pstate_set_itmt_prio(int cpu)
{
struct cppc_perf_caps cppc_perf;
@ -345,6 +347,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
if (ret)
return;
/*
* On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
* In this case we can't use CPPC.highest_perf to enable ITMT.
* In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
*/
if (cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
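
The clamp above substitutes the cached HWP capabilities value whenever firmware pins CPPC.highest_perf at 0xff on overclocked parts. A sketch of just that substitution, with the cached MSR value passed in directly; HWP_HIGHEST_PERF here mirrors the kernel macro, which takes the low byte:

#include <stdint.h>
#include <stdio.h>

#define CPPC_MAX_PERF          0xffU
#define HWP_HIGHEST_PERF(cap)  ((cap) & 0xffU)   /* low byte of HWP_CAPABILITIES */

static unsigned int itmt_highest_perf(unsigned int cppc_highest_perf,
                                      uint64_t hwp_cap_cached)
{
        /* Overclocked parts hardcode 0xff; fall back to HWP capabilities. */
        if (cppc_highest_perf == CPPC_MAX_PERF)
                return HWP_HIGHEST_PERF(hwp_cap_cached);
        return cppc_highest_perf;
}

int main(void)
{
        printf("%u\n", itmt_highest_perf(0xff, 0x2c));   /* 44, taken from HWP */
        printf("%u\n", itmt_highest_perf(0x30, 0x2c));   /* 48, CPPC trusted */
        return 0;
}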


@ -204,7 +204,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
rate_cmp_func, NULL);
}
clk->rate_discrete = rate_discrete;


@ -652,7 +652,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
scmi_clear_channel(info, cinfo);
if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
scmi_clear_channel(info, cinfo);
return;
}


@ -392,8 +392,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
pin = agpio->pin_table[0];
if (pin <= 255) {
char ev_name[5];
sprintf(ev_name, "_%c%02hhX",
char ev_name[8];
sprintf(ev_name, "_%c%02X",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
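
The fix enlarges ev_name and drops the hh length modifier. A standalone illustration of why: the variadic call promotes pin to int, and %02hhX asks the library to truncate it back to 8 bits, which trips compiler format checking; plain %02X with a comfortably sized buffer is the clean form (pin assumed 16-bit, as in the ACPI GPIO resource):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t pin = 0x2a;            /* ACPI GPIO pin numbers are 16-bit */
        int edge = 1;
        char ev_name[8];                /* "_Exx" plus NUL, with headroom */

        /* pin is promoted to int here; plain %02X prints it untruncated */
        snprintf(ev_name, sizeof(ev_name), "_%c%02X",
                 edge ? 'E' : 'L', pin);
        puts(ev_name);                  /* -> "_E2A" */
        return 0;
}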


@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */


@ -5625,7 +5625,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU)
if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
return;
#endif
if (adev->gmc.xgmi.connected_to_cpu)
@ -5641,7 +5641,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU)
if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
return;
#endif
if (adev->gmc.xgmi.connected_to_cpu)


@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy,
* Maximum number of processes that HWS can schedule concurrently. The maximum is the
* number of VMIDs assigned to the HWS, which is also the default.
*/
int hws_max_conc_proc = 8;
int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");


@ -1272,6 +1272,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};


@ -788,7 +788,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}


@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU &&
adev->gmc.real_vram_size > adev->gmc.aper_size) {
if ((adev->flags & AMD_IS_APU) &&
adev->gmc.real_vram_size > adev->gmc.aper_size &&
!amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}


@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}


@ -1387,7 +1387,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
*/
/* check whether both host-gpu and gpu-gpu xgmi links exist */
if ((adev->flags & AMD_IS_APU) ||
if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
(adev->gmc.xgmi.supported &&
adev->gmc.xgmi.connected_to_cpu)) {
adev->gmc.aper_base =
@ -1652,7 +1652,7 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_unref(&adev->gmc.pdb0_bo);
amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
return 0;


@ -1508,8 +1508,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
vcn_v3_0_pause_dpg_mode(adev, 0, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);


@ -834,15 +834,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
}
/* Verify module parameters regarding mapped process number*/
if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
dev_err(kfd_device,
"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
kfd->vm_info.vmid_num_kfd);
if (hws_max_conc_proc >= 0)
kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
else
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *


@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
if (!event_waiters)
return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
init_wait(&event_waiters[i].wait);


@ -2296,7 +2296,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
if (aconnector->mst_port)
if (aconnector->dc_link &&
aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);


@ -1626,8 +1626,8 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
// Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
/*compare audio info*/
if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
return true;


@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
.program_watermarks = hubbub1_program_watermarks,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
};
void hubbub1_construct(struct hubbub *hubbub,


@ -1052,9 +1052,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
static bool should_log_hw_state; /* prevent hw state log by default */
if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
if (!hubbub->funcs->verify_allow_pstate_change_high)
return;
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
int i = 0;
if (should_log_hw_state)
@ -1063,8 +1067,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
BREAK_TO_DEBUGGER();
if (dcn10_hw_wa_force_recovery(dc)) {
/*check again*/
if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
/*check again*/
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
BREAK_TO_DEBUGGER();
}
}
@ -1435,6 +1439,9 @@ void dcn10_init_hw(struct dc *dc)
}
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@ -1487,8 +1494,6 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@ -2455,14 +2460,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
} else if (per_pixel_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
} else {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else


@ -2297,14 +2297,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
} else if (per_pixel_alpha) {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
} else {
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
}
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else


@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.program_watermarks = hubbub3_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,


@ -570,6 +570,9 @@ void dcn30_init_hw(struct dc *dc)
}
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@ -647,8 +650,6 @@ void dcn30_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);


@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
.program_watermarks = hubbub3_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.hubbub_read_state = hubbub2_read_state,


@ -24,6 +24,7 @@
*/
#include <linux/delay.h>
#include "dcn30/dcn30_hubbub.h"
#include "dcn31_hubbub.h"
#include "dm_services.h"
@ -949,6 +950,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
/*
* Pstate latency is ~20us so if we wait over 40us and pstate allow
* still not asserted, we are probably stuck and going to hang
*/
const unsigned int pstate_wait_timeout_us = 100;
const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
unsigned int debug_data = 0;
unsigned int i;
if (forced_pstate_allow) {
/* we hacked to force pstate allow to prevent hang last time
* we verify_allow_pstate_change_high. so disable force
* here so we can check status
*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
forced_pstate_allow = false;
}
REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate);
for (i = 0; i < pstate_wait_timeout_us; i++) {
debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
/* Debug bit is specific to ASIC. */
if (debug_data & (1 << 26)) {
if (i > pstate_wait_expected_timeout_us)
DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i);
return true;
}
if (max_sampled_pstate_wait_us < i)
max_sampled_pstate_wait_us = i;
udelay(1);
}
/* force pstate allow to prevent system hang
* and break to debugger to investigate
*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
forced_pstate_allow = true;
DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
debug_data);
return false;
}
static const struct hubbub_funcs hubbub31_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
@ -961,6 +1021,7 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_watermarks = hubbub31_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
.program_det_size = dcn31_program_det_size,
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
@ -982,5 +1043,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
hubbub31->detile_buf_size = det_size_kb * 1024;
hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
hubbub31->debug_test_index_pstate = 0x6;
}
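
hubbub31_verify_allow_pstate_change_high() is a poll-with-deadline loop that distinguishes slower-than-expected from stuck. Stripped of the register plumbing, the shape is roughly the following sketch; the timeouts come from the comment above and the status source is faked:

#include <stdbool.h>
#include <stdio.h>

#define WAIT_TIMEOUT_US    100  /* give up and force the workaround */
#define WAIT_EXPECTED_US    40  /* roughly 2x the ~20us pstate latency */

static bool pstate_allowed(unsigned int us) { return us >= 55; }  /* fake HW */

static bool verify_pstate_allow(void)
{
        unsigned int i;

        for (i = 0; i < WAIT_TIMEOUT_US; i++) {
                if (pstate_allowed(i)) {
                        if (i > WAIT_EXPECTED_US)
                                fprintf(stderr, "pstate took ~%uus\n", i);
                        return true;    /* asserted, possibly late */
                }
                /* udelay(1) in the driver */
        }
        return false;                   /* stuck: force allow, break to debugger */
}

int main(void)
{
        printf("allowed: %d\n", verify_pstate_allow());
        return 0;
}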


@ -204,6 +204,9 @@ void dcn31_init_hw(struct dc *dc)
}
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@ -287,8 +290,6 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);


@ -940,7 +940,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 4096,/*upto true 4K*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
.sanity_checks = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,


@ -154,6 +154,8 @@ struct hubbub_funcs {
bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub);
void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);


@ -100,7 +100,8 @@ enum vsc_packet_revision {
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
#define MASK_VTEM_MD0__RESERVED2 0x0C
#define MASK_VTEM_MD0__QMS_EN 0x04
#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@ -109,7 +110,7 @@ enum vsc_packet_revision {
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
#define MASK_VTEM_MD2__RESERVED3 0xF8
#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF


@ -66,7 +66,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* mmap ioctl is disallowed for all discrete platforms,
* and for all platforms with GRAPHICS_VER > 12.
*/
if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
return -EOPNOTSUPP;
if (args->flags & ~(I915_MMAP_WC))


@ -1711,7 +1711,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
"gpu", 0x100000000ULL, 0x1ffffffffULL);
"gpu", 0x100000000ULL, SZ_4G);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)


@ -551,6 +551,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
mutex_unlock(&dp->event_mutex);
/*
* add fail safe mode outside event_mutex scope
* to avoid potential circular lock with drm thread
*/
dp_panel_add_fail_safe_mode(dp->dp_display.connector);
/* uevent will complete connection part */
return 0;
};


@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
{
/* fail safe edid */
mutex_lock(&connector->dev->mode_config.mutex);
if (drm_add_modes_noedid(connector, 640, 480))
drm_set_preferred_mode(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
}
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
goto end;
}
/* fail safe edid */
mutex_lock(&connector->dev->mode_config.mutex);
if (drm_add_modes_noedid(connector, 640, 480))
drm_set_preferred_mode(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
} else {
/* always add fail-safe mode as backup mode */
mutex_lock(&connector->dev->mode_config.mutex);
drm_add_modes_noedid(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
dp_panel_add_fail_safe_mode(connector);
}
if (panel->aux_cfg_update_done) {


@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,


@ -643,7 +643,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
fail:
connector->funcs->destroy(msm_dsi->connector);
connector->funcs->destroy(connector);
return ERR_PTR(ret);
}


@ -937,6 +937,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}


@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
error = rate / (sig->mode.pixelclock / 1000);
dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
rate, div, (signed)(error - 1000) / 10, error % 10);
dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
rate, div, error < 1000 ? '-' : '+',
abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {


@ -1653,6 +1653,38 @@ static void disable_page_reporting(void)
}
}
static int ballooning_enabled(void)
{
/*
* Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
* since currently it's unclear to us whether an unballoon request can
* make sure all page ranges are guest page size aligned.
*/
if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
pr_info("Ballooning disabled because page size is not 4096 bytes\n");
return 0;
}
return 1;
}
static int hot_add_enabled(void)
{
/*
* Disable hot add on ARM64, because we currently rely on
* memory_add_physaddr_to_nid() to get a node id of a hot add range,
* however ARM64's memory_add_physaddr_to_nid() always return 0 and
* DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
* add_memory().
*/
if (IS_ENABLED(CONFIG_ARM64)) {
pr_info("Memory hot add disabled on ARM64\n");
return 0;
}
return 1;
}
static int balloon_connect_vsp(struct hv_device *dev)
{
struct dm_version_request version_req;
@ -1724,8 +1756,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
* currently still requires the bits to be set, so we have to add code
* to fail the host's hot-add and balloon up/down requests, if any.
*/
cap_msg.caps.cap_bits.balloon = 1;
cap_msg.caps.cap_bits.hot_add = 1;
cap_msg.caps.cap_bits.balloon = ballooning_enabled();
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
/*
* Specify our alignment requirements as it relates


@ -408,7 +408,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
u32 priv_read_loc = rbi->priv_read_index;
u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
u32 write_loc;
/*
* The Hyper-V host writes the packet data, then uses
* store_release() to update the write_index. Use load_acquire()
* here to prevent loads of the packet data from being re-ordered
* before the read of the write_index and potentially getting
* stale data.
*/
write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
if (write_loc >= priv_read_loc)
return write_loc - priv_read_loc;
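
The pairing the new comment describes is the classic release/acquire handoff for a single-producer ring buffer: publish the payload first, then the index with release semantics, and read the index with acquire semantics before touching the payload. A userspace sketch with C11 atomics (layout simplified; virt_load_acquire() corresponds roughly to memory_order_acquire here):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
        uint8_t data[64];
        _Atomic uint32_t write_index;
};

/* Producer (the host, in the vmbus case): payload first, then publish. */
static void produce(struct ring *r, uint8_t byte)
{
        uint32_t w = atomic_load_explicit(&r->write_index,
                                          memory_order_relaxed);

        r->data[w % sizeof(r->data)] = byte;
        atomic_store_explicit(&r->write_index, w + 1,
                              memory_order_release);
}

/* Consumer: acquire keeps payload reads from moving before this load. */
static uint32_t avail(struct ring *r, uint32_t read_index)
{
        uint32_t w = atomic_load_explicit(&r->write_index,
                                          memory_order_acquire);

        return w - read_index;
}

int main(void)
{
        static struct ring r;

        produce(&r, 0x42);
        return avail(&r, 0) == 1 ? 0 : 1;
}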


@ -76,8 +76,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
/*
* Hyper-V should be notified only once about a panic. If we will be
* doing hyperv_report_panic_msg() later with kmsg data, don't do
* the notification here.
* doing hv_kmsg_dump() with kmsg data later, don't do the notification
* here.
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
&& hyperv_report_reg()) {
@ -99,8 +99,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
/*
* Hyper-V should be notified only once about a panic. If we will be
* doing hyperv_report_panic_msg() later with kmsg data, don't do
* the notification here.
* doing hv_kmsg_dump() with kmsg data later, don't do the notification
* here.
*/
if (hyperv_report_reg())
hyperv_report_panic(regs, val, true);
@ -1545,14 +1545,20 @@ static int vmbus_bus_init(void)
if (ret)
goto err_connect;
if (hv_is_isolation_supported())
sysctl_record_panic_msg = 0;
/*
* Only register if the crash MSRs are available
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
u64 hyperv_crash_ctl;
/*
* Sysctl registration is not fatal, since by default
* reporting is enabled.
* Panic message recording (sysctl_record_panic_msg)
* is enabled by default in non-isolated guests and
* disabled by default in isolated guests; the panic
* message recording won't be available in isolated
* guests should the following registration fail.
*/
hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
if (!hv_ctl_table_hdr)


@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
if (stop) {
err = pasemi_smb_waitready(smbus);
if (err)
goto reset_out;
}
}
return 0;


@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
i2c_dev->dev.class = i2c_dev_class;
i2c_dev->dev.parent = &adap->dev;
i2c_dev->dev.release = i2cdev_dev_release;
dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
if (res)
goto err_put_i2c_dev;
res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
if (res) {
put_i2c_dev(i2c_dev, false);
return res;
}
if (res)
goto err_put_i2c_dev;
pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
return 0;
err_put_i2c_dev:
put_i2c_dev(i2c_dev, false);
return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)


@ -4383,6 +4383,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
if (ic->internal_hash) {
size_t recalc_tags_size;
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@ -4396,8 +4397,10 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -ENOMEM;
goto bad;
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
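
The memory corruption came from sizing the recalc tag buffer as blocks * tag_size while the internal hash writes a full digest per block, so a digest wider than the tag overran the final slot; the fix pads once for the worst case. Worked arithmetic as a sketch, with illustrative sizes:

#include <stdio.h>

int main(void)
{
        unsigned long recalc_sectors = 2048;
        unsigned int  log2_sectors_per_block = 3;  /* 4 KiB blocks */
        unsigned int  tag_size = 4;                /* bytes stored per block */
        unsigned int  digest_size = 20;            /* e.g. a SHA-1 internal hash */

        unsigned long blocks = recalc_sectors >> log2_sectors_per_block;  /* 256 */
        unsigned long size = blocks * tag_size;                           /* 1024 */

        /* the last digest may spill past its tag_size slot; pad once */
        if (digest_size > tag_size)
                size += digest_size - tag_size;                           /* +16 */

        printf("recalc_tags_size = %lu bytes\n", size);                   /* 1040 */
        return 0;
}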


@ -432,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
u64 time_now = sched_clock();
u64 time_now = ktime_get_ns();
struct dm_path *ret = NULL;
unsigned long flags;
@ -473,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
static u64 path_service_time(struct path_info *pi, u64 start_time)
{
u64 sched_now = ktime_get_ns();
u64 now = ktime_get_ns();
/* if a previous disk request has finished after this IO was
* sent to the hardware, pretend the submission happened
@ -482,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
if (time_after64(pi->last_finish, start_time))
start_time = pi->last_finish;
pi->last_finish = sched_now;
if (time_before64(sched_now, start_time))
pi->last_finish = now;
if (time_before64(now, start_time))
return 0;
return sched_now - start_time;
return now - start_time;
}
static int hst_end_io(struct path_selector *ps, struct dm_path *path,


@ -895,7 +895,7 @@ static int rga_probe(struct platform_device *pdev)
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
if (rga->dst_mmu_pages) {
if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}


@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
if (IS_ERR(ebi->smc.regmap))
return PTR_ERR(ebi->smc.regmap);
if (IS_ERR(ebi->smc.regmap)) {
ret = PTR_ERR(ebi->smc.regmap);
goto put_node;
}
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
if (IS_ERR(ebi->smc.layout))
return PTR_ERR(ebi->smc.layout);
if (IS_ERR(ebi->smc.layout)) {
ret = PTR_ERR(ebi->smc.layout);
goto put_node;
}
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
if (PTR_ERR(ebi->smc.clk) != -ENOENT)
return PTR_ERR(ebi->smc.clk);
if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
ret = PTR_ERR(ebi->smc.clk);
goto put_node;
}
ebi->smc.clk = NULL;
}
of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
}
return of_platform_populate(np, NULL, NULL, dev);
put_node:
of_node_put(smc_np);
return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)


@ -579,6 +579,7 @@ static int rpcif_probe(struct platform_device *pdev)
struct platform_device *vdev;
struct device_node *flash;
const char *name;
int ret;
flash = of_get_next_child(pdev->dev.of_node, NULL);
if (!flash) {
@ -602,7 +603,14 @@ static int rpcif_probe(struct platform_device *pdev)
return -ENOMEM;
vdev->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, vdev);
return platform_device_add(vdev);
ret = platform_device_add(vdev);
if (ret) {
platform_device_put(vdev);
return ret;
}
return 0;
}
static int rpcif_remove(struct platform_device *pdev)


@ -1455,7 +1455,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}

View File

@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
writel(value, offset);
writel_relaxed(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
return readl(offset);
return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,


@ -1817,11 +1817,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
/* Disable ast2600 problematic HW arbitration */
if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
iowrite32(FTGMAC100_TM_DEFAULT,
priv->base + FTGMAC100_OFFSET_TM);
}
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1893,6 +1888,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
/* Disable ast2600 problematic HW arbitration */
if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
iowrite32(FTGMAC100_TM_DEFAULT,
priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */


@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
return 0;
errout:
mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;


@@ -39,6 +39,7 @@ config KS8851
config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
select MII
select CRC32
select EEPROM_93CX6


@@ -2900,11 +2900,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
if (segs != NULL) {
curr = segs;
segs = next;
skb_list_walk_safe(next, curr, next) {
curr->next = NULL;
dev_kfree_skb_any(segs);
dev_kfree_skb_any(curr);
}
goto drop;
}
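
skb_list_walk_safe() (<linux/skbuff.h>) walks a ->next-linked segment list while tolerating the current entry being freed, which is what the open-coded loop being removed was attempting. Sketch of releasing the remaining GSO segments on error:

    struct sk_buff *curr, *next_skb;

    skb_list_walk_safe(segs, curr, next_skb) {
        skb_mark_not_on_list(curr);     /* curr->next = NULL */
        dev_kfree_skb_any(curr);
    }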


@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
#define SGMII_ADAPTER_ENABLE 0x0000
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {


@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_ENABLE 0x0000
#define SGMII_ADAPTER_DISABLE 0x0001
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;


@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
if ((tse_pcs_base) && (sgmii_adapter_base))
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
if (splitter_base) {
val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
if (tse_pcs_base && sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
if (phy_dev)
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
}


@@ -2127,15 +2127,14 @@ static int axienet_probe(struct platform_device *pdev)
if (ret)
goto cleanup_clk;
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
ret = axienet_mdio_setup(lp);
if (ret)
dev_warn(&pdev->dev,
"error registering MDIO bus: %d\n", ret);
}
ret = axienet_mdio_setup(lp);
if (ret)
dev_warn(&pdev->dev,
"error registering MDIO bus: %d\n", ret);
if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!lp->phy_node) {
dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
ret = -EINVAL;


@@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
@@ -674,9 +673,11 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers. */
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
}
/* Perform I/O control on an active 6pack channel. */
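
Taken together, the two hamradio fixes enforce one teardown rule: nothing may free the netdev, or buffers its handlers still use, until unregister_netdev() has returned. With needs_free_netdev cleared, the close path owns that ordering explicitly; sketch of the safe sequence:

    unregister_netdev(sp->dev);     /* no further calls into the driver */
    del_timer_sync(&sp->tx_t);      /* stop deferred users of the buffers */
    del_timer_sync(&sp->resync_t);
    kfree(sp->rbuff);
    kfree(sp->xbuff);
    free_netdev(sp->dev);           /* last: sp lives in the netdev's priv area */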


@@ -4,6 +4,7 @@ config QCOM_IPA
depends on ARCH_QCOM || COMPILE_TEST
depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS


@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
#include "linux/soc/qcom/qcom_aoss.h"
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
@@ -64,6 +66,7 @@ enum ipa_power_flag
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
* @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
@@ -72,6 +75,7 @@ enum ipa_power_flag
struct ipa_power {
struct device *dev;
struct clk *core;
struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
@@ -382,6 +386,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa)
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
static int ipa_power_retention_init(struct ipa_power *power)
{
struct qmp *qmp = qmp_get(power->dev);
if (IS_ERR(qmp)) {
if (PTR_ERR(qmp) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* We assume any other error means it's not defined/needed */
qmp = NULL;
}
power->qmp = qmp;
return 0;
}
static void ipa_power_retention_exit(struct ipa_power *power)
{
qmp_put(power->qmp);
power->qmp = NULL;
}
/* Control register retention on power collapse */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
struct ipa_power *power = ipa->power;
char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */
int ret;
if (!power->qmp)
return; /* Not needed on this platform */
(void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');
ret = qmp_send(power->qmp, buf, sizeof(buf));
if (ret)
dev_err(power->dev, "error %d sending QMP %sable request\n",
ret, enable ? "en" : "dis");
}
int ipa_power_setup(struct ipa *ipa)
{
int ret;
@@ -438,12 +483,18 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
if (ret)
goto err_kfree;
ret = ipa_power_retention_init(power);
if (ret)
goto err_interconnect_exit;
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
err_interconnect_exit:
ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
@@ -460,6 +511,7 @@ void ipa_power_exit(struct ipa_power *power)
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);


@@ -40,6 +40,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa);
*/
void ipa_power_modem_queue_active(struct ipa *ipa);
/**
* ipa_power_retention() - Control register retention on power collapse
* @ipa: IPA pointer
* @enable: Whether retention should be enabled or disabled
*/
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer


@@ -11,6 +11,7 @@
#include "ipa.h"
#include "ipa_uc.h"
#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
@@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
@@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
if (ipa->uc_loaded)
ipa_power_retention(ipa, false);
if (!ipa->uc_powered)
return;


@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
*pskb = skb;
eth = eth_hdr(skb);
if (macvlan_forward_source(skb, port, eth->h_source))
if (macvlan_forward_source(skb, port, eth->h_source)) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
src = macvlan_hash_lookup(port, eth->h_source);
if (src && src->mode != MACVLAN_MODE_VEPA &&
src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
if (macvlan_forward_source(skb, port, eth->h_source))
if (macvlan_forward_source(skb, port, eth->h_source)) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
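
The contract being enforced: an rx_handler returning RX_HANDLER_CONSUMED has taken ownership of the skb and must free or queue it; returning CONSUMED while dropping the pointer, as the old code did, leaks the buffer. Minimal shape (predicate hypothetical):

    static rx_handler_result_t demo_rx_handler(struct sk_buff **pskb)
    {
        struct sk_buff *skb = *pskb;

        if (handled_elsewhere(skb)) {   /* hypothetical predicate */
            kfree_skb(skb);             /* consumed means we free it */
            return RX_HANDLER_CONSUMED;
        }
        return RX_HANDLER_PASS;         /* stack keeps processing skb */
    }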


@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
int rc;
rc = fwnode_irq_get(child, 0);
/* Don't wait forever if the IRQ provider doesn't become available,
* just fall back to poll mode
*/
if (rc == -EPROBE_DEFER)
rc = driver_deferred_probe_check_state(&phy->mdio.dev);
if (rc == -EPROBE_DEFER)
return rc;
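
driver_deferred_probe_check_state() keeps returning -EPROBE_DEFER while deferred probing is still plausible and switches to -ENODEV or -ETIMEDOUT once it no longer is, which is what bounds the wait above. Sketch of the full fallback (field usage illustrative):

    rc = fwnode_irq_get(child, 0);
    if (rc == -EPROBE_DEFER)
        rc = driver_deferred_probe_check_state(&phy->mdio.dev);
    if (rc == -EPROBE_DEFER)
        return rc;              /* provider may still show up */
    if (rc > 0)
        phy->irq = rc;          /* dedicated IRQ line */
    else
        phy->irq = PHY_POLL;    /* no IRQ coming: fall back to polling */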


@@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
if (!netif_running(dev))
if (!netif_running(dev) || !sl->tty)
goto out;
/* May be we must check transmitter timeout here ?


@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (start_of_descs != desc_offset)
goto err;
/* self check desc_offset from header*/
if (desc_offset >= skb_len)
/* self check desc_offset from header and make sure that the
* bounds of the metadata array are inside the SKB
*/
if (pkt_count * 2 + desc_offset >= skb_len)
goto err;
/* Packets must not overlap the metadata array */
skb_trim(skb, desc_offset);
if (pkt_count == 0)
goto err;
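
Shape of the check: desc_offset and pkt_count are parsed from untrusted device data, and the metadata array occupies pkt_count two-byte entries starting at desc_offset (stride as in the check above), so its end -- not just its start -- must fit inside the received buffer before skb_trim() exposes it. Sketch:

    if (desc_offset >= skb_len)                 /* start inside buffer */
        goto err;
    if (pkt_count * 2 + desc_offset >= skb_len) /* end inside buffer */
        goto err;
    skb_trim(skb, desc_offset);     /* payload must stop short of the metadata */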


@@ -327,7 +327,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
if (unlikely(!rcv)) {
if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}
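
pskb_may_pull(skb, ETH_HLEN) guarantees a full Ethernet header in the linear area (pulling from fragments if necessary); the forwarding path this feeds (__dev_forward_skb() and eth_type_trans()) assumes at least ETH_HLEN linear bytes. Sketch of the guard on its own:

    if (!pskb_may_pull(skb, ETH_HLEN)) {
        kfree_skb(skb);     /* runt or badly fragmented frame */
        goto drop;
    }
    /* eth_hdr(skb) fields are safe to dereference from here on */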


@@ -2156,6 +2156,19 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
ret);
if (vif->bss_conf.he_support) {
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
WMI_BA_MODE_BUFFER_SIZE_256);
if (ret)
ath11k_warn(ar->ab,
"failed to set BA BUFFER SIZE 256 for vdev: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Set BA BUFFER SIZE 256 for VDEV: %d\n",
arvif->vdev_id);
}
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -2191,14 +2204,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
WMI_BA_MODE_BUFFER_SIZE_256);
if (ret)
ath11k_warn(ar->ab,
"failed to set BA BUFFER SIZE 256 for vdev: %d\n",
arvif->vdev_id);
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,


@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}


@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
sizeof(tx_info->rate_driver_data));
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
sizeof(tx_info->status.status_driver_data));
return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2501,6 +2501,16 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
{
void *ptr = &tx_info->status;
memset(ptr + sizeof(tx_info->status.rates), 0,
sizeof(tx_info->status) -
sizeof(tx_info->status.rates) -
sizeof(tx_info->status.status_driver_data));
}
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2512,6 +2522,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
ath_clear_tx_status(tx_info);
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2526,6 +2538,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
tx_info->status.rates[i].count = 0;
tx_info->status.rates[i].idx = -1;
}
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2547,16 +2566,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
tx_info->status.rates[i].count = 0;
tx_info->status.rates[i].idx = -1;
}
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
/* we report airtime in ath_tx_count_airtime(), don't report twice */
tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
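
Why the clearing helper exists: the tx_info area is reused, so stale status fields must be wiped before reporting, while .rates[] (consumed as input) and the driver's scratch area must survive. The memset arithmetic relies on .rates being the first member of the status struct and .status_driver_data the last; schematically (an illustrative stand-in, not the real ieee80211 layout):

    struct status_like {
        struct { s8 idx; u8 count; } rates[4];  /* preserved (first member) */
        u32 ack_signal;                         /* cleared */
        u8 ampdu_ack_len, ampdu_len, antenna;   /* cleared */
        u16 tx_time;                            /* cleared */
        void *status_driver_data[3];            /* preserved (last member) */
    };
    /* cleared span = sizeof(status) - sizeof(rates) - sizeof(status_driver_data),
     * starting right after rates[] -- exactly the middle fields above. */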


@@ -3148,6 +3148,15 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
#elif defined(CONFIG_ARM64)
/*
* Set the PCI bus parent to be the corresponding VMbus
* device. Then the VMbus device will be assigned as the
* ACPI companion in pcibios_root_bridge_prepare() and
* pci_dma_configure() will propagate device coherence
* information to devices created on the bus.
*/
hbus->sysdata.parent = hdev->device.parent;
#endif
hbus->hdev = hdev;


@@ -29,7 +29,7 @@
#define CNTL_OVER_MASK 0xFFFFFFFE
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
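
The one-character CNTL_CSV_MASK fix matters because 0xFF is a signed int: shifting it left by 24 moves a bit into the sign position (undefined behaviour for signed arithmetic), and the resulting negative value sign-extends when widened, corrupting 64-bit register masks. With the U suffix the arithmetic stays unsigned throughout:

    u64 bad  = (u64)(0xFF  << 24);  /* UB; typically 0xffffffffff000000 */
    u64 good = (u64)(0xFFU << 24);  /* 0x00000000ff000000 */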


@@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = {
};
static const struct regulator_desc wm8994_ldo_desc[] = {
{
.name = "LDO1",
.id = 1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
.vsel_reg = WM8994_LDO_1,
.vsel_mask = WM8994_LDO1_VSEL_MASK,
.ops = &wm8994_ldo1_ops,
.min_uV = 2400000,
.uV_step = 100000,
.enable_time = 3000,
.off_on_delay = 36000,
.owner = THIS_MODULE,
},
{
.name = "LDO2",
.id = 2,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
.vsel_reg = WM8994_LDO_2,
.vsel_mask = WM8994_LDO2_VSEL_MASK,
.ops = &wm8994_ldo2_ops,
.enable_time = 3000,
.off_on_delay = 36000,
.owner = THIS_MODULE,
},
};
static const struct regulator_desc wm8958_ldo_desc[] = {
{
.name = "LDO1",
.id = 1,
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
* regulator core and we need not worry about it on the
* error path.
*/
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8994_ldo_desc[id],
&config);
if (ldo->wm8994->type == WM8994) {
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8994_ldo_desc[id],
&config);
} else {
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8958_ldo_desc[id],
&config);
}
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",


@@ -36,7 +36,7 @@
#define IBMVSCSIS_VERSION "v0.2"
#define INITIAL_SRP_LIMIT 800
#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024


@@ -15105,6 +15105,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
/* Init cpu_map array */
lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {


@@ -2558,6 +2558,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_IS_LUN_VALID(sdev) \
(((sdev)->lun == 0) ? 1 : 0)
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)


@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
} else if (!MEGASAS_IS_LUN_VALID(sdev)) {
sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
return -ENXIO;
}
scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
if (!MEGASAS_IS_LUN_VALID(sdev)) {
sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
return;
}
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)


@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
if ((ioc->shost_recovery) || (ioc->config_cmds.status &
MPT3_CMD_RESET) || ioc->pci_error_recovery)
if (ioc->config_cmds.status & MPT3_CMD_RESET)
goto retry_config;
issue_host_reset = 1;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
issue_host_reset = 0;
r = -EFAULT;
} else
issue_host_reset = 1;
goto free_mem;
}


@@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },


@@ -765,6 +765,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
/* Enable higher IQs and OQs, 32 to 63, bit 16 */
if (pm8001_ha->max_q_num > 32)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1026,6 +1030,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
/*
* As per controller datasheet, after successful MPI
* initialization minimum 500ms delay is required before
* issuing commands.
*/
msleep(500);
return 0;
}
@@ -1733,10 +1744,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
u32 mask;
mask = (u32)(1 << vec);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
if (vec < 32)
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
else
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1752,12 +1764,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
u32 mask;
if (vec == 0xFF)
mask = 0xFFFFFFFF;
if (vec == 0xFF) {
/* disable all vectors 0-31, 32-63 */
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
} else if (vec < 32)
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
mask = (u32)(1 << vec);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
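
The old code computed (u32)(1 << vec) even for vec >= 32 -- a shift that is undefined for 32-bit operands and in any case cannot reach vectors 32-63, which live in the *_U companion registers. The split as a helper (shape mirrors the hunks above, helper name hypothetical):

    static void demo_mask_vec(struct pm8001_hba_info *pm8001_ha, u8 vec)
    {
        if (vec < 32)
            pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
        else
            pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 1U << (vec - 32));
    }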


@@ -2221,10 +2221,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
conn->state = ISCSI_CONN_FAILED;
WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
conn->state = ISCSI_CONN_DOWN;
WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2236,6 +2236,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
{
struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
struct iscsi_endpoint *ep;
ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
if (!conn->ep || !session->transport->ep_disconnect)
return;
ep = conn->ep;
conn->ep = NULL;
session->transport->unbind_conn(conn, is_active);
session->transport->ep_disconnect(ep);
ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
}
static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
struct iscsi_endpoint *ep,
bool is_active)
{
/* Check if this was a conn error and the kernel took ownership */
spin_lock_irq(&conn->lock);
if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
spin_unlock_irq(&conn->lock);
iscsi_ep_disconnect(conn, is_active);
} else {
spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
mutex_unlock(&conn->ep_mutex);
flush_work(&conn->cleanup_work);
/*
* Userspace is now done with the EP so we can release the ref
* iscsi_cleanup_conn_work_fn took.
*/
iscsi_put_endpoint(ep);
mutex_lock(&conn->ep_mutex);
}
}
static int iscsi_if_stop_conn(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -2256,12 +2299,25 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
cancel_work_sync(&conn->cleanup_work);
iscsi_stop_conn(conn, flag);
} else {
/*
* For offload, when iscsid is restarted it won't know about
* existing endpoints so it can't do a ep_disconnect. We clean
* it up here for userspace.
*/
mutex_lock(&conn->ep_mutex);
if (conn->ep)
iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
mutex_unlock(&conn->ep_mutex);
/*
* Figure out if it was the kernel or userspace initiating this.
*/
spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
@@ -2270,31 +2326,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
{
struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
struct iscsi_endpoint *ep;
ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
conn->state = ISCSI_CONN_FAILED;
if (!conn->ep || !session->transport->ep_disconnect)
return;
ep = conn->ep;
conn->ep = NULL;
session->transport->unbind_conn(conn, is_active);
session->transport->ep_disconnect(ep);
ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
}
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2303,18 +2342,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
mutex_lock(&conn->ep_mutex);
/*
* If we are not at least bound there is nothing for us to do. Userspace
* will do a ep_disconnect call if offload is used, but will not be
* doing a stop since there is nothing to clean up, so we have to clear
* the cleanup bit here.
* Get a ref to the ep, so we don't release its ID until after
* userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
mutex_unlock(&conn->ep_mutex);
return;
}
if (conn->ep)
get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
@@ -2370,11 +2402,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
conn->state = ISCSI_CONN_DOWN;
WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2561,9 +2594,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
unsigned long flags;
int state;
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
spin_lock_irqsave(&conn->lock, flags);
/*
* Userspace will only do a stop call if we are at least bound. And, we
* only need to do the in kernel cleanup if in the UP state so cmds can
* be released to upper layers. If in other states just wait for
* userspace to avoid races that can leave the cleanup_work queued.
*/
state = READ_ONCE(conn->state);
switch (state) {
case ISCSI_CONN_BOUND:
case ISCSI_CONN_UP:
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
&conn->flags)) {
queue_work(iscsi_conn_cleanup_workq,
&conn->cleanup_work);
}
break;
default:
ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
state);
break;
}
spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2913,7 +2969,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
int err = 0, value = 0;
int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2930,8 +2986,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
if ((conn->state == ISCSI_CONN_BOUND) ||
(conn->state == ISCSI_CONN_UP)) {
state = READ_ONCE(conn->state);
if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -3003,16 +3059,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
}
mutex_lock(&conn->ep_mutex);
/* Check if this was a conn error and the kernel took ownership */
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
mutex_unlock(&conn->ep_mutex);
flush_work(&conn->cleanup_work);
goto put_ep;
}
iscsi_ep_disconnect(conn, false);
iscsi_if_disconnect_bound_ep(conn, ep, false);
mutex_unlock(&conn->ep_mutex);
put_ep:
iscsi_put_endpoint(ep);
@@ -3715,24 +3762,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
return -EINVAL;
mutex_lock(&conn->ep_mutex);
spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
if (conn->ep) {
/*
* For offload boot support where iscsid is restarted
* during the pivot root stage, the ep will be intact
* here when the new iscsid instance starts up and
* reconnects.
*/
iscsi_ep_disconnect(conn, true);
}
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
@@ -3743,7 +3783,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
conn->state = ISCSI_CONN_BOUND;
WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3762,7 +3802,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
conn->state = ISCSI_CONN_UP;
WRITE_ONCE(conn->state, ISCSI_CONN_UP);
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4070,10 +4111,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
int conn_state = READ_ONCE(conn->state);
if (conn->state >= 0 &&
conn->state < ARRAY_SIZE(connection_state_names))
state = connection_state_names[conn->state];
if (conn_state >= 0 &&
conn_state < ARRAY_SIZE(connection_state_names))
state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
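
conn->state is now accessed from several contexts (netlink handlers, the cleanup workqueue, sysfs show, connection-error paths) without one shared lock, so every access goes through READ_ONCE()/WRITE_ONCE(): the compiler may neither tear the access nor silently re-read it mid-decision. Typical reader shape (helper name hypothetical):

    static bool conn_is_usable(struct iscsi_cls_conn *conn)
    {
        int state = READ_ONCE(conn->state);     /* snapshot exactly once */

        return state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP;
    }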


@@ -8,10 +8,12 @@
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
@@ -223,11 +225,14 @@ static bool qmp_message_empty(struct qmp *qmp)
*
* Return: 0 on success, negative errno on failure
*/
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
int ret;
if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data))
return -EINVAL;
if (WARN_ON(len + sizeof(u32) > qmp->size))
return -EINVAL;
@@ -261,6 +266,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
return ret;
}
EXPORT_SYMBOL(qmp_send);
static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
@@ -519,6 +525,55 @@ static void qmp_cooling_devices_remove(struct qmp *qmp)
thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}
/**
* qmp_get() - get a qmp handle from a device
* @dev: client device pointer
*
* Return: handle to qmp device on success, ERR_PTR() on failure
*/
struct qmp *qmp_get(struct device *dev)
{
struct platform_device *pdev;
struct device_node *np;
struct qmp *qmp;
if (!dev || !dev->of_node)
return ERR_PTR(-EINVAL);
np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
if (!np)
return ERR_PTR(-ENODEV);
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev)
return ERR_PTR(-EINVAL);
qmp = platform_get_drvdata(pdev);
if (!qmp) {
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
return qmp;
}
EXPORT_SYMBOL(qmp_get);
/**
* qmp_put() - release a qmp handle
* @qmp: qmp handle obtained from qmp_get()
*/
void qmp_put(struct qmp *qmp)
{
/*
* Match get_device() inside of_find_device_by_node() in
* qmp_get()
*/
if (!IS_ERR_OR_NULL(qmp))
put_device(qmp->dev);
}
EXPORT_SYMBOL(qmp_put);
static int qmp_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -615,6 +670,7 @@ static struct platform_driver qmp_driver = {
.driver = {
.name = "qcom_aoss_qmp",
.of_match_table = qmp_dt_match,
.suppress_bind_attrs = true,
},
.probe = qmp_probe,
.remove = qmp_remove,
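
Client side of the API exported above, roughly as the IPA driver consumes it: qmp_get() resolves the client's "qcom,qmp" phandle and takes a reference on the provider device, qmp_send() transfers a fixed-size message, and qmp_put() drops the reference (suppress_bind_attrs keeps the provider from being unbound underneath live handles). Sketch (helper name hypothetical):

    static int demo_aoss_send(struct device *dev, const void *msg, size_t len)
    {
        struct qmp *qmp = qmp_get(dev); /* dev must carry a qcom,qmp phandle */
        int ret;

        if (IS_ERR(qmp))
            return PTR_ERR(qmp);        /* includes -EPROBE_DEFER */

        ret = qmp_send(qmp, msg, len);  /* IPA sizes its buffer to a multiple of 4 */
        qmp_put(qmp);
        return ret;
    }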
