Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 72438f8cef
@@ -324,7 +324,6 @@ F: Documentation/ABI/testing/sysfs-bus-acpi
F: Documentation/ABI/testing/configfs-acpi
F: drivers/pci/*acpi*
F: drivers/pci/*/*acpi*
F: drivers/pci/*/*/*acpi*
F: tools/power/acpi/

ACPI APEI
@@ -8608,7 +8607,6 @@ F: include/linux/spinlock*.h
F: arch/*/include/asm/spinlock*.h
F: include/linux/rwlock*.h
F: include/linux/mutex*.h
F: arch/*/include/asm/mutex*.h
F: include/linux/rwsem*.h
F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
PCI_IO_VIRT_BASE + offset + SZ_64K,
@@ -413,3 +413,4 @@
396 common pkey_free sys_pkey_free
397 common statx sys_statx
398 common rseq sys_rseq
399 common io_pgetevents sys_io_pgetevents
@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
local_irq_disable();
ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
/*
* If the PTE disappeared temporarily due to a THP
* collapse, just return and let the guest try again.
*/
if (!ptep) {
local_irq_enable();
if (page)
put_page(page);
return RESUME_GUEST;
}
pte = *ptep;
local_irq_enable();
@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
BUG_ON(mem_size == 0);

set_max_mapnr(PFN_DOWN(mem_size));
max_low_pfn = pfn_base + PFN_DOWN(mem_size);
max_low_pfn = memblock_end_of_DRAM();

#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls \
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO

ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_VDSO_CFLAGS),)
CFL += $(RETPOLINE_VDSO_CFLAGS)
endif
endif

$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)

ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_VDSO_CFLAGS),)
KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
endif
endif

$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)

$(obj)/vdso32.so.dbg: FORCE \
@@ -43,8 +43,9 @@ extern u8 hvclock_page
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
asm("syscall" : "=a" (ret) :
"0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
asm ("syscall" : "=a" (ret), "=m" (*ts) :
"0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
"memory", "rcx", "r11");
return ret;
}
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
long ret;

asm("syscall" : "=a" (ret) :
"0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
"0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
"memory", "rcx", "r11");
return ret;
}
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;

asm(
asm (
"mov %%ebx, %%edx \n"
"mov %2, %%ebx \n"
"mov %[clock], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
: "=a" (ret)
: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
: "=a" (ret), "=m" (*ts)
: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
: "memory", "edx");
return ret;
}
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
long ret;

asm(
asm (
"mov %%ebx, %%edx \n"
"mov %2, %%ebx \n"
"mov %[tv], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
: "=a" (ret)
: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
: "=a" (ret), "=m" (*tv), "=m" (*tz)
: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
: "memory", "edx");
return ret;
}
@@ -36,6 +36,7 @@

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;

/*
* SliceMask and ThreadMask need to be set for certain L3 events in
* Family 17h. For other events, the two fields do not affect the count.
*/
if (l3_mask)
hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

if (event->cpu < 0)
return -EINVAL;
@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l3";
format_attr_event_df.show = &event_show_df;
format_attr_event_l3.show = &event_show_l3;
l3_mask = true;
} else {
num_counters_nb = NUM_COUNTERS_NB;
num_counters_llc = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l2";
format_attr_event_df = format_attr_event;
format_attr_event_l3 = format_attr_event;
l3_mask = false;
}

amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {

void bdx_uncore_cpu_init(void)
{
int pkg = topology_phys_to_logical_pkg(0);
int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
},
{ /* M3UPI0 Link 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
},
{ /* M3UPI0 Link 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
},
{ /* M3UPI1 Link 2 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
},
{ /* end: all zeroes */ }
};
@@ -46,6 +46,14 @@
#define INTEL_ARCH_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
((0xFULL) << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
((0xFFULL) << AMD64_L3_THREAD_SHIFT)

#define X86_RAW_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_EVENT | \
ARCH_PERFMON_EVENTSEL_UMASK | \
@@ -10,8 +10,13 @@ struct cpumask;
struct mm_struct;

#ifdef CONFIG_X86_UV
#include <linux/efi.h>

extern enum uv_system_type get_uv_system_type(void);
static inline bool is_early_uv_system(void)
{
return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
}
extern int is_uv_system(void);
extern int is_uv_hubless(void);
extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
#else /* X86_UV */

static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline bool is_early_uv_system(void) { return 0; }
static inline int is_uv_system(void) { return 0; }
static inline int is_uv_hubless(void) { return 0; }
static inline void uv_cpu_init(void) { }
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
if ((c->x86 == 6)) {
if (c->x86 == 6) {
/* Duron Rev A0 */
if (c->x86_model == 3 && c->x86_stepping == 0)
size = 64;
@@ -26,6 +26,7 @@
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>

unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC))
return;
/* Don't change UV TSC multi-chassis synchronization */
if (is_early_uv_system())
return;
if (!determine_cpu_tsc_frequencies(true))
return;
loops_per_jiffy = get_loops_per_jiffy();
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
*/
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

/*
* In some cases, we need to preserve the GFN of a non-present or reserved
* SPTE when we usurp the upper five bits of the physical address space to
* defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
* shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
* left into the reserved bits, i.e. the GFN in the SPTE will be split into
* high and low parts. This mask covers the lower bits of the GFN.
*/
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)

static gfn_t get_mmio_spte_gfn(u64 spte)
{
u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
shadow_nonpresent_or_rsvd_mask;
u64 gpa = spte & ~mask;
u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
& shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static void kvm_mmu_reset_all_pte_masks(void)
{
u8 low_phys_bits;

shadow_user_mask = 0;
shadow_accessed_mask = 0;
shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
*/
low_phys_bits = boot_cpu_data.x86_phys_bits;
if (boot_cpu_data.x86_phys_bits <
52 - shadow_nonpresent_or_rsvd_mask_len)
52 - shadow_nonpresent_or_rsvd_mask_len) {
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_phys_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_phys_bits - 1);
low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
}
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}

static int is_cpuid_PSE36(void)
@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
|
|||
|
||||
#define MSR_BITMAP_MODE_X2APIC 1
|
||||
#define MSR_BITMAP_MODE_X2APIC_APICV 2
|
||||
#define MSR_BITMAP_MODE_LM 4
|
||||
|
||||
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
|
||||
|
||||
|
@ -857,6 +856,7 @@ struct nested_vmx {
|
|||
|
||||
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
|
||||
u64 vmcs01_debugctl;
|
||||
u64 vmcs01_guest_bndcfgs;
|
||||
|
||||
u16 vpid02;
|
||||
u16 last_vpid;
|
||||
|
@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
|
|||
vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
|
||||
}
|
||||
|
||||
if (is_long_mode(&vmx->vcpu))
|
||||
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
|
||||
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
|
||||
#else
|
||||
savesegment(fs, fs_sel);
|
||||
savesegment(gs, gs_sel);
|
||||
|
@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
|
|||
vmx->loaded_cpu_state = NULL;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
if (is_long_mode(&vmx->vcpu))
|
||||
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
|
||||
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
|
||||
#endif
|
||||
if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
|
||||
kvm_load_ldt(host_state->ldt_sel);
|
||||
|
@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
|
|||
#ifdef CONFIG_X86_64
|
||||
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
|
||||
{
|
||||
if (is_long_mode(&vmx->vcpu)) {
|
||||
preempt_disable();
|
||||
if (vmx->loaded_cpu_state)
|
||||
rdmsrl(MSR_KERNEL_GS_BASE,
|
||||
vmx->msr_guest_kernel_gs_base);
|
||||
preempt_enable();
|
||||
}
|
||||
preempt_disable();
|
||||
if (vmx->loaded_cpu_state)
|
||||
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
|
||||
preempt_enable();
|
||||
return vmx->msr_guest_kernel_gs_base;
|
||||
}
|
||||
|
||||
static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
|
||||
{
|
||||
if (is_long_mode(&vmx->vcpu)) {
|
||||
preempt_disable();
|
||||
if (vmx->loaded_cpu_state)
|
||||
wrmsrl(MSR_KERNEL_GS_BASE, data);
|
||||
preempt_enable();
|
||||
}
|
||||
preempt_disable();
|
||||
if (vmx->loaded_cpu_state)
|
||||
wrmsrl(MSR_KERNEL_GS_BASE, data);
|
||||
preempt_enable();
|
||||
vmx->msr_guest_kernel_gs_base = data;
|
||||
}
|
||||
#endif
|
||||
|
@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
|
|||
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
|
||||
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
|
||||
|
||||
if (kvm_mpx_supported())
|
||||
msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
|
||||
|
||||
/* We support free control of debug control saving. */
|
||||
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
|
||||
|
||||
|
@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
|
|||
VM_ENTRY_LOAD_IA32_PAT;
|
||||
msrs->entry_ctls_high |=
|
||||
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
|
||||
if (kvm_mpx_supported())
|
||||
msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
|
||||
|
||||
/* We support free control of debug control loading. */
|
||||
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
|
||||
|
@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
|
|||
msrs->secondary_ctls_high);
|
||||
msrs->secondary_ctls_low = 0;
|
||||
msrs->secondary_ctls_high &=
|
||||
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
||||
SECONDARY_EXEC_DESC |
|
||||
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
||||
SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
||||
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
||||
SECONDARY_EXEC_WBINVD_EXITING;
|
||||
|
||||
/*
|
||||
* We can emulate "VMCS shadowing," even if the hardware
|
||||
* doesn't support it.
|
||||
|
@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
|
|||
msrs->secondary_ctls_high |=
|
||||
SECONDARY_EXEC_UNRESTRICTED_GUEST;
|
||||
|
||||
if (flexpriority_enabled)
|
||||
msrs->secondary_ctls_high |=
|
||||
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
|
||||
|
||||
/* miscellaneous data */
|
||||
rdmsr(MSR_IA32_VMX_MISC,
|
||||
msrs->misc_low,
|
||||
|
@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
|||
if (!msr)
|
||||
return;
|
||||
|
||||
/*
|
||||
* MSR_KERNEL_GS_BASE is not intercepted when the guest is in
|
||||
* 64-bit mode as a 64-bit kernel may frequently access the
|
||||
* MSR. This means we need to manually save/restore the MSR
|
||||
* when switching between guest and host state, but only if
|
||||
* the guest is in 64-bit mode. Sync our cached value if the
|
||||
* guest is transitioning to 32-bit mode and the CPU contains
|
||||
* guest state, i.e. the cache is stale.
|
||||
*/
|
||||
#ifdef CONFIG_X86_64
|
||||
if (!(efer & EFER_LMA))
|
||||
(void)vmx_read_guest_kernel_gs_base(vmx);
|
||||
#endif
|
||||
vcpu->arch.efer = efer;
|
||||
if (efer & EFER_LMA) {
|
||||
vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
|
||||
|
@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
|
|||
mode |= MSR_BITMAP_MODE_X2APIC_APICV;
|
||||
}
|
||||
|
||||
if (is_long_mode(vcpu))
|
||||
mode |= MSR_BITMAP_MODE_LM;
|
||||
|
||||
return mode;
|
||||
}
|
||||
|
||||
|
@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
|
|||
if (!changed)
|
||||
return;
|
||||
|
||||
vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
|
||||
!(mode & MSR_BITMAP_MODE_LM));
|
||||
|
||||
if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
|
||||
vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
|
||||
|
||||
|
@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
|
|||
nested_mark_vmcs12_pages_dirty(vcpu);
|
||||
}
|
||||
|
||||
static u8 vmx_get_rvi(void)
|
||||
{
|
||||
return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
|
||||
}
|
||||
|
||||
static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
|
@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
|
|||
WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
|
||||
return false;
|
||||
|
||||
rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
|
||||
rvi = vmx_get_rvi();
|
||||
|
||||
vapic_page = kmap(vmx->nested.virtual_apic_page);
|
||||
vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
|
||||
|
@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
|
|||
if (!lapic_in_kernel(vcpu))
|
||||
return;
|
||||
|
||||
if (!flexpriority_enabled &&
|
||||
!cpu_has_vmx_virtualize_x2apic_mode())
|
||||
return;
|
||||
|
||||
/* Postpone execution until vmcs01 is the current VMCS. */
|
||||
if (is_guest_mode(vcpu)) {
|
||||
to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!cpu_need_tpr_shadow(vcpu))
|
||||
return;
|
||||
|
||||
sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
|
||||
sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
||||
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
|
||||
|
@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
|
|||
return max_irr;
|
||||
}
|
||||
|
||||
static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u8 rvi = vmx_get_rvi();
|
||||
u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
|
||||
|
||||
return ((rvi & 0xf0) > (vppr & 0xf0));
|
||||
}
|
||||
|
||||
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
|
||||
{
|
||||
if (!kvm_vcpu_apicv_active(vcpu))
|
||||
|
@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
|
|||
#undef cr4_fixed1_update
|
||||
}
|
||||
|
||||
static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
|
||||
if (kvm_mpx_supported()) {
|
||||
bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
|
||||
|
||||
if (mpx_enabled) {
|
||||
vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
|
||||
vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
|
||||
} else {
|
||||
vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
|
||||
vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
|
@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
|
|||
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
|
||||
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
|
||||
|
||||
if (nested_vmx_allowed(vcpu))
|
||||
if (nested_vmx_allowed(vcpu)) {
|
||||
nested_vmx_cr_fixed1_bits_update(vcpu);
|
||||
nested_vmx_entry_exit_ctls_update(vcpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
|
||||
|
@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|||
|
||||
set_cr4_guest_host_mask(vmx);
|
||||
|
||||
if (vmx_mpx_supported())
|
||||
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
|
||||
if (kvm_mpx_supported()) {
|
||||
if (vmx->nested.nested_run_pending &&
|
||||
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
|
||||
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
|
||||
else
|
||||
vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
|
||||
}
|
||||
|
||||
if (enable_vpid) {
|
||||
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
|
||||
|
@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
|
|||
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
||||
bool from_vmentry = !!exit_qual;
|
||||
u32 dummy_exit_qual;
|
||||
u32 vmcs01_cpu_exec_ctrl;
|
||||
bool evaluate_pending_interrupts;
|
||||
int r = 0;
|
||||
|
||||
vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
|
||||
evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
|
||||
(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
|
||||
if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
|
||||
evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
|
||||
|
||||
enter_guest_mode(vcpu);
|
||||
|
||||
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
|
||||
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
|
||||
if (kvm_mpx_supported() &&
|
||||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
|
||||
vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
|
||||
|
||||
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
|
||||
vmx_segment_cache_clear(vmx);
|
||||
|
@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
|
|||
* to L1 or delivered directly to L2 (e.g. In case L1 don't
|
||||
* intercept EXTERNAL_INTERRUPT).
|
||||
*
|
||||
* Usually this would be handled by L0 requesting a
|
||||
* IRQ/NMI window by setting VMCS accordingly. However,
|
||||
* this setting was done on VMCS01 and now VMCS02 is active
|
||||
* instead. Thus, we force L0 to perform pending event
|
||||
* evaluation by requesting a KVM_REQ_EVENT.
|
||||
* Usually this would be handled by the processor noticing an
|
||||
* IRQ/NMI window request, or checking RVI during evaluation of
|
||||
* pending virtual interrupts. However, this setting was done
|
||||
* on VMCS01 and now VMCS02 is active instead. Thus, we force L0
|
||||
* to perform pending event evaluation by requesting a KVM_REQ_EVENT.
|
||||
*/
|
||||
if (vmcs01_cpu_exec_ctrl &
|
||||
(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
|
||||
if (unlikely(evaluate_pending_interrupts))
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
|
||||
|
|
|
@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
|
|||
*/
|
||||
switch (msrs_to_save[i]) {
|
||||
case MSR_IA32_BNDCFGS:
|
||||
if (!kvm_x86_ops->mpx_supported())
|
||||
if (!kvm_mpx_supported())
|
||||
continue;
|
||||
break;
|
||||
case MSR_TSC_AUX:
|
||||
|
|
|
@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
|||
|
||||
dpm_wait_for_subordinate(dev, async);
|
||||
|
||||
if (async_error)
|
||||
if (async_error) {
|
||||
dev->power.direct_complete = false;
|
||||
goto Complete;
|
||||
}
|
||||
|
||||
/*
|
||||
* If a device configured to wake up the system from sleep states
|
||||
|
@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
|||
pm_wakeup_event(dev, 0);
|
||||
|
||||
if (pm_wakeup_pending()) {
|
||||
dev->power.direct_complete = false;
|
||||
async_error = -EBUSY;
|
||||
goto Complete;
|
||||
}
|
||||
|
|
|
@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
|
|||
edesc->src_nents = src_nents;
|
||||
edesc->dst_nents = dst_nents;
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
|
||||
desc_bytes;
|
||||
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
|
||||
desc_bytes);
|
||||
edesc->iv_dir = DMA_TO_DEVICE;
|
||||
|
||||
/* Make sure IV is located in a DMAable area */
|
||||
|
@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
|
|||
edesc->src_nents = src_nents;
|
||||
edesc->dst_nents = dst_nents;
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
|
||||
desc_bytes;
|
||||
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
|
||||
desc_bytes);
|
||||
edesc->iv_dir = DMA_FROM_DEVICE;
|
||||
|
||||
/* Make sure IV is located in a DMAable area */
|
||||
|
|
|
@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
|
|||
walk->to = (struct phys_sge_pairs *)(dsgl + 1);
|
||||
}
|
||||
|
||||
static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
|
||||
static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
|
||||
int pci_chan_id)
|
||||
{
|
||||
struct cpl_rx_phys_dsgl *phys_cpl;
|
||||
|
||||
|
@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
|
|||
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
|
||||
phys_cpl->rss_hdr_int.qid = htons(qid);
|
||||
phys_cpl->rss_hdr_int.hash_val = 0;
|
||||
phys_cpl->rss_hdr_int.channel = pci_chan_id;
|
||||
}
|
||||
|
||||
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
|
||||
|
@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
|
|||
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
|
||||
!!lcb, ctx->tx_qidx);
|
||||
|
||||
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
|
||||
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
|
||||
qid);
|
||||
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
|
||||
((sizeof(chcr_req->wreq)) >> 4)));
|
||||
|
@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
|
|||
adap->vres.ncrypto_fc);
|
||||
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
|
||||
txq_perchan = ntxq / u_ctx->lldi.nchan;
|
||||
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
|
||||
rxq_idx += id % rxq_perchan;
|
||||
txq_idx = ctx->dev->tx_channel_id * txq_perchan;
|
||||
txq_idx += id % txq_perchan;
|
||||
spin_lock(&ctx->dev->lock_chcr_dev);
|
||||
ctx->rx_qidx = rxq_idx;
|
||||
ctx->tx_qidx = txq_idx;
|
||||
ctx->tx_chan_id = ctx->dev->tx_channel_id;
|
||||
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
|
||||
ctx->dev->rx_channel_id = 0;
|
||||
spin_unlock(&ctx->dev->lock_chcr_dev);
|
||||
rxq_idx = ctx->tx_chan_id * rxq_perchan;
|
||||
rxq_idx += id % rxq_perchan;
|
||||
txq_idx = ctx->tx_chan_id * txq_perchan;
|
||||
txq_idx += id % txq_perchan;
|
||||
ctx->rx_qidx = rxq_idx;
|
||||
ctx->tx_qidx = txq_idx;
|
||||
/* Channel Id used by SGE to forward packet to Host.
|
||||
* Same value should be used in cpl_fw6_pld RSS_CH field
|
||||
* by FW. Driver programs PCI channel ID to be used in fw
|
||||
* at the time of queue allocation with value "pi->tx_chan"
|
||||
*/
|
||||
ctx->pci_chan_id = txq_idx / txq_perchan;
|
||||
}
|
||||
out:
|
||||
return err;
|
||||
|
@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
|
|||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct dsgl_walk dsgl_walk;
|
||||
unsigned int authsize = crypto_aead_authsize(tfm);
|
||||
struct chcr_context *ctx = a_ctx(tfm);
|
||||
u32 temp;
|
||||
|
||||
dsgl_walk_init(&dsgl_walk, phys_cpl);
|
||||
|
@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
|
|||
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
|
||||
temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
|
||||
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
|
||||
dsgl_walk_end(&dsgl_walk, qid);
|
||||
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
|
||||
}
|
||||
|
||||
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
|
||||
|
@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
|
|||
unsigned short qid)
|
||||
{
|
||||
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
|
||||
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
|
||||
struct chcr_context *ctx = c_ctx(tfm);
|
||||
struct dsgl_walk dsgl_walk;
|
||||
|
||||
dsgl_walk_init(&dsgl_walk, phys_cpl);
|
||||
|
@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
|
|||
reqctx->dstsg = dsgl_walk.last_sg;
|
||||
reqctx->dst_ofst = dsgl_walk.last_sg_len;
|
||||
|
||||
dsgl_walk_end(&dsgl_walk, qid);
|
||||
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
|
||||
}
|
||||
|
||||
void chcr_add_hash_src_ent(struct ahash_request *req,
|
||||
|
|
|
@ -255,6 +255,8 @@ struct chcr_context {
|
|||
struct chcr_dev *dev;
|
||||
unsigned char tx_qidx;
|
||||
unsigned char rx_qidx;
|
||||
unsigned char tx_chan_id;
|
||||
unsigned char pci_chan_id;
|
||||
struct __crypto_ctx crypto_ctx[0];
|
||||
};
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ struct dcp {
|
|||
struct dcp_coherent_block *coh;
|
||||
|
||||
struct completion completion[DCP_MAX_CHANS];
|
||||
struct mutex mutex[DCP_MAX_CHANS];
|
||||
spinlock_t lock[DCP_MAX_CHANS];
|
||||
struct task_struct *thread[DCP_MAX_CHANS];
|
||||
struct crypto_queue queue[DCP_MAX_CHANS];
|
||||
};
|
||||
|
@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
|
|||
|
||||
int ret;
|
||||
|
||||
do {
|
||||
__set_current_state(TASK_INTERRUPTIBLE);
|
||||
while (!kthread_should_stop()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
mutex_lock(&sdcp->mutex[chan]);
|
||||
spin_lock(&sdcp->lock[chan]);
|
||||
backlog = crypto_get_backlog(&sdcp->queue[chan]);
|
||||
arq = crypto_dequeue_request(&sdcp->queue[chan]);
|
||||
mutex_unlock(&sdcp->mutex[chan]);
|
||||
spin_unlock(&sdcp->lock[chan]);
|
||||
|
||||
if (!backlog && !arq) {
|
||||
schedule();
|
||||
continue;
|
||||
}
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
|
||||
if (backlog)
|
||||
backlog->complete(backlog, -EINPROGRESS);
|
||||
|
@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
|
|||
if (arq) {
|
||||
ret = mxs_dcp_aes_block_crypt(arq);
|
||||
arq->complete(arq, ret);
|
||||
continue;
|
||||
}
|
||||
|
||||
schedule();
|
||||
} while (!kthread_should_stop());
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
|
|||
rctx->ecb = ecb;
|
||||
actx->chan = DCP_CHAN_CRYPTO;
|
||||
|
||||
mutex_lock(&sdcp->mutex[actx->chan]);
|
||||
spin_lock(&sdcp->lock[actx->chan]);
|
||||
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
|
||||
mutex_unlock(&sdcp->mutex[actx->chan]);
|
||||
spin_unlock(&sdcp->lock[actx->chan]);
|
||||
|
||||
wake_up_process(sdcp->thread[actx->chan]);
|
||||
|
||||
|
@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
|
|||
struct ahash_request *req;
|
||||
int ret, fini;
|
||||
|
||||
do {
|
||||
__set_current_state(TASK_INTERRUPTIBLE);
|
||||
while (!kthread_should_stop()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
mutex_lock(&sdcp->mutex[chan]);
|
||||
spin_lock(&sdcp->lock[chan]);
|
||||
backlog = crypto_get_backlog(&sdcp->queue[chan]);
|
||||
arq = crypto_dequeue_request(&sdcp->queue[chan]);
|
||||
mutex_unlock(&sdcp->mutex[chan]);
|
||||
spin_unlock(&sdcp->lock[chan]);
|
||||
|
||||
if (!backlog && !arq) {
|
||||
schedule();
|
||||
continue;
|
||||
}
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
|
||||
if (backlog)
|
||||
backlog->complete(backlog, -EINPROGRESS);
|
||||
|
@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
|
|||
ret = dcp_sha_req_to_buf(arq);
|
||||
fini = rctx->fini;
|
||||
arq->complete(arq, ret);
|
||||
if (!fini)
|
||||
continue;
|
||||
}
|
||||
|
||||
schedule();
|
||||
} while (!kthread_should_stop());
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
|
|||
rctx->init = 1;
|
||||
}
|
||||
|
||||
mutex_lock(&sdcp->mutex[actx->chan]);
|
||||
spin_lock(&sdcp->lock[actx->chan]);
|
||||
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
|
||||
mutex_unlock(&sdcp->mutex[actx->chan]);
|
||||
spin_unlock(&sdcp->lock[actx->chan]);
|
||||
|
||||
wake_up_process(sdcp->thread[actx->chan]);
|
||||
mutex_unlock(&actx->mutex);
|
||||
|
@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
|
|||
platform_set_drvdata(pdev, sdcp);
|
||||
|
||||
for (i = 0; i < DCP_MAX_CHANS; i++) {
|
||||
mutex_init(&sdcp->mutex[i]);
|
||||
spin_lock_init(&sdcp->lock[i]);
|
||||
init_completion(&sdcp->completion[i]);
|
||||
crypto_init_queue(&sdcp->queue[i], 50);
|
||||
}
|
||||
|
|
|
@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct adf_hw_device_data *hw_data;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
int ret, bar_mask;
|
||||
unsigned long bar_mask;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_C3XXX_PCI_DEVICE_ID:
|
||||
|
@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* Find and map all the device's BARS */
|
||||
i = 0;
|
||||
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
|
||||
ADF_PCI_MAX_BARS * 2) {
|
||||
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
|
||||
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
|
|
|
@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct adf_hw_device_data *hw_data;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
int ret, bar_mask;
|
||||
unsigned long bar_mask;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_C3XXXIOV_PCI_DEVICE_ID:
|
||||
|
@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* Find and map all the device's BARS */
|
||||
i = 0;
|
||||
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
|
||||
ADF_PCI_MAX_BARS * 2) {
|
||||
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
|
||||
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
|
|
|
@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct adf_hw_device_data *hw_data;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
int ret, bar_mask;
|
||||
unsigned long bar_mask;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_C62X_PCI_DEVICE_ID:
|
||||
|
@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* Find and map all the device's BARS */
|
||||
i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
|
||||
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
|
||||
ADF_PCI_MAX_BARS * 2) {
|
||||
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
|
||||
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
|
|
|
@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct adf_hw_device_data *hw_data;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
int ret, bar_mask;
|
||||
unsigned long bar_mask;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_C62XIOV_PCI_DEVICE_ID:
|
||||
|
@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* Find and map all the device's BARS */
|
||||
i = 0;
|
||||
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
|
||||
ADF_PCI_MAX_BARS * 2) {
|
||||
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
|
||||
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
|
|
|
@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct adf_hw_device_data *hw_data;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
int ret, bar_mask;
|
||||
unsigned long bar_mask;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_DH895XCC_PCI_DEVICE_ID:
|
||||
|
@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* Find and map all the device's BARS */
|
||||
i = 0;
|
||||
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
|
||||
ADF_PCI_MAX_BARS * 2) {
|
||||
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
|
||||
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
|
|
|
@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
struct adf_hw_device_data *hw_data;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
int ret, bar_mask;
|
||||
unsigned long bar_mask;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_DH895XCCIOV_PCI_DEVICE_ID:
|
||||
|
@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* Find and map all the device's BARS */
|
||||
i = 0;
|
||||
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
|
||||
ADF_PCI_MAX_BARS * 2) {
|
||||
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
|
||||
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
|
|
|
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
count = i;
count = i + 1;

if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
|
|||
struct queue *q,
|
||||
struct qcm_process_device *qpd)
|
||||
{
|
||||
int retval;
|
||||
struct mqd_manager *mqd_mgr;
|
||||
int retval;
|
||||
|
||||
mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
|
||||
if (!mqd_mgr)
|
||||
|
@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
|
|||
if (!q->properties.is_active)
|
||||
return 0;
|
||||
|
||||
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
|
||||
&q->properties, q->process->mm);
|
||||
if (WARN(q->process->mm != current->mm,
|
||||
"should only run in user thread"))
|
||||
retval = -EFAULT;
|
||||
else
|
||||
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
|
||||
&q->properties, current->mm);
|
||||
if (retval)
|
||||
goto out_uninit_mqd;
|
||||
|
||||
|
@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
|
|||
retval = map_queues_cpsch(dqm);
|
||||
else if (q->properties.is_active &&
|
||||
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
|
||||
q->properties.type == KFD_QUEUE_TYPE_SDMA))
|
||||
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
|
||||
&q->properties, q->process->mm);
|
||||
q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
|
||||
if (WARN(q->process->mm != current->mm,
|
||||
"should only run in user thread"))
|
||||
retval = -EFAULT;
|
||||
else
|
||||
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
|
||||
q->pipe, q->queue,
|
||||
&q->properties, current->mm);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
dqm_unlock(dqm);
|
||||
|
@ -653,6 +663,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
|
|||
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
|
||||
struct qcm_process_device *qpd)
|
||||
{
|
||||
struct mm_struct *mm = NULL;
|
||||
struct queue *q;
|
||||
struct mqd_manager *mqd_mgr;
|
||||
struct kfd_process_device *pdd;
|
||||
|
@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
|
|||
kfd_flush_tlb(pdd);
|
||||
}
|
||||
|
||||
/* Take a safe reference to the mm_struct, which may otherwise
|
||||
* disappear even while the kfd_process is still referenced.
|
||||
*/
|
||||
mm = get_task_mm(pdd->process->lead_thread);
|
||||
if (!mm) {
|
||||
retval = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* activate all active queues on the qpd */
|
||||
list_for_each_entry(q, &qpd->queues_list, list) {
|
||||
if (!q->properties.is_evicted)
|
||||
|
@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
|
|||
q->properties.is_evicted = false;
|
||||
q->properties.is_active = true;
|
||||
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
|
||||
q->queue, &q->properties,
|
||||
q->process->mm);
|
||||
q->queue, &q->properties, mm);
|
||||
if (retval)
|
||||
goto out;
|
||||
dqm->queue_count++;
|
||||
}
|
||||
qpd->evicted = 0;
|
||||
out:
|
||||
if (mm)
|
||||
mmput(mm);
|
||||
dqm_unlock(dqm);
|
||||
return retval;
|
||||
}
|
||||
|
|
|
@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
|||
}
|
||||
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
|
||||
|
||||
/* Signal HW programming completion */
|
||||
drm_atomic_helper_commit_hw_done(state);
|
||||
|
||||
if (wait_for_vblank)
|
||||
drm_atomic_helper_wait_for_flip_done(dev, state);
|
||||
|
||||
/*
|
||||
* FIXME:
|
||||
* Delay hw_done() until flip_done() is signaled. This is to block
|
||||
* another commit from freeing the CRTC state while we're still
|
||||
* waiting on flip_done.
|
||||
*/
|
||||
drm_atomic_helper_commit_hw_done(state);
|
||||
|
||||
drm_atomic_helper_cleanup_planes(dev, state);
|
||||
|
||||
/* Finally, drop a runtime PM reference for each newly disabled CRTC,
|
||||
|
|
|
@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
|
|||
EXPORT_SYMBOL(drm_client_close);
|
||||
|
||||
/**
|
||||
* drm_client_new - Create a DRM client
|
||||
* drm_client_init - Initialise a DRM client
|
||||
* @dev: DRM device
|
||||
* @client: DRM client
|
||||
* @name: Client name
|
||||
* @funcs: DRM client functions (optional)
|
||||
*
|
||||
* This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
|
||||
* The caller needs to hold a reference on @dev before calling this function.
|
||||
* The client is freed when the &drm_device is unregistered. See drm_client_release().
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success or negative error code on failure.
|
||||
*/
|
||||
int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
|
||||
const char *name, const struct drm_client_funcs *funcs)
|
||||
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
|
||||
const char *name, const struct drm_client_funcs *funcs)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
|
|||
if (ret)
|
||||
goto err_put_module;
|
||||
|
||||
mutex_lock(&dev->clientlist_mutex);
|
||||
list_add(&client->list, &dev->clientlist);
|
||||
mutex_unlock(&dev->clientlist_mutex);
|
||||
|
||||
drm_dev_get(dev);
|
||||
|
||||
return 0;
|
||||
|
@ -109,13 +106,33 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
|
|||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_client_new);
|
||||
EXPORT_SYMBOL(drm_client_init);
|
||||
|
||||
/**
|
||||
* drm_client_add - Add client to the device list
|
||||
* @client: DRM client
|
||||
*
|
||||
* Add the client to the &drm_device client list to activate its callbacks.
|
||||
* @client must be initialized by a call to drm_client_init(). After
|
||||
* drm_client_add() it is no longer permissible to call drm_client_release()
|
||||
* directly (outside the unregister callback), instead cleanup will happen
|
||||
* automatically on driver unload.
|
||||
*/
|
||||
void drm_client_add(struct drm_client_dev *client)
|
||||
{
|
||||
struct drm_device *dev = client->dev;
|
||||
|
||||
mutex_lock(&dev->clientlist_mutex);
|
||||
list_add(&client->list, &dev->clientlist);
|
||||
mutex_unlock(&dev->clientlist_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_client_add);
|
||||
|
||||
/**
|
||||
* drm_client_release - Release DRM client resources
|
||||
* @client: DRM client
|
||||
*
|
||||
* Releases resources by closing the &drm_file that was opened by drm_client_new().
|
||||
* Releases resources by closing the &drm_file that was opened by drm_client_init().
|
||||
* It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
|
||||
*
|
||||
* This function should only be called from the unregister callback. An exception
|
||||
|
|
|
@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
|
|||
|
||||
fb_helper = &fbdev_cma->fb_helper;
|
||||
|
||||
ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
|
||||
ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
|
@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
|
|||
if (ret)
|
||||
goto err_client_put;
|
||||
|
||||
drm_client_add(&fb_helper->client);
|
||||
|
||||
return fbdev_cma;
|
||||
|
||||
err_client_put:
|
||||
|
|
|
@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
|
|||
if (!fb_helper)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
|
||||
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
|
||||
if (ret) {
|
||||
kfree(fb_helper);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_client_add(&fb_helper->client);
|
||||
|
||||
fb_helper->preferred_bpp = preferred_bpp;
|
||||
|
||||
drm_fbdev_client_hotplug(&fb_helper->client);
|
||||
|
|
|
@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
|
|||
lessee_priv->is_master = 1;
|
||||
lessee_priv->authenticated = 1;
|
||||
|
||||
/* Hook up the fd */
|
||||
fd_install(fd, lessee_file);
|
||||
|
||||
/* Pass fd back to userspace */
|
||||
DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
|
||||
cl->fd = fd;
|
||||
cl->lessee_id = lessee->lessee_id;
|
||||
|
||||
/* Hook up the fd */
|
||||
fd_install(fd, lessee_file);
|
||||
|
||||
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
|
|||
static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
|
||||
unsigned long start, unsigned long size)
|
||||
{
|
||||
struct iommu_domain *domain;
|
||||
int ret;
|
||||
|
||||
domain = iommu_domain_alloc(priv->dma_dev->bus);
|
||||
if (!domain)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = iommu_get_dma_cookie(domain);
|
||||
if (ret)
|
||||
goto free_domain;
|
||||
|
||||
ret = iommu_dma_init_domain(domain, start, size, NULL);
|
||||
if (ret)
|
||||
goto put_cookie;
|
||||
|
||||
priv->mapping = domain;
|
||||
priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
|
||||
return 0;
|
||||
|
||||
put_cookie:
|
||||
iommu_put_dma_cookie(domain);
|
||||
free_domain:
|
||||
iommu_domain_free(domain);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
|
||||
{
|
||||
struct iommu_domain *domain = priv->mapping;
|
||||
|
||||
iommu_put_dma_cookie(domain);
|
||||
iommu_domain_free(domain);
|
||||
priv->mapping = NULL;
|
||||
}
|
||||
|
||||
|
@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
|
|||
{
|
||||
struct iommu_domain *domain = priv->mapping;
|
||||
|
||||
return iommu_attach_device(domain, dev);
|
||||
if (dev != priv->dma_dev)
|
||||
return iommu_attach_device(domain, dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
|
||||
|
@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
|
|||
{
|
||||
struct iommu_domain *domain = priv->mapping;
|
||||
|
||||
iommu_detach_device(domain, dev);
|
||||
if (dev != priv->dma_dev)
|
||||
iommu_detach_device(domain, dev);
|
||||
}
|
||||
#else
|
||||
#error Unsupported architecture and IOMMU/DMA-mapping glue code
|
||||
|
|
|
@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
|
|||
break;
|
||||
}
|
||||
/* TDA9950 executes all retries for us */
|
||||
tx_status |= CEC_TX_STATUS_MAX_RETRIES;
|
||||
if (tx_status != CEC_TX_STATUS_OK)
|
||||
tx_status |= CEC_TX_STATUS_MAX_RETRIES;
|
||||
cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
|
||||
nack_cnt, 0, err_cnt);
|
||||
break;
|
||||
|
@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
|
|||
/* Wait up to .5s for it to signal non-busy */
|
||||
do {
|
||||
csr = tda9950_read(client, REG_CSR);
|
||||
if (!(csr & CSR_BUSY) || --timeout)
|
||||
if (!(csr & CSR_BUSY) || !--timeout)
|
||||
break;
|
||||
msleep(10);
|
||||
} while (1);
|
||||
|
|
|
@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
|
|||
return true;
|
||||
}
|
||||
|
||||
static void *compress_next_page(struct drm_i915_error_object *dst)
|
||||
{
|
||||
unsigned long page;
|
||||
|
||||
if (dst->page_count >= dst->num_pages)
|
||||
return ERR_PTR(-ENOSPC);
|
||||
|
||||
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!page)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
return dst->pages[dst->page_count++] = (void *)page;
|
||||
}
|
||||
|
||||
static int compress_page(struct compress *c,
|
||||
void *src,
|
||||
struct drm_i915_error_object *dst)
|
||||
|
@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
|
|||
|
||||
do {
|
||||
if (zstream->avail_out == 0) {
|
||||
unsigned long page;
|
||||
zstream->next_out = compress_next_page(dst);
|
||||
if (IS_ERR(zstream->next_out))
|
||||
return PTR_ERR(zstream->next_out);
|
||||
|
||||
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
|
||||
dst->pages[dst->page_count++] = (void *)page;
|
||||
|
||||
zstream->next_out = (void *)page;
|
||||
zstream->avail_out = PAGE_SIZE;
|
||||
}
|
||||
|
||||
if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
|
||||
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
|
||||
return -EIO;
|
||||
} while (zstream->avail_in);
|
||||
|
||||
|
@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int compress_flush(struct compress *c,
|
||||
struct drm_i915_error_object *dst)
|
||||
{
|
||||
struct z_stream_s *zstream = &c->zstream;
|
||||
|
||||
do {
|
||||
switch (zlib_deflate(zstream, Z_FINISH)) {
|
||||
case Z_OK: /* more space requested */
|
||||
zstream->next_out = compress_next_page(dst);
|
||||
if (IS_ERR(zstream->next_out))
|
||||
return PTR_ERR(zstream->next_out);
|
||||
|
||||
zstream->avail_out = PAGE_SIZE;
|
||||
break;
|
||||
|
||||
case Z_STREAM_END:
|
||||
goto end;
|
||||
|
||||
default: /* any error */
|
||||
return -EIO;
|
||||
}
|
||||
} while (1);
|
||||
|
||||
end:
|
||||
memset(zstream->next_out, 0, zstream->avail_out);
|
||||
dst->unused = zstream->avail_out;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void compress_fini(struct compress *c,
|
||||
struct drm_i915_error_object *dst)
|
||||
{
|
||||
struct z_stream_s *zstream = &c->zstream;
|
||||
|
||||
if (dst) {
|
||||
zlib_deflate(zstream, Z_FINISH);
|
||||
dst->unused = zstream->avail_out;
|
||||
}
|
||||
|
||||
zlib_deflateEnd(zstream);
|
||||
kfree(zstream->workspace);
|
||||
|
||||
if (c->tmp)
|
||||
free_page((unsigned long)c->tmp);
|
||||
}
|
||||
|
@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int compress_flush(struct compress *c,
|
||||
struct drm_i915_error_object *dst)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void compress_fini(struct compress *c,
|
||||
struct drm_i915_error_object *dst)
|
||||
{
|
||||
|
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;
	int ret;

	if (!vma)
		return NULL;

@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->num_pages = num_pages;
	dst->page_count = 0;
	dst->unused = 0;

@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
		return NULL;
	}

	ret = -EINVAL;
	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;
		int ret;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		ret = compress_page(&compress, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);

		if (ret)
			goto unwind;
			break;
	}
	goto out;

unwind:
	while (dst->page_count--)
		free_page((unsigned long)dst->pages[dst->page_count]);
	kfree(dst);
	dst = NULL;
	if (ret || compress_flush(&compress, dst)) {
		while (dst->page_count--)
			free_page((unsigned long)dst->pages[dst->page_count]);
		kfree(dst);
		dst = NULL;
	}

out:
	compress_fini(&compress, dst);
	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
	return dst;

@@ -135,6 +135,7 @@ struct i915_gpu_state {
struct drm_i915_error_object {
	u64 gtt_offset;
	u64 gtt_size;
	int num_pages;
	int page_count;
	int unused;
	u32 *pages[0];

@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
	spin_unlock(&i915->irq_lock);
}

static void
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
		      u32 *iir)
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
	void __iomem * const regs = dev_priv->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return;
		return 0;

	*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(*iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
			  const u32 master_ctl, const u32 iir)
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
{
	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return;

	if (unlikely(!iir)) {
		DRM_ERROR("GU_MISC iir blank!\n");
		return;
	}

	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(dev_priv);
	else
		DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)

@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
		enable_rpm_wakeref_asserts(i915);
	}

	gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	/* Acknowledge and enable interrupts. */
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);

	gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	return IRQ_HANDLED;
}

@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
	GEN10_FEATURES, \
	GEN(11), \
	.ddb_size = 2048, \
	.has_csr = 0, \
	.has_logical_ring_elsq = 1

static const struct intel_device_info intel_icelake_11_info = {

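/*
 * Illustrative aside (not part of the diff above): the gen11 change turns
 * the "ack" helper from filling an out-parameter into returning the latched
 * status word, so the caller reads naturally as "status = ack(); ...;
 * handle(status);".  The register layout and the names misc_irq_ack() and
 * MISC_IRQ_BIT below are hypothetical, for this sketch only.
 */
#include <stdint.h>

#define MISC_IRQ_BIT	(1u << 29)	/* hypothetical master-control bit */

/* Ack a write-1-to-clear status register and hand the bits back. */
static uint32_t misc_irq_ack(volatile uint32_t *iir_reg, uint32_t master_ctl)
{
	uint32_t iir;

	if (!(master_ctl & MISC_IRQ_BIT))
		return 0;		/* nothing pending for this source */

	iir = *iir_reg;			/* read latched status */
	if (iir)
		*iir_reg = iir;		/* write the bits back to clear them */
	return iir;
}
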
@@ -3069,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = *pte & PM_ADDR_MASK;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
		if (hints_valid) {
			r = dm_array_cursor_next(&cmd->hint_cursor);
			if (r) {
				DMERR("dm_array_cursor_next for hint failed");
				goto out;
				dm_array_cursor_end(&cmd->hint_cursor);
				hints_valid = false;
			}
		}

@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;
	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
		if (cache->sized) {
			DMERR("%s: unable to extend cache due to missing cache table reload",
			      cache_device_name(cache));
			return false;
		}
	}

	/*
	 * We can't drop a dirty block when shrinking the cache.

@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
|
|||
}
|
||||
|
||||
static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
|
||||
const char *attached_handler_name, char **error)
|
||||
const char **attached_handler_name, char **error)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(bdev);
|
||||
int r;
|
||||
|
||||
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
|
||||
retain:
|
||||
if (attached_handler_name) {
|
||||
if (*attached_handler_name) {
|
||||
/*
|
||||
* Clear any hw_handler_params associated with a
|
||||
* handler that isn't already attached.
|
||||
*/
|
||||
if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
|
||||
if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
|
||||
kfree(m->hw_handler_params);
|
||||
m->hw_handler_params = NULL;
|
||||
}
|
||||
|
@ -830,7 +830,8 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
|
|||
* handler instead of the original table passed in.
|
||||
*/
|
||||
kfree(m->hw_handler_name);
|
||||
m->hw_handler_name = attached_handler_name;
|
||||
m->hw_handler_name = *attached_handler_name;
|
||||
*attached_handler_name = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
|
|||
struct pgpath *p;
|
||||
struct multipath *m = ti->private;
|
||||
struct request_queue *q;
|
||||
const char *attached_handler_name;
|
||||
const char *attached_handler_name = NULL;
|
||||
|
||||
/* we need at least a path arg */
|
||||
if (as->argc < 1) {
|
||||
|
@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
|
|||
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
|
||||
if (attached_handler_name || m->hw_handler_name) {
|
||||
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
|
||||
r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
|
||||
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
|
||||
if (r) {
|
||||
dm_put_device(ti, p->path.dev);
|
||||
goto bad;
|
||||
|
@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
|
|||
|
||||
return p;
|
||||
bad:
|
||||
kfree(attached_handler_name);
|
||||
free_pgpath(p);
|
||||
return ERR_PTR(r);
|
||||
}
|
||||
|
|
|
@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
};

/* Return enum sync_state for @mddev derived from @recovery flags */
static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		return st_frozen;

@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
	if (r) {
		DMERR("could not get size of metadata device");
		pmd->metadata_reserve = max_blocks;
	} else {
		sector_div(total, 10);
		pmd->metadata_reserve = min(max_blocks, total);
	}
	} else
		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}

struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,

@@ -1291,7 +1291,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
		b53_get_vlan_entry(dev, vid, vl);

		vl->members |= BIT(port);
		if (untagged)
		if (untagged && !dsa_is_cpu_port(ds, port))
			vl->untag |= BIT(port);
		else
			vl->untag &= ~BIT(port);

@@ -1333,7 +1333,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
			pvid = 0;
	}

	if (untagged)
	if (untagged && !dsa_is_cpu_port(ds, port))
		vl->untag &= ~(BIT(port));

	b53_set_vlan_entry(dev, vid, vl);

@ -3017,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
|
|||
{
|
||||
struct pci_dev *pdev = bp->pdev;
|
||||
|
||||
dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
|
||||
bp->hwrm_cmd_resp_dma_addr);
|
||||
|
||||
bp->hwrm_cmd_resp_addr = NULL;
|
||||
if (bp->hwrm_cmd_resp_addr) {
|
||||
dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
|
||||
bp->hwrm_cmd_resp_dma_addr);
|
||||
bp->hwrm_cmd_resp_addr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
|
||||
|
@ -4650,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
|
|||
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
||||
enables |= ring_grps ?
|
||||
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
|
||||
enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
|
||||
enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
|
||||
|
||||
req->num_rx_rings = cpu_to_le16(rx_rings);
|
||||
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
|
||||
|
@ -8621,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
|
|||
*max_tx = hw_resc->max_tx_rings;
|
||||
*max_rx = hw_resc->max_rx_rings;
|
||||
*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
|
||||
hw_resc->max_irqs);
|
||||
hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
|
||||
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
|
||||
max_ring_grps = hw_resc->max_hw_ring_grps;
|
||||
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
|
||||
|
@ -9057,6 +9058,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
bnxt_clear_int_mode(bp);
|
||||
|
||||
init_err_pci_clean:
|
||||
bnxt_free_hwrm_resources(bp);
|
||||
bnxt_cleanup_pci(bp);
|
||||
|
||||
init_err_free:
|
||||
|
|
|
@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
|
|||
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
|
||||
for (i = 0; i < max_tc; i++) {
|
||||
u8 qidx;
|
||||
u8 qidx = bp->tc_to_qidx[i];
|
||||
|
||||
req.enables |= cpu_to_le32(
|
||||
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
|
||||
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
|
||||
qidx);
|
||||
|
||||
memset(&cos2bw, 0, sizeof(cos2bw));
|
||||
qidx = bp->tc_to_qidx[i];
|
||||
cos2bw.queue_id = bp->q_info[qidx].queue_id;
|
||||
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
|
||||
cos2bw.tsa =
|
||||
|
|
|
@ -2158,6 +2158,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EPERM;
|
||||
if (copy_from_user(&t, useraddr, sizeof(t)))
|
||||
return -EFAULT;
|
||||
if (t.cmd != CHELSIO_SET_QSET_PARAMS)
|
||||
return -EINVAL;
|
||||
if (t.qset_idx >= SGE_QSETS)
|
||||
return -EINVAL;
|
||||
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
|
||||
|
@ -2257,6 +2259,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
if (copy_from_user(&t, useraddr, sizeof(t)))
|
||||
return -EFAULT;
|
||||
|
||||
if (t.cmd != CHELSIO_GET_QSET_PARAMS)
|
||||
return -EINVAL;
|
||||
|
||||
/* Display qsets for all ports when offload enabled */
|
||||
if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
|
||||
q1 = 0;
|
||||
|
@ -2302,6 +2307,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EBUSY;
|
||||
if (copy_from_user(&edata, useraddr, sizeof(edata)))
|
||||
return -EFAULT;
|
||||
if (edata.cmd != CHELSIO_SET_QSET_NUM)
|
||||
return -EINVAL;
|
||||
if (edata.val < 1 ||
|
||||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
|
||||
return -EINVAL;
|
||||
|
@ -2342,6 +2349,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EPERM;
|
||||
if (copy_from_user(&t, useraddr, sizeof(t)))
|
||||
return -EFAULT;
|
||||
if (t.cmd != CHELSIO_LOAD_FW)
|
||||
return -EINVAL;
|
||||
/* Check t.len sanity ? */
|
||||
fw_data = memdup_user(useraddr + sizeof(t), t.len);
|
||||
if (IS_ERR(fw_data))
|
||||
|
@ -2365,6 +2374,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EBUSY;
|
||||
if (copy_from_user(&m, useraddr, sizeof(m)))
|
||||
return -EFAULT;
|
||||
if (m.cmd != CHELSIO_SETMTUTAB)
|
||||
return -EINVAL;
|
||||
if (m.nmtus != NMTUS)
|
||||
return -EINVAL;
|
||||
if (m.mtus[0] < 81) /* accommodate SACK */
|
||||
|
@ -2406,6 +2417,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EBUSY;
|
||||
if (copy_from_user(&m, useraddr, sizeof(m)))
|
||||
return -EFAULT;
|
||||
if (m.cmd != CHELSIO_SET_PM)
|
||||
return -EINVAL;
|
||||
if (!is_power_of_2(m.rx_pg_sz) ||
|
||||
!is_power_of_2(m.tx_pg_sz))
|
||||
return -EINVAL; /* not power of 2 */
|
||||
|
@ -2439,6 +2452,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EIO; /* need the memory controllers */
|
||||
if (copy_from_user(&t, useraddr, sizeof(t)))
|
||||
return -EFAULT;
|
||||
if (t.cmd != CHELSIO_GET_MEM)
|
||||
return -EINVAL;
|
||||
if ((t.addr & 7) || (t.len & 7))
|
||||
return -EINVAL;
|
||||
if (t.mem_id == MEM_CM)
|
||||
|
@ -2491,6 +2506,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|||
return -EAGAIN;
|
||||
if (copy_from_user(&t, useraddr, sizeof(t)))
|
||||
return -EFAULT;
|
||||
if (t.cmd != CHELSIO_SET_TRACE_FILTER)
|
||||
return -EINVAL;
|
||||
|
||||
tp = (const struct trace_params *)&t.sip;
|
||||
if (t.config_tx)
|
||||
|
|
|
@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
|
|||
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_GSO_UDP_TUNNEL;
|
||||
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
|
||||
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
|
||||
be16_to_cpu(port));
|
||||
|
@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
|
|||
adapter->vxlan_port = 0;
|
||||
|
||||
netdev->hw_enc_features = 0;
|
||||
netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
|
||||
netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
|
||||
}
|
||||
|
||||
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
|
||||
|
@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
|
|||
struct be_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_GSO_UDP_TUNNEL |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
|
||||
|
|
|
@ -1755,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
|
|||
}
|
||||
|
||||
/* Set Tx descriptors fields relevant for CSUM calculation */
|
||||
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
|
||||
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
|
||||
int ip_hdr_len, int l4_proto)
|
||||
{
|
||||
u32 command;
|
||||
|
@ -2645,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
|
|||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
int ip_hdr_len = 0;
|
||||
u8 l4_proto;
|
||||
__be16 l3_proto = vlan_get_protocol(skb);
|
||||
|
||||
if (skb->protocol == htons(ETH_P_IP)) {
|
||||
if (l3_proto == htons(ETH_P_IP)) {
|
||||
struct iphdr *ip4h = ip_hdr(skb);
|
||||
|
||||
/* Calculate IPv4 checksum and L4 checksum */
|
||||
ip_hdr_len = ip4h->ihl;
|
||||
l4_proto = ip4h->protocol;
|
||||
} else if (skb->protocol == htons(ETH_P_IPV6)) {
|
||||
} else if (l3_proto == htons(ETH_P_IPV6)) {
|
||||
struct ipv6hdr *ip6h = ipv6_hdr(skb);
|
||||
|
||||
/* Read l4_protocol from one of IPv6 extra headers */
|
||||
|
@ -2664,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
|
|||
}
|
||||
|
||||
return mvpp2_txq_desc_csum(skb_network_offset(skb),
|
||||
skb->protocol, ip_hdr_len, l4_proto);
|
||||
l3_proto, ip_hdr_len, l4_proto);
|
||||
}
|
||||
|
||||
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
|
||||
|
|
|
@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
|
|||
memset(&active_cqns, 0, sizeof(active_cqns));
|
||||
|
||||
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
|
||||
u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
|
||||
|
||||
switch (event_type) {
|
||||
case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
|
||||
/* Command interface completion events are always received on
|
||||
* queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
|
||||
* are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
|
||||
*/
|
||||
switch (q->num) {
|
||||
case MLXSW_PCI_EQ_ASYNC_NUM:
|
||||
mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
|
||||
q->u.eq.ev_cmd_count++;
|
||||
break;
|
||||
case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
|
||||
case MLXSW_PCI_EQ_COMP_NUM:
|
||||
cqn = mlxsw_pci_eqe_cqn_get(eqe);
|
||||
set_bit(cqn, active_cqns);
|
||||
cq_handle = true;
|
||||
|
|
|
@ -4855,6 +4855,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
|
|||
upper_dev = info->upper_dev;
|
||||
if (info->linking)
|
||||
break;
|
||||
if (is_vlan_dev(upper_dev))
|
||||
mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
|
||||
if (netif_is_macvlan(upper_dev))
|
||||
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
|
||||
break;
|
||||
|
|
|
@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|||
sizeof(struct yamdrv_ioctl_mcs));
|
||||
if (IS_ERR(ym))
|
||||
return PTR_ERR(ym);
|
||||
if (ym->cmd != SIOCYAMSMCS)
|
||||
return -EINVAL;
|
||||
if (ym->bitrate > YAM_MAXBITRATE) {
|
||||
kfree(ym);
|
||||
return -EINVAL;
|
||||
|
@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|||
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
|
||||
return -EFAULT;
|
||||
|
||||
if (yi.cmd != SIOCYAMSCFG)
|
||||
return -EINVAL;
|
||||
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
|
||||
return -EINVAL; /* Cannot change this parameter when up */
|
||||
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
|
||||
|
|
|
@ -690,6 +690,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
|
||||
phy_interface_t interface)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
|
||||
(pl->link_an_mode == MLO_AN_INBAND &&
|
||||
phy_interface_mode_is_8023z(interface))))
|
||||
return -EINVAL;
|
||||
|
||||
if (pl->phydev)
|
||||
return -EBUSY;
|
||||
|
||||
ret = phy_attach_direct(pl->netdev, phy, 0, interface);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = phylink_bringup_phy(pl, phy);
|
||||
if (ret)
|
||||
phy_detach(phy);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* phylink_connect_phy() - connect a PHY to the phylink instance
|
||||
* @pl: a pointer to a &struct phylink returned from phylink_create()
|
||||
|
@ -707,31 +731,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
|
|||
*/
|
||||
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
|
||||
(pl->link_an_mode == MLO_AN_INBAND &&
|
||||
phy_interface_mode_is_8023z(pl->link_interface))))
|
||||
return -EINVAL;
|
||||
|
||||
if (pl->phydev)
|
||||
return -EBUSY;
|
||||
|
||||
/* Use PHY device/driver interface */
|
||||
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
|
||||
pl->link_interface = phy->interface;
|
||||
pl->link_config.interface = pl->link_interface;
|
||||
}
|
||||
|
||||
ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = phylink_bringup_phy(pl, phy);
|
||||
if (ret)
|
||||
phy_detach(phy);
|
||||
|
||||
return ret;
|
||||
return __phylink_connect_phy(pl, phy, pl->link_interface);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(phylink_connect_phy);
|
||||
|
||||
|
@ -1648,7 +1654,9 @@ static void phylink_sfp_link_up(void *upstream)
|
|||
|
||||
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
|
||||
{
|
||||
return phylink_connect_phy(upstream, phy);
|
||||
struct phylink *pl = upstream;
|
||||
|
||||
return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
|
||||
}
|
||||
|
||||
static void phylink_sfp_disconnect_phy(void *upstream)
|
||||
|
|
|
@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
|
|||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (dev == port_dev) {
|
||||
NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
|
||||
netdev_err(dev, "Cannot enslave team device to itself\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
|
||||
vlan_uses_dev(dev)) {
|
||||
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
|
||||
|
|
|
@ -1520,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
|
|||
{
|
||||
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
|
||||
if (pdata) {
|
||||
cancel_work_sync(&pdata->set_multicast);
|
||||
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
|
||||
kfree(pdata);
|
||||
pdata = NULL;
|
||||
|
|
|
@ -1145,7 +1145,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
|
|||
{
|
||||
struct device *dev = &pcie->pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
INIT_LIST_HEAD(&pcie->resources);
|
||||
|
@ -1179,15 +1178,60 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
|
|||
resource_size(&pcie->io) - 1);
|
||||
pcie->realio.name = "PCI I/O";
|
||||
|
||||
for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
|
||||
pci_ioremap_io(i, pcie->io.start + i);
|
||||
|
||||
pci_add_resource(&pcie->resources, &pcie->realio);
|
||||
}
|
||||
|
||||
return devm_request_pci_bus_resources(dev, &pcie->resources);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is a copy of pci_host_probe(), except that it does the I/O
|
||||
* remap as the last step, once we are sure we won't fail.
|
||||
*
|
||||
* It should be removed once the I/O remap error handling issue has
|
||||
* been sorted out.
|
||||
*/
|
||||
static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
|
||||
{
|
||||
struct mvebu_pcie *pcie;
|
||||
struct pci_bus *bus, *child;
|
||||
int ret;
|
||||
|
||||
ret = pci_scan_root_bus_bridge(bridge);
|
||||
if (ret < 0) {
|
||||
dev_err(bridge->dev.parent, "Scanning root bridge failed");
|
||||
return ret;
|
||||
}
|
||||
|
||||
pcie = pci_host_bridge_priv(bridge);
|
||||
if (resource_size(&pcie->io) != 0) {
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
|
||||
pci_ioremap_io(i, pcie->io.start + i);
|
||||
}
|
||||
|
||||
bus = bridge->bus;
|
||||
|
||||
/*
|
||||
* We insert PCI resources into the iomem_resource and
|
||||
* ioport_resource trees in either pci_bus_claim_resources()
|
||||
* or pci_bus_assign_resources().
|
||||
*/
|
||||
if (pci_has_flag(PCI_PROBE_ONLY)) {
|
||||
pci_bus_claim_resources(bus);
|
||||
} else {
|
||||
pci_bus_size_bridges(bus);
|
||||
pci_bus_assign_resources(bus);
|
||||
|
||||
list_for_each_entry(child, &bus->children, node)
|
||||
pcie_bus_configure_settings(child);
|
||||
}
|
||||
|
||||
pci_bus_add_devices(bus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mvebu_pcie_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
|
@ -1268,7 +1312,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
|
|||
bridge->align_resource = mvebu_pcie_align_resource;
|
||||
bridge->msi = pcie->msi;
|
||||
|
||||
return pci_host_probe(bridge);
|
||||
return mvebu_pci_host_probe(bridge);
|
||||
}
|
||||
|
||||
static const struct of_device_id mvebu_pcie_of_match_table[] = {
|
||||
|
|
|
@@ -1289,12 +1289,12 @@ int pci_save_state(struct pci_dev *dev)
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
	if (!force && val == saved_val)
		return;

	for (;;) {

@@ -1313,25 +1313,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

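/*
 * Illustrative aside (not part of the diff above): the new "force" flag
 * skips the "already matches, nothing to do" shortcut so a saved value is
 * rewritten unconditionally.  A simplified, self-contained sketch of the
 * same read-compare-rewrite-with-retry pattern; the cfg_space[] array and
 * the read_reg()/write_reg()/restore_reg() names are stand-ins invented
 * for this example.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t cfg_space[64];		/* stand-in for device config space */

static uint32_t read_reg(int offset)            { return cfg_space[offset / 4]; }
static void write_reg(int offset, uint32_t val) { cfg_space[offset / 4] = val; }

static void restore_reg(int offset, uint32_t saved, int retry, bool force)
{
	uint32_t val = read_reg(offset);

	if (!force && val == saved)
		return;			/* hardware already holds the value */

	for (;;) {
		write_reg(offset, saved);
		if (retry-- <= 0)
			return;		/* give up after a bounded number of tries */

		if (read_reg(offset) == saved)
			return;		/* readback confirms the write stuck */
	}
}
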
@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
|
|||
|
||||
/* Flags */
|
||||
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
|
||||
#define MID_DELETED 2 /* Mid has been dequeued/deleted */
|
||||
|
||||
/* Types of response buffer returned from SendReceive2 */
|
||||
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
|
||||
|
|
|
@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
|
|||
mid->mid_state = MID_RESPONSE_RECEIVED;
|
||||
else
|
||||
mid->mid_state = MID_RESPONSE_MALFORMED;
|
||||
list_del_init(&mid->qhead);
|
||||
/*
|
||||
* Trying to handle/dequeue a mid after the send_recv()
|
||||
* function has finished processing it is a bug.
|
||||
*/
|
||||
if (mid->mid_flags & MID_DELETED)
|
||||
printk_once(KERN_WARNING
|
||||
"trying to dequeue a deleted mid\n");
|
||||
else
|
||||
list_del_init(&mid->qhead);
|
||||
spin_unlock(&GlobalMid_Lock);
|
||||
}
|
||||
|
||||
|
@ -938,8 +946,7 @@ cifs_demultiplex_thread(void *p)
|
|||
} else {
|
||||
mids[0] = server->ops->find_mid(server, buf);
|
||||
bufs[0] = buf;
|
||||
if (mids[0])
|
||||
num_mids = 1;
|
||||
num_mids = 1;
|
||||
|
||||
if (!mids[0] || !mids[0]->receive)
|
||||
length = standard_receive3(server, mids[0]);
|
||||
|
|
|
@ -1477,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
|
|||
}
|
||||
|
||||
srch_inf->entries_in_buffer = 0;
|
||||
srch_inf->index_of_last_entry = 0;
|
||||
srch_inf->index_of_last_entry = 2;
|
||||
|
||||
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
|
||||
fid->volatile_fid, 0, srch_inf);
|
||||
|
|
|
@ -142,7 +142,8 @@ void
|
|||
cifs_delete_mid(struct mid_q_entry *mid)
|
||||
{
|
||||
spin_lock(&GlobalMid_Lock);
|
||||
list_del(&mid->qhead);
|
||||
list_del_init(&mid->qhead);
|
||||
mid->mid_flags |= MID_DELETED;
|
||||
spin_unlock(&GlobalMid_Lock);
|
||||
|
||||
DeleteMidQEntry(mid);
|
||||
|
@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
|
|||
return mid;
|
||||
}
|
||||
|
||||
static void
|
||||
cifs_noop_callback(struct mid_q_entry *mid)
|
||||
{
|
||||
}
|
||||
|
||||
int
|
||||
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
||||
const int flags, const int num_rqst, struct smb_rqst *rqst,
|
||||
|
@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|||
}
|
||||
|
||||
midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
|
||||
/*
|
||||
* We don't invoke the callback compounds unless it is the last
|
||||
* request.
|
||||
*/
|
||||
if (i < num_rqst - 1)
|
||||
midQ[i]->callback = cifs_noop_callback;
|
||||
}
|
||||
|
||||
cifs_in_send_inc(ses->server);
|
||||
rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
|
||||
cifs_in_send_dec(ses->server);
|
||||
|
@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|||
midQ[i]->resp_buf = NULL;
|
||||
}
|
||||
out:
|
||||
/*
|
||||
* This will dequeue all mids. After this it is important that the
|
||||
* demultiplex_thread will not process any of these mids any futher.
|
||||
* This is prevented above by using a noop callback that will not
|
||||
* wake this thread except for the very last PDU.
|
||||
*/
|
||||
for (i = 0; i < num_rqst; i++)
|
||||
cifs_delete_mid(midQ[i]);
|
||||
add_credits(ses->server, credits, optype);
|
||||
|
|
|
@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
|
|||
ret = -EXDEV;
|
||||
if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
|
||||
goto fdput;
|
||||
ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
|
||||
ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
|
||||
fdput:
|
||||
fdput(src_file);
|
||||
return ret;
|
||||
|
|
|
@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
} else {
|
||||
WARN_ON_ONCE(!PageUptodate(page));
|
||||
iomap_page_create(inode, page);
|
||||
set_page_dirty(page);
|
||||
}
|
||||
|
||||
return length;
|
||||
|
@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
|
|||
length -= ret;
|
||||
}
|
||||
|
||||
set_page_dirty(page);
|
||||
wait_for_stable_page(page);
|
||||
return VM_FAULT_LOCKED;
|
||||
out_unlock:
|
||||
|
|
|
@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|||
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
|
||||
u64 dst_pos, u64 count)
|
||||
{
|
||||
return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
|
||||
return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
|
||||
count));
|
||||
}
|
||||
|
||||
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
|
||||
|
|
|
@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
|
|||
|
||||
res->last_used = 0;
|
||||
|
||||
spin_lock(&dlm->spinlock);
|
||||
spin_lock(&dlm->track_lock);
|
||||
list_add_tail(&res->tracking, &dlm->tracking_list);
|
||||
spin_unlock(&dlm->spinlock);
|
||||
spin_unlock(&dlm->track_lock);
|
||||
|
||||
memset(res->lvb, 0, DLM_LVB_LEN);
|
||||
memset(res->refmap, 0, sizeof(res->refmap));
|
||||
|
|
|
@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
|
|||
if (map_end & (PAGE_SIZE - 1))
|
||||
to = map_end & (PAGE_SIZE - 1);
|
||||
|
||||
retry:
|
||||
page = find_or_create_page(mapping, page_index, GFP_NOFS);
|
||||
if (!page) {
|
||||
ret = -ENOMEM;
|
||||
|
@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
|
|||
}
|
||||
|
||||
/*
|
||||
* In case PAGE_SIZE <= CLUSTER_SIZE, This page
|
||||
* can't be dirtied before we CoW it out.
|
||||
* In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
|
||||
* page, so write it back.
|
||||
*/
|
||||
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
|
||||
BUG_ON(PageDirty(page));
|
||||
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
|
||||
if (PageDirty(page)) {
|
||||
/*
|
||||
* write_on_page will unlock the page on return
|
||||
*/
|
||||
ret = write_one_page(page);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
if (!PageUptodate(page)) {
|
||||
ret = block_read_full_page(page, ocfs2_get_block);
|
||||
|
|
|
@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
|
|||
}
|
||||
|
||||
/* Try to use clone_file_range to clone up within the same fs */
|
||||
error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
|
||||
error = do_clone_file_range(old_file, 0, new_file, 0, len);
|
||||
if (!error)
|
||||
goto out;
|
||||
/* Couldn't clone, so now we try to copy the data */
|
||||
|
|
|
@ -240,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
|
|||
goto out_unlock;
|
||||
|
||||
old_cred = ovl_override_creds(file_inode(file)->i_sb);
|
||||
file_start_write(real.file);
|
||||
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
|
||||
ovl_iocb_to_rwf(iocb));
|
||||
file_end_write(real.file);
|
||||
revert_creds(old_cred);
|
||||
|
||||
/* Update size */
|
||||
|
|
|
@ -504,7 +504,7 @@ static const struct inode_operations ovl_special_inode_operations = {
|
|||
.update_time = ovl_update_time,
|
||||
};
|
||||
|
||||
const struct address_space_operations ovl_aops = {
|
||||
static const struct address_space_operations ovl_aops = {
|
||||
/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
|
||||
.direct_IO = noop_direct_IO,
|
||||
};
|
||||
|
|
|
@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
|
|||
index = NULL;
|
||||
goto out;
|
||||
}
|
||||
pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
|
||||
pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
|
||||
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
|
||||
d_inode(origin)->i_ino, name.len, name.name,
|
||||
err);
|
||||
|
|
|
@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
|
|||
const void *value, size_t size, int flags)
|
||||
{
|
||||
int err = vfs_setxattr(dentry, name, value, size, flags);
|
||||
pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
|
||||
dentry, name, (int) size, (char *) value, flags, err);
|
||||
pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
|
||||
dentry, name, min((int)size, 48), value, size, flags, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
|
|||
struct dentry *upperdentry = ovl_dentry_upper(dentry);
|
||||
struct dentry *index = NULL;
|
||||
struct inode *inode;
|
||||
struct qstr name;
|
||||
struct qstr name = { };
|
||||
int err;
|
||||
|
||||
err = ovl_get_index_name(lowerdentry, &name);
|
||||
|
@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
|
|||
goto fail;
|
||||
|
||||
out:
|
||||
kfree(name.name);
|
||||
dput(index);
|
||||
return;
|
||||
|
||||
|
|
|
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
	unsigned long *entries;
	int err;

	/*
	 * The ability to racily run the kernel stack unwinder on a running task
	 * and then observe the unwinder output is scary; while it is useful for
	 * debugging kernel issues, it can also allow an attacker to leak kernel
	 * stack contents.
	 * Doing this in a manner that is at least safe from races would require
	 * some work to ensure that the remote task can not be scheduled; and
	 * even then, this would still expose the unwinder as local attack
	 * surface.
	 * Therefore, this interface is restricted to root.
	 */
	if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EACCES;

	entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
				GFP_KERNEL);
	if (!entries)

@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
}
EXPORT_SYMBOL(vfs_clone_file_prep_inodes);

int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out, u64 len)
int do_clone_file_range(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out, u64 len)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);

@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,

	return ret;
}
EXPORT_SYMBOL(do_clone_file_range);

int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out, u64 len)
{
	int ret;

	file_start_write(file_out);
	ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
	file_end_write(file_out);

	return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);

/*

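/*
 * Illustrative aside (not part of the diff above): the split gives an
 * inner helper for callers that already hold write access to the
 * destination, plus a thin exported wrapper that takes and drops it around
 * the same core.  A generic, self-contained sketch of that layering; the
 * names clone_locked(), do_clone(), clone_range(), begin_write() and
 * end_write() are stand-ins invented for this example.
 */
static int clone_locked(int src_fd, int dst_fd, long len)
{
	(void)src_fd; (void)dst_fd; (void)len;
	return 0;			/* stand-in for the real clone/copy work */
}

static int write_depth;			/* stand-in for freeze protection */
static void begin_write(void) { write_depth++; }
static void end_write(void)   { write_depth--; }

/* Inner entry point: the caller is responsible for write protection. */
static int do_clone(int src_fd, int dst_fd, long len)
{
	return clone_locked(src_fd, dst_fd, len);
}

/* Public entry point: wraps the same core with begin/end write. */
static int clone_range(int src_fd, int dst_fd, long len)
{
	int ret;

	begin_write();
	ret = do_clone(src_fd, dst_fd, len);
	end_write();

	return ret;
}
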
@ -587,7 +587,7 @@ xfs_attr_leaf_addname(
|
|||
*/
|
||||
error = xfs_attr3_leaf_to_node(args);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
return error;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -675,7 +675,7 @@ xfs_attr_leaf_addname(
|
|||
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
|
||||
/* bp is gone due to xfs_da_shrink_inode */
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
return error;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -693,9 +693,6 @@ xfs_attr_leaf_addname(
|
|||
error = xfs_attr3_leaf_clearflag(args);
|
||||
}
|
||||
return error;
|
||||
out_defer_cancel:
|
||||
xfs_defer_cancel(args->trans);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -738,15 +735,12 @@ xfs_attr_leaf_removename(
|
|||
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
|
||||
/* bp is gone due to xfs_da_shrink_inode */
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
return error;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
out_defer_cancel:
|
||||
xfs_defer_cancel(args->trans);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -864,7 +858,7 @@ xfs_attr_node_addname(
|
|||
state = NULL;
|
||||
error = xfs_attr3_leaf_to_node(args);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
goto out;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -888,7 +882,7 @@ xfs_attr_node_addname(
|
|||
*/
|
||||
error = xfs_da3_split(state);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
goto out;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -984,7 +978,7 @@ xfs_attr_node_addname(
|
|||
if (retval && (state->path.active > 1)) {
|
||||
error = xfs_da3_join(state);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
goto out;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -1013,9 +1007,6 @@ xfs_attr_node_addname(
|
|||
if (error)
|
||||
return error;
|
||||
return retval;
|
||||
out_defer_cancel:
|
||||
xfs_defer_cancel(args->trans);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1107,7 +1098,7 @@ xfs_attr_node_removename(
|
|||
if (retval && (state->path.active > 1)) {
|
||||
error = xfs_da3_join(state);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
goto out;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -1138,7 +1129,7 @@ xfs_attr_node_removename(
|
|||
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
|
||||
/* bp is gone due to xfs_da_shrink_inode */
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
goto out;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -1150,9 +1141,6 @@ xfs_attr_node_removename(
|
|||
out:
|
||||
xfs_da_state_free(state);
|
||||
return error;
|
||||
out_defer_cancel:
|
||||
xfs_defer_cancel(args->trans);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
|
|||
blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
|
||||
&nmap);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
return error;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
|
|||
}
|
||||
ASSERT(valuelen == 0);
|
||||
return 0;
|
||||
out_defer_cancel:
|
||||
xfs_defer_cancel(args->trans);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
|
|||
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
|
||||
XFS_BMAPI_ATTRFORK, 1, &done);
|
||||
if (error)
|
||||
goto out_defer_cancel;
|
||||
return error;
|
||||
error = xfs_defer_finish(&args->trans);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
|
|||
return error;
|
||||
}
|
||||
return 0;
|
||||
out_defer_cancel:
|
||||
xfs_defer_cancel(args->trans);
|
||||
return error;
|
||||
}
|
||||
|
|
|
@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
|
|||
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
|
||||
|
||||
/*
|
||||
* Make space in the inode incore.
|
||||
* Make space in the inode incore. This needs to be undone if we fail
|
||||
* to expand the root.
|
||||
*/
|
||||
xfs_iroot_realloc(ip, 1, whichfork);
|
||||
ifp->if_flags |= XFS_IFBROOT;
|
||||
|
@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
|
|||
args.minlen = args.maxlen = args.prod = 1;
|
||||
args.wasdel = wasdel;
|
||||
*logflagsp = 0;
|
||||
if ((error = xfs_alloc_vextent(&args))) {
|
||||
ASSERT(ifp->if_broot == NULL);
|
||||
goto err1;
|
||||
}
|
||||
error = xfs_alloc_vextent(&args);
|
||||
if (error)
|
||||
goto out_root_realloc;
|
||||
|
||||
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
|
||||
ASSERT(ifp->if_broot == NULL);
|
||||
error = -ENOSPC;
|
||||
goto err1;
|
||||
goto out_root_realloc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocation can't fail, the space was reserved.
|
||||
*/
|
||||
|
@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
|
|||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
|
||||
if (!abp) {
|
||||
error = -ENOSPC;
|
||||
goto err2;
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_unreserve_dquot;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the child block.
|
||||
*/
|
||||
|
@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
|
|||
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
|
||||
return 0;
|
||||
|
||||
err2:
|
||||
out_unreserve_dquot:
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
err1:
|
||||
out_root_realloc:
|
||||
xfs_iroot_realloc(ip, -1, whichfork);
|
||||
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
|
||||
ASSERT(ifp->if_broot == NULL);
|
||||
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
|
||||
|
||||
return error;
|
||||
|
|
|
@ -1016,6 +1016,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
|
|||
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
|
||||
#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
|
||||
#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
|
||||
/* Do not use bit 15, di_flags is legacy and unchanging now */
|
||||
|
||||
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
|
||||
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
|
||||
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
|
||||
|
|
|
@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static xfs_failaddr_t
|
||||
xfs_dinode_verify_forkoff(
|
||||
struct xfs_dinode *dip,
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
if (!XFS_DFORK_Q(dip))
|
||||
return NULL;
|
||||
|
||||
switch (dip->di_format) {
|
||||
case XFS_DINODE_FMT_DEV:
|
||||
if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
|
||||
return __this_address;
|
||||
break;
|
||||
case XFS_DINODE_FMT_LOCAL: /* fall through ... */
|
||||
case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
|
||||
case XFS_DINODE_FMT_BTREE:
|
||||
if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
|
||||
return __this_address;
|
||||
break;
|
||||
default:
|
||||
return __this_address;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
xfs_failaddr_t
|
||||
xfs_dinode_verify(
|
||||
struct xfs_mount *mp,
|
||||
|
@ -470,6 +495,11 @@ xfs_dinode_verify(
|
|||
if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
|
||||
return __this_address;
|
||||
|
||||
/* check for illegal values of forkoff */
|
||||
fa = xfs_dinode_verify_forkoff(dip, mp);
|
||||
if (fa)
|
||||
return fa;
|
||||
|
||||
/* Do we have appropriate data fork formats for the mode? */
|
||||
switch (mode & S_IFMT) {
|
||||
case S_IFIFO:
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include "xfs_sb.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "xfs_rmap.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "scrub/xfs_scrub.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
|
|
|
@ -126,6 +126,7 @@ xchk_inode_flags(
|
|||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
|
||||
/* di_flags are all taken, last bit cannot be used */
|
||||
if (flags & ~XFS_DIFLAG_ANY)
|
||||
goto bad;
|
||||
|
||||
|
@ -172,8 +173,9 @@ xchk_inode_flags2(
|
|||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
|
||||
/* Unknown di_flags2 could be from a future kernel */
|
||||
if (flags2 & ~XFS_DIFLAG2_ANY)
|
||||
goto bad;
|
||||
xchk_ino_set_warning(sc, ino);
|
||||
|
||||
/* reflink flag requires reflink feature */
|
||||
if ((flags2 & XFS_DIFLAG2_REFLINK) &&
|
||||
|
|
|
@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
|
|||
struct xfs_iext_cursor icur;
|
||||
int error = 0;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
|
||||
goto out_unlock;
|
||||
|
||||
|
@ -1584,7 +1580,7 @@ xfs_swap_extent_rmap(
|
|||
tirec.br_blockcount, &irec,
|
||||
&nimaps, 0);
|
||||
if (error)
|
||||
goto out_defer;
|
||||
goto out;
|
||||
ASSERT(nimaps == 1);
|
||||
ASSERT(tirec.br_startoff == irec.br_startoff);
|
||||
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
|
||||
|
@ -1599,22 +1595,22 @@ xfs_swap_extent_rmap(
|
|||
/* Remove the mapping from the donor file. */
|
||||
error = xfs_bmap_unmap_extent(tp, tip, &uirec);
|
||||
if (error)
|
||||
goto out_defer;
|
||||
goto out;
|
||||
|
||||
/* Remove the mapping from the source file. */
|
||||
error = xfs_bmap_unmap_extent(tp, ip, &irec);
|
||||
if (error)
|
||||
goto out_defer;
|
||||
goto out;
|
||||
|
||||
/* Map the donor file's blocks into the source file. */
|
||||
error = xfs_bmap_map_extent(tp, ip, &uirec);
|
||||
if (error)
|
||||
goto out_defer;
|
||||
goto out;
|
||||
|
||||
/* Map the source file's blocks into the donor file. */
|
||||
error = xfs_bmap_map_extent(tp, tip, &irec);
|
||||
if (error)
|
||||
goto out_defer;
|
||||
goto out;
|
||||
|
||||
error = xfs_defer_finish(tpp);
|
||||
tp = *tpp;
|
||||
|
@ -1636,8 +1632,6 @@ xfs_swap_extent_rmap(
|
|||
tip->i_d.di_flags2 = tip_flags2;
|
||||
return 0;
|
||||
|
||||
out_defer:
|
||||
xfs_defer_cancel(tp);
|
||||
out:
|
||||
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
|
||||
tip->i_d.di_flags2 = tip_flags2;
|
||||
|
|
|
@ -531,6 +531,49 @@ xfs_buf_item_push(
|
|||
return rval;
|
||||
}
|
||||
|
||||
/*
|
||||
* Drop the buffer log item refcount and take appropriate action. This helper
|
||||
* determines whether the bli must be freed or not, since a decrement to zero
|
||||
* does not necessarily mean the bli is unused.
|
||||
*
|
||||
* Return true if the bli is freed, false otherwise.
|
||||
*/
|
||||
bool
|
||||
xfs_buf_item_put(
|
||||
struct xfs_buf_log_item *bip)
|
||||
{
|
||||
struct xfs_log_item *lip = &bip->bli_item;
|
||||
bool aborted;
|
||||
bool dirty;
|
||||
|
||||
/* drop the bli ref and return if it wasn't the last one */
|
||||
if (!atomic_dec_and_test(&bip->bli_refcount))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* We dropped the last ref and must free the item if clean or aborted.
|
||||
* If the bli is dirty and non-aborted, the buffer was clean in the
|
||||
* transaction but still awaiting writeback from previous changes. In
|
||||
* that case, the bli is freed on buffer writeback completion.
|
||||
*/
|
||||
aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
|
||||
XFS_FORCED_SHUTDOWN(lip->li_mountp);
|
||||
dirty = bip->bli_flags & XFS_BLI_DIRTY;
|
||||
if (dirty && !aborted)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* The bli is aborted or clean. An aborted item may be in the AIL
|
||||
* regardless of dirty state. For example, consider an aborted
|
||||
* transaction that invalidated a dirty bli and cleared the dirty
|
||||
* state.
|
||||
*/
|
||||
if (aborted)
|
||||
xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
|
||||
xfs_buf_item_relse(bip->bli_buf);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Release the buffer associated with the buf log item. If there is no dirty
|
||||
* logged data associated with the buffer recorded in the buf log item, then
|
||||
|
@ -556,76 +599,42 @@ xfs_buf_item_unlock(
|
|||
{
|
||||
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
|
||||
struct xfs_buf *bp = bip->bli_buf;
|
||||
bool aborted;
|
||||
bool hold = !!(bip->bli_flags & XFS_BLI_HOLD);
|
||||
bool dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
|
||||
bool released;
|
||||
bool hold = bip->bli_flags & XFS_BLI_HOLD;
|
||||
bool stale = bip->bli_flags & XFS_BLI_STALE;
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
bool ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
|
||||
bool ordered = bip->bli_flags & XFS_BLI_ORDERED;
|
||||
bool dirty = bip->bli_flags & XFS_BLI_DIRTY;
|
||||
#endif
|
||||
|
||||
aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
|
||||
|
||||
/* Clear the buffer's association with this transaction. */
|
||||
bp->b_transp = NULL;
|
||||
|
||||
/*
|
||||
* The per-transaction state has been copied above so clear it from the
|
||||
* bli.
|
||||
*/
|
||||
bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
|
||||
|
||||
/*
|
||||
* If the buf item is marked stale, then don't do anything. We'll
|
||||
* unlock the buffer and free the buf item when the buffer is unpinned
|
||||
* for the last time.
|
||||
*/
|
||||
if (bip->bli_flags & XFS_BLI_STALE) {
|
||||
trace_xfs_buf_item_unlock_stale(bip);
|
||||
ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
|
||||
if (!aborted) {
|
||||
atomic_dec(&bip->bli_refcount);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
trace_xfs_buf_item_unlock(bip);
|
||||
|
||||
/*
|
||||
* If the buf item isn't tracking any data, free it, otherwise drop the
|
||||
* reference we hold to it. If we are aborting the transaction, this may
|
||||
* be the only reference to the buf item, so we free it anyway
|
||||
* regardless of whether it is dirty or not. A dirty abort implies a
|
||||
* shutdown, anyway.
|
||||
*
|
||||
* The bli dirty state should match whether the blf has logged segments
|
||||
* except for ordered buffers, where only the bli should be dirty.
|
||||
*/
|
||||
ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
|
||||
(ordered && dirty && !xfs_buf_item_dirty_format(bip)));
|
||||
ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
|
||||
|
||||
/*
|
||||
* Clean buffers, by definition, cannot be in the AIL. However, aborted
|
||||
* buffers may be in the AIL regardless of dirty state. An aborted
|
||||
* transaction that invalidates a buffer already in the AIL may have
|
||||
* marked it stale and cleared the dirty state, for example.
|
||||
*
|
||||
* Therefore if we are aborting a buffer and we've just taken the last
|
||||
* reference away, we have to check if it is in the AIL before freeing
|
||||
* it. We need to free it in this case, because an aborted transaction
|
||||
* has already shut the filesystem down and this is the last chance we
|
||||
* will have to do so.
|
||||
* Clear the buffer's association with this transaction and
|
||||
* per-transaction state from the bli, which has been copied above.
|
||||
*/
|
||||
if (atomic_dec_and_test(&bip->bli_refcount)) {
|
||||
if (aborted) {
|
||||
ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
|
||||
xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
|
||||
xfs_buf_item_relse(bp);
|
||||
} else if (!dirty)
|
||||
xfs_buf_item_relse(bp);
|
||||
}
|
||||
bp->b_transp = NULL;
|
||||
bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
|
||||
|
||||
if (!hold)
|
||||
xfs_buf_relse(bp);
|
||||
/*
|
||||
* Unref the item and unlock the buffer unless held or stale. Stale
|
||||
* buffers remain locked until final unpin unless the bli is freed by
|
||||
* the unref call. The latter implies shutdown because buffer
|
||||
* invalidation dirties the bli and transaction.
|
||||
*/
|
||||
released = xfs_buf_item_put(bip);
|
||||
if (hold || (stale && !released))
|
||||
return;
|
||||
ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
|
||||
xfs_buf_relse(bp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -51,6 +51,7 @@ struct xfs_buf_log_item {
|
|||
|
||||
int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
|
||||
void xfs_buf_item_relse(struct xfs_buf *);
|
||||
bool xfs_buf_item_put(struct xfs_buf_log_item *);
|
||||
void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
|
||||
bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
|
||||
void xfs_buf_attach_iodone(struct xfs_buf *,
|
||||
|
|
|
@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
|
|||
error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
|
||||
XFS_ITRUNC_MAX_EXTENTS, &done);
|
||||
if (error)
|
||||
goto out_bmap_cancel;
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Duplicate the transaction that has the permanent
|
||||
|
@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
|
|||
out:
|
||||
*tpp = tp;
|
||||
return error;
|
||||
out_bmap_cancel:
|
||||
/*
|
||||
* If the bunmapi call encounters an error, return to the caller where
|
||||
* the transaction can be properly aborted. We just need to make sure
|
||||
* we're not holding any resources that we were not when we came in.
|
||||
*/
|
||||
xfs_defer_cancel(tp);
|
||||
goto out;
|
||||
}
|
||||
|
||||
int
|
||||
|
|
|
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
	struct inode		*inode,
	struct delayed_call	*done)
{
	char			*link;

	ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
	return XFS_I(inode)->i_df.if_u1.if_data;

	/*
	 * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
	 * if_data is junk.
	 */
	link = XFS_I(inode)->i_df.if_u1.if_data;
	if (!link)
		return ERR_PTR(-EFSCORRUPTED);
	return link;
}

STATIC int

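/*
 * Illustrative aside (not part of the diff above): get_link must never hand
 * the VFS a NULL pointer, so a corrupt inline symlink is reported by
 * encoding an errno into the returned pointer.  A minimal userspace
 * re-implementation of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom used here
 * (the kernel provides the real versions in <linux/err.h>); the lowercase
 * names below are assumptions for this sketch.
 */
#include <stdint.h>

#define MAX_ERRNO	4095

static inline void *err_ptr(long error)
{
	return (void *)(intptr_t)error;		/* e.g. err_ptr(-EFSCORRUPTED) */
}

static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)(intptr_t)ptr;
}
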
@ -1570,16 +1570,6 @@ xlog_find_zeroed(
|
|||
if (last_cycle != 0) { /* log completely written to */
|
||||
xlog_put_bp(bp);
|
||||
return 0;
|
||||
} else if (first_cycle != 1) {
|
||||
/*
|
||||
* If the cycle of the last block is zero, the cycle of
|
||||
* the first block must be 1. If it's not, maybe we're
|
||||
* not looking at a log... Bail out.
|
||||
*/
|
||||
xfs_warn(log->l_mp,
|
||||
"Log inconsistent or not a log (last==0, first!=1)");
|
||||
error = -EINVAL;
|
||||
goto bp_err;
|
||||
}
|
||||
|
||||
/* we have a partially zeroed log */
|
||||
|
|
|
@@ -352,6 +352,47 @@ xfs_reflink_convert_cow(
	return error;
}

/*
 * Find the extent that maps the given range in the COW fork. Even if the extent
 * is not shared we might have a preallocation for it in the COW fork. If so we
 * use that rather than trigger a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode *ip,
	struct xfs_bmbt_irec *imap,
	bool *shared,
	bool *found)
{
	xfs_fileoff_t offset_fsb = imap->br_startoff;
	xfs_filblks_t count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor icur;
	struct xfs_bmbt_irec got;
	bool trimmed;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
	    got.br_startoff > offset_fsb)
		return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);

	*shared = true;
	if (isnullstartblock(got.br_startblock)) {
		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(&got, offset_fsb, count_fsb);
	*imap = got;
	*found = true;
	return 0;
}

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
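Judging only from the hunk above, the helper reports its result through two out-flags. The sketch below shows how a caller might consume them; everything except xfs_find_trim_cow_extent() and the XFS types is an illustrative placeholder, and the helper is static to xfs_reflink.c, so this is a shape, not callable code elsewhere.

/* Hypothetical allocation step, declared only so the sketch is complete. */
static int demo_allocate_cow_blocks(struct xfs_inode *ip,
				    struct xfs_bmbt_irec *imap);

/* Illustrative only: *shared and *found select one of three outcomes. */
static int demo_get_cow_mapping(struct xfs_inode *ip,
				struct xfs_bmbt_irec *imap)
{
	bool shared, found;
	int error;

	error = xfs_find_trim_cow_extent(ip, imap, &shared, &found);
	if (error || !shared)
		return error;		/* hard error, or nothing to COW */
	if (found)
		return 0;		/* existing COW fork extent can be reused */
	return demo_allocate_cow_blocks(ip, imap);	/* new reservation needed */
}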
@@ -363,78 +404,64 @@ xfs_reflink_allocate_cow(
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t offset_fsb = imap->br_startoff;
	xfs_filblks_t count_fsb = imap->br_blockcount;
	struct xfs_bmbt_irec got;
	struct xfs_trans *tp = NULL;
	struct xfs_trans *tp;
	int nimaps, error = 0;
	bool trimmed;
	bool found;
	xfs_filblks_t resaligned;
	xfs_extlen_t resblks = 0;
	struct xfs_iext_cursor icur;

retry:
	ASSERT(xfs_is_reflink_inode(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	*lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, *lockmode);

	if (error)
		return error;

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_trans_cancel;

	/*
	 * Even if the extent is not shared we might have a preallocation for
	 * it in the COW fork. If so use it.
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
	    got.br_startoff <= offset_fsb) {
		*shared = true;

		/* If we have a real allocation in the COW fork we're done. */
		if (!isnullstartblock(got.br_startblock)) {
			xfs_trim_extent(&got, offset_fsb, count_fsb);
			*imap = got;
			goto convert;
		}

		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
	} else {
		error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
		if (error || !*shared)
			goto out;
	}

	if (!tp) {
		resaligned = xfs_aligned_fsb_count(imap->br_startoff,
			imap->br_blockcount, xfs_get_cowextsz_hint(ip));
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

		xfs_iunlock(ip, *lockmode);
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
		*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);

		if (error)
			return error;

		error = xfs_qm_dqattach_locked(ip, false);
		if (error)
			goto out;
		goto retry;
	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out;
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	nimaps = 1;

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
			resblks, imap, &nimaps);
	if (error)
		goto out_trans_cancel;
		goto out_unreserve;

	xfs_inode_set_cowblocks_tag(ip);

	/* Finish up. */
	error = xfs_trans_commit(tp);
	if (error)
		return error;

@@ -447,12 +474,12 @@ xfs_reflink_allocate_cow(
	return -ENOSPC;
convert:
	return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
out_trans_cancel:

out_unreserve:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out:
	if (tp)
		xfs_trans_cancel(tp);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

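Taken together, the two hunks above restructure xfs_reflink_allocate_cow() around a familiar pattern: look up under the inode lock, drop the lock to allocate a transaction (which may sleep), retake the lock, and redo the lookup because the COW fork may have changed in between. A generic, hedged sketch of that shape is below; it is not XFS code, the "transaction" is a plain allocation, and "present" stands in for "a usable COW extent already exists".

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_obj {
	struct mutex lock;
	bool present;
};

static int demo_prepare(struct demo_obj *obj)
{
	void *txn;

	mutex_lock(&obj->lock);
	if (obj->present) {			/* fast path: nothing to allocate */
		mutex_unlock(&obj->lock);
		return 0;
	}
	mutex_unlock(&obj->lock);		/* don't block while holding the lock */

	txn = kzalloc(64, GFP_KERNEL);		/* stand-in for xfs_trans_alloc() */
	if (!txn)
		return -ENOMEM;

	mutex_lock(&obj->lock);
	if (obj->present) {			/* re-check: state may have changed */
		mutex_unlock(&obj->lock);
		kfree(txn);			/* like cancelling the transaction */
		return 0;
	}
	obj->present = true;			/* the real code allocates blocks here */
	mutex_unlock(&obj->lock);
	kfree(txn);				/* the real code commits instead */
	return 0;
}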
@@ -666,14 +693,12 @@ xfs_reflink_end_cow(
		if (!del.br_blockcount)
			goto prev_extent;

		ASSERT(!isnullstartblock(got.br_startblock));

		/*
		 * Don't remap unwritten extents; these are
		 * speculatively preallocated CoW extents that have been
		 * allocated but have not yet been involved in a write.
		 * Only remap real extents that contain data. With AIO,
		 * speculative preallocations can leak into the range we
		 * are called upon, and we need to skip them.
		 */
		if (got.br_state == XFS_EXT_UNWRITTEN)
		if (!xfs_bmap_is_real_extent(&got))
			goto prev_extent;

		/* Unmap the old blocks in the data fork. */

@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);

@@ -259,6 +259,14 @@ xfs_trans_alloc(
	struct xfs_trans *tp;
	int error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

@@ -270,8 +278,6 @@ xfs_trans_alloc(
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;

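The two xfs_trans_alloc() hunks above move the handle allocation ahead of sb_start_intwrite(), per the new comment, so the allocation can still happen in a GFP_KERNEL context without lockdep flagging reclaim inside the freeze-protected section. A simplified sketch of that ordering, assuming hypothetical demo_mount/demo_trans types (only sb_start_intwrite() is the real kernel API):

#include <linux/fs.h>
#include <linux/slab.h>

struct demo_mount { struct super_block *m_super; };
struct demo_trans { struct demo_mount *t_mountp; };

static int demo_trans_alloc(struct demo_mount *mp, struct demo_trans **tpp)
{
	struct demo_trans *tp;

	/* Allocate before entering the freeze-protected section so the
	 * allocation itself may be GFP_KERNEL without tripping lockdep. */
	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	sb_start_intwrite(mp->m_super);		/* freeze accounting after the alloc */
	tp->t_mountp = mp;
	*tpp = tp;
	return 0;
}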
@@ -322,49 +322,38 @@ xfs_trans_read_buf_map(
}

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction. If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0. If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 * Release a buffer previously joined to the transaction. If the buffer is
 * modified within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0. If the buffer is not modified
 * within the transaction, decrement the recursion count and release the buffer
 * if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 * If the buffer is to be released and it was not already dirty before this
 * transaction began, then also free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
 */
void
xfs_trans_brelse(
	xfs_trans_t *tp,
	xfs_buf_t *bp)
	struct xfs_trans *tp,
	struct xfs_buf *bp)
{
	struct xfs_buf_log_item *bip;
	int freed;
	struct xfs_buf_log_item *bip = bp->b_log_item;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
	ASSERT(bp->b_transp == tp);

	if (!tp) {
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_log_item;
	trace_xfs_trans_brelse(bip);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 * If the release is for a recursive lookup, then decrement the count
	 * and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;

@@ -372,64 +361,24 @@ xfs_trans_brelse(
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * If the buffer is invalidated or dirty in this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction. This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	/*
	 * Unlink the log item from the transaction and clear the hold flag, if
	 * set. We wouldn't want the next user of the buffer to get confused.
	 */
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);
	bip->bli_flags &= ~XFS_BLI_HOLD;

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then we must free it
	 * before releasing the buffer back to the free pool.
	 *
	 * If the fs has shutdown and we dropped the last reference, it may fall
	 * on us to release a (possibly dirty) bli if it never made it to the
	 * AIL (e.g., the aborted unpin already happened and didn't release it
	 * due to our reference). Since we're already shutdown and need
	 * ail_lock, just force remove from the AIL and release the bli here.
	 */
	if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
		xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_buf_item_relse(bp);
	} else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
		/***
		ASSERT(bp->b_pincount == 0);
		***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}
	/* drop the reference to the bli */
	xfs_buf_item_put(bip);

	bp->b_transp = NULL;
	xfs_buf_relse(bp);

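The rewritten brelse path above funnels the open-coded refcount drop and conditional free into a single xfs_buf_item_put() call. A simplified, hypothetical sketch of such a "put" helper is below; it is not the actual xfs_buf_item_put() and omits the AIL and shutdown handling the real code keeps inside the helper.

#include <linux/atomic.h>
#include <linux/slab.h>

/* Simplified stand-in for a buf log item; not the XFS structure. */
struct demo_log_item {
	atomic_t refcount;
};

/* Drop a reference; free the item only when the last reference goes away.
 * Returns true so the caller knows the item is gone. */
static bool demo_item_put(struct demo_log_item *lip)
{
	if (!atomic_dec_and_test(&lip->refcount))
		return false;		/* other holders (e.g. the AIL) remain */
	kfree(lip);
	return true;
}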
@@ -87,9 +87,10 @@ struct drm_client_dev {
	struct drm_file *file;
};

int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
		const char *name, const struct drm_client_funcs *funcs);
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
		const char *name, const struct drm_client_funcs *funcs);
void drm_client_release(struct drm_client_dev *client);
void drm_client_add(struct drm_client_dev *client);

void drm_client_dev_unregister(struct drm_device *dev);
void drm_client_dev_hotplug(struct drm_device *dev);

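The header hunk above replaces the one-shot drm_client_new() with drm_client_init() plus drm_client_add(), splitting setup from registration. A hedged usage sketch, using only the declarations shown in the hunk; the funcs table and the setup function are illustrative placeholders, not a real driver.

#include <drm/drm_client.h>

/* Illustrative funcs table; real clients fill in hotplug/restore/unregister. */
static const struct drm_client_funcs demo_client_funcs = { };

static int demo_client_setup(struct drm_device *dev,
			     struct drm_client_dev *client)
{
	int ret;

	ret = drm_client_init(dev, client, "demo-client", &demo_client_funcs);
	if (ret)
		return ret;		/* nothing registered yet, nothing to undo */

	/* ...further setup here; on failure, call drm_client_release()... */

	drm_client_add(client);		/* publish only after setup succeeded */
	return 0;
}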
@@ -1828,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
				struct inode *inode_out, loff_t pos_out,
				u64 *len, bool is_dedupe);
extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out, u64 len);
				struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
				struct inode *dest, loff_t destoff,
				loff_t len, bool *is_same);

@@ -2773,19 +2775,6 @@ static inline void file_end_write(struct file *file)
	__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}

static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				u64 len)
{
	int ret;

	file_start_write(file_out);
	ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
	file_end_write(file_out);

	return ret;
}

/*
 * get_write_access() gets write permission for a file.
 * put_write_access() releases this write permission.

@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *huge_pte_offset(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
			unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,

@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
			pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
			struct vm_area_struct *vma,
			unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })

@@ -2455,6 +2455,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);

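A small, hypothetical example of the new range_in_vma() helper: test whether an address range is fully covered by one VMA before acting on it. Only find_vma() and range_in_vma() are real kernel APIs here; the surrounding function is illustrative and the real caller would hold the mmap semaphore.

#include <linux/mm.h>

static int demo_check_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end)
{
	struct vm_area_struct *vma = find_vma(mm, start);

	if (!range_in_vma(vma, start, end))
		return -EFAULT;		/* hole, or range spans multiple VMAs */

	/* ...operate on [start, end); it lies inside a single VMA... */
	return 0;
}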
@@ -671,12 +671,6 @@ typedef struct pglist_data {
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available

@@ -5,6 +5,24 @@
#include <linux/if_vlan.h>
#include <uapi/linux/virtio_net.h>

static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
			const struct virtio_net_hdr *hdr)
{
	switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
	case VIRTIO_NET_HDR_GSO_TCPV4:
	case VIRTIO_NET_HDR_GSO_UDP:
		skb->protocol = cpu_to_be16(ETH_P_IP);
		break;
	case VIRTIO_NET_HDR_GSO_TCPV6:
		skb->protocol = cpu_to_be16(ETH_P_IPV6);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
			const struct virtio_net_hdr *hdr,
			bool little_endian)
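A hedged sketch of where the new helper fits: a receive path that derives skb->protocol from the GSO type only when the device supplied no ethertype, then performs the usual header conversion. The surrounding function is illustrative, not a real driver path, and the endianness argument is hard-coded purely for the sketch.

#include <linux/virtio_net.h>

static int demo_rx_fixup(struct sk_buff *skb, const struct virtio_net_hdr *hdr)
{
	if (!skb->protocol && virtio_net_hdr_set_proto(skb, hdr) < 0)
		return -EINVAL;		/* GSO requested but protocol unknown */

	return virtio_net_hdr_to_skb(skb, hdr, true);
}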