Merge 4.12-rc4 into tty-next

We want the tty locking fix in here, so that maybe we can finally get it
fixed for real...

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2017-06-05 09:27:05 +02:00
commit 8bc39bca5e
228 changed files with 2052 additions and 1200 deletions

View File

@ -247,7 +247,6 @@ bias-bus-hold - latch weakly
bias-pull-up - pull up the pin
bias-pull-down - pull down the pin
bias-pull-pin-default - use pin-default pull state
bi-directional - pin supports simultaneous input/output operations
drive-push-pull - drive actively high and low
drive-open-drain - drive with open drain
drive-open-source - drive with open source
@ -260,7 +259,6 @@ input-debounce - debounce mode with debound time X
power-source - select between different power supplies
low-power-enable - enable low power mode
low-power-disable - disable low power mode
output-enable - enable output on pin regardless of output value
output-low - set the pin to output mode with low level
output-high - set the pin to output mode with high level
slew-rate - set the slew rate

View File

@ -10450,7 +10450,7 @@ S: Orphan
PXA RTC DRIVER
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: rtc-linux@googlegroups.com
L: linux-rtc@vger.kernel.org
S: Maintained
QAT DRIVER
@ -10757,7 +10757,7 @@ X: kernel/torture.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
L: rtc-linux@googlegroups.com
L: linux-rtc@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/rtc-linux/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
S: Maintained

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Fearless Coyote
# *DOCUMENTATION*

View File

@ -24,8 +24,8 @@
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
(entry)->header.length != ACPI_MADT_GICC_LENGTH)
(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
(unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI

View File

@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
if (!root_ops)
if (!root_ops) {
kfree(ri);
return NULL;
}
ri->cfg = pci_acpi_setup_ecam_mapping(root);
if (!ri->cfg) {

View File

@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
#define vxtime_lock() do {} while (0)
#define vxtime_unlock() do {} while (0)
/* This attribute is used in include/linux/jiffies.h alongside with
* __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
* for frv does not contain another section specification.
*/
#define __jiffy_arch_data __attribute__((__section__(".data")))
#endif
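
For reference, a minimal sketch of how a per-architecture hook like __jiffy_arch_data is typically consumed on the generic side; the jiffies declaration below is an assumption based on include/linux/jiffies.h and is not part of this diff:

/* Generic side (sketch): architectures that do not define the hook get an
 * empty fallback, so only frv's ".data" section override changes where the
 * jiffies symbol is placed.
 */
#ifndef __jiffy_arch_data
#define __jiffy_arch_data
#endif

extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;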

View File

@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

View File

@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
top_of_kernel_stack = sp;
p->set_child_tid = p->clear_child_tid = NULL;
/* Locate userspace context on stack... */
sp -= STACK_FRAME_OVERHEAD; /* redzone */
sp -= sizeof(struct pt_regs);

View File

@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
}
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
if (!desc.mc)
return -EINVAL;
ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
desc.data, desc.size);
ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret != UCODE_OK)
return -EINVAL;
@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
}
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
#ifdef CONFIG_X86_32
/* save BSP's matching patch for early load */
if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
struct ucode_patch *p = find_patch(cpu);
if (save) {
struct ucode_patch *p = find_patch(0);
if (p) {
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
struct cpuinfo_x86 *c = &cpu_data(cpu);
bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;
/* reload ucode container only on the boot cpu */
if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
if (!refresh_fw || !bsp)
return UCODE_OK;
if (c->x86 >= 0x15)
@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
goto fw_release;
}
ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);

View File

@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
smp_processor_id());
raw_smp_processor_id());
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->ax, regs->bx, regs->cx, regs->dx);

View File

@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic)
{
preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
preempt_enable();
}
static bool start_hv_timer(struct kvm_lapic *apic)
@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
for (i = 0; i < KVM_APIC_LVT_NUM; i++)
kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_update_lvtt(apic);
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
if (kvm_vcpu_is_reset_bsp(vcpu) &&
kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
kvm_lapic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

View File

@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
* AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
var->unusable = !var->present || (var->type == 0);
var->unusable = !var->present;
switch (seg) {
case VCPU_SREG_TR:
@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
*/
if (var->unusable)
var->db = 0;
/* This is symmetric with svm_set_segment() */
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
break;
}
@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
if (var->unusable)
s->attrib = 0;
else {
s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
}
/*
* This is always accurate, except if SYSRET returned to a segment
@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
* would entail passing the CPL to userspace and back.
*/
if (seg == VCPU_SREG_SS)
svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
/* This is symmetric with svm_get_segment() */
svm->vmcb->save.cpl = (var->dpl & 3);
mark_dirty(svm->vmcb, VMCB_SEG);
}

View File

@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
return 0;
}
/*
* This function performs the various checks including
* - if it's 4KB aligned
* - No bits beyond the physical address width are set
* - Returns 0 on success or else 1
* (Intel SDM Section 30.3)
*/
static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
gpa_t *vmpointer)
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
gva_t gva;
gpa_t vmptr;
struct x86_exception e;
struct page *page;
struct vcpu_vmx *vmx = to_vmx(vcpu);
int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
sizeof(vmptr), &e)) {
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
sizeof(*vmpointer), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
switch (exit_reason) {
case EXIT_REASON_VMON:
/*
* SDM 3: 24.11.5
* The first 4 bytes of VMXON region contain the supported
* VMCS revision identifier
*
* Note - IA32_VMX_BASIC[48] will never be 1
* for the nested case;
* which replaces physical address width with 32
*
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
nested_release_page_clean(page);
nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
kunmap(page);
nested_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
break;
case EXIT_REASON_VMCLEAR:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
return kvm_skip_emulated_instruction(vcpu);
}
break;
case EXIT_REASON_VMPTRLD:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_VMXON_POINTER);
return kvm_skip_emulated_instruction(vcpu);
}
break;
default:
return 1; /* shouldn't happen */
}
if (vmpointer)
*vmpointer = vmptr;
return 0;
}
@ -7066,6 +6990,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
gpa_t vmptr;
struct page *page;
struct vcpu_vmx *vmx = to_vmx(vcpu);
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
/*
* SDM 3: 24.11.5
* The first 4 bytes of VMXON region contain the supported
* VMCS revision identifier
*
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
* which replaces physical address width with 32
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
nested_release_page_clean(page);
nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
kunmap(page);
nested_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
ret = enter_vmx_operation(vcpu);
if (ret)
return ret;
@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vmx);
@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12;
struct page *page;
@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int cr = exit_qualification & 15;
int reg = (exit_qualification >> 8) & 15;
unsigned long val = kvm_register_readl(vcpu, reg);
int reg;
unsigned long val;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
reg = (exit_qualification >> 8) & 15;
val = kvm_register_readl(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
* cr0. Other attempted changes are ignored, with no exit.
*/
val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return true;

View File

@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (vcpu->arch.pv.pv_unhalted)
return true;
if (atomic_read(&vcpu->arch.nmi_queued))
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
(vcpu->arch.nmi_pending &&
kvm_x86_ops->nmi_allowed(vcpu)))
return true;
if (kvm_test_request(KVM_REQ_SMI, vcpu))
if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
(vcpu->arch.smi_pending && !is_smm(vcpu)))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&

View File

@ -65,11 +65,9 @@ static int __init nopat(char *str)
}
early_param("nopat", nopat);
static bool __read_mostly __pat_initialized = false;
bool pat_enabled(void)
{
return __pat_initialized;
return !!__pat_enabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);
@ -227,14 +225,13 @@ static void pat_bsp_init(u64 pat)
}
wrmsrl(MSR_IA32_CR_PAT, pat);
__pat_initialized = true;
__init_cache_modes(pat);
}
static void pat_ap_init(u64 pat)
{
if (!this_cpu_has(X86_FEATURE_PAT)) {
if (!boot_cpu_has(X86_FEATURE_PAT)) {
/*
* If this happens we are on a secondary CPU, but switched to
* PAT on the boot CPU. We have no way to undo PAT.
@ -309,7 +306,7 @@ void pat_init(void)
u64 pat;
struct cpuinfo_x86 *c = &boot_cpu_data;
if (!__pat_enabled) {
if (!pat_enabled()) {
init_cache_modes();
return;
}

View File

@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
/*
* We don't do virtual mode, since we don't do runtime services, on
* non-native EFI
* non-native EFI. With efi=old_map, we don't do runtime services in
* kexec kernel because in the initial boot something else might
* have been mapped at these virtual addresses.
*/
if (!efi_is_native()) {
if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
efi_memmap_unmap();
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;

View File

@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
pgd_t * __init efi_call_phys_prolog(void)
{
unsigned long vaddress;
pgd_t *save_pgd;
unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
pgd_t *save_pgd, *pgd_k, *pgd_efi;
p4d_t *p4d, *p4d_k, *p4d_efi;
pud_t *pud;
int pgd;
int n_pgds;
int n_pgds, i, j;
if (!efi_enabled(EFI_OLD_MEMMAP)) {
save_pgd = (pgd_t *)read_cr3();
@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
/*
* Build 1:1 identity mapping for efi=old_map usage. Note that
* PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
* it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
* address X, the pud_index(X) != pud_index(__va(X)), we can only copy
* PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
* This means here we can only reuse the PMD tables of the direct mapping.
*/
for (pgd = 0; pgd < n_pgds; pgd++) {
save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
pgd_efi = pgd_offset_k(addr_pgd);
save_pgd[pgd] = *pgd_efi;
p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
if (!p4d) {
pr_err("Failed to allocate p4d table!\n");
goto out;
}
for (i = 0; i < PTRS_PER_P4D; i++) {
addr_p4d = addr_pgd + i * P4D_SIZE;
p4d_efi = p4d + p4d_index(addr_p4d);
pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
if (!pud) {
pr_err("Failed to allocate pud table!\n");
goto out;
}
for (j = 0; j < PTRS_PER_PUD; j++) {
addr_pud = addr_p4d + j * PUD_SIZE;
if (addr_pud > (max_pfn << PAGE_SHIFT))
break;
vaddr = (unsigned long)__va(addr_pud);
pgd_k = pgd_offset_k(vaddr);
p4d_k = p4d_offset(pgd_k, vaddr);
pud[j] = *pud_offset(p4d_k, vaddr);
}
}
}
out:
__flush_tlb_all();
@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
/*
* After the lock is released, the original page table is restored.
*/
int pgd_idx;
int pgd_idx, i;
int nr_pgds;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
if (!efi_enabled(EFI_OLD_MEMMAP)) {
write_cr3((unsigned long)save_pgd);
@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
if (!(pgd_val(*pgd) & _PAGE_PRESENT))
continue;
for (i = 0; i < PTRS_PER_P4D; i++) {
p4d = p4d_offset(pgd,
pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
if (!(p4d_val(*p4d) & _PAGE_PRESENT))
continue;
pud = (pud_t *)p4d_page_vaddr(*p4d);
pud_free(&init_mm, pud);
}
p4d = (p4d_t *)pgd_page_vaddr(*pgd);
p4d_free(&init_mm, p4d);
}
kfree(save_pgd);
__flush_tlb_all();

View File

@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
free_bootmem_late(start, size);
}
if (!num_entries)
return;
new_size = efi.memmap.desc_size * num_entries;
new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {

View File

@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
if (blkg->blkcg != &blkcg_root)
blk_exit_rl(&blkg->rl);
blk_exit_rl(blkg->q, &blkg->rl);
blkg_rwstat_exit(&blkg->stat_ios);
blkg_rwstat_exit(&blkg->stat_bytes);

View File

@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
if (!rl->rq_pool)
return -ENOMEM;
if (rl != &q->root_rl)
WARN_ON_ONCE(!blk_get_queue(q));
return 0;
}
void blk_exit_rl(struct request_list *rl)
void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
if (rl->rq_pool)
if (rl->rq_pool) {
mempool_destroy(rl->rq_pool);
if (rl != &q->root_rl)
blk_put_queue(q);
}
}
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)

View File

@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret;
}
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues)
{
struct request_queue *q;
@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
}
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
mutex_lock(&set->tag_list_lock);
__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
/* Enable polling stats and return whether they were already enabled. */

View File

@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
blk_free_queue_stats(q->stats);
blk_exit_rl(&q->root_rl);
blk_exit_rl(q, &q->root_rl);
if (q->queue_tags)
__blk_queue_free_tags(q);

View File

@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);

View File

@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
/*
* offset from end of service tree
* offset from end of queue service tree for idle class
*/
#define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
/* offset from end of group service tree under time slice mode */
#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
/* offset from end of group service under IOPS mode */
#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
/*
* below this threshold, we consider thinktime immediate
@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
cfqg->vfraction = max_t(unsigned, vfr, 1);
}
static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
{
if (!iops_mode(cfqd))
return CFQ_SLICE_MODE_GROUP_DELAY;
else
return CFQ_IOPS_MODE_GROUP_DELAY;
}
static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
n = rb_last(&st->rb);
if (n) {
__cfqg = rb_entry_cfqg(n);
cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
cfqg->vdisktime = __cfqg->vdisktime +
cfq_get_cfqg_vdisktime_delay(cfqd);
} else
cfqg->vdisktime = st->min_vdisktime;
cfq_group_service_tree_add(st, cfqg);

View File

@ -418,11 +418,7 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
table_desc->validation_count++;
if (table_desc->validation_count == 0) {
ACPI_ERROR((AE_INFO,
"Table %p, Validation count is zero after increment\n",
table_desc));
table_desc->validation_count--;
return_ACPI_STATUS(AE_LIMIT);
}
*out_table = table_desc->pointer;

View File

@ -113,7 +113,7 @@ struct acpi_button {
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);

View File

@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
container_of(bin_attr, struct acpi_table_attr, attr);
struct acpi_table_header *table_header = NULL;
acpi_status status;
ssize_t rc;
status = acpi_get_table(table_attr->name, table_attr->instance,
&table_header);
if (ACPI_FAILURE(status))
return -ENODEV;
return memory_read_from_buffer(buf, count, &offset,
table_header, table_header->length);
rc = memory_read_from_buffer(buf, count, &offset, table_header,
table_header->length);
acpi_put_table(table_header);
return rc;
}
static int acpi_table_attr_init(struct kobject *tables_obj,

View File

@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
return -ENOSPC;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
nbd->config = NULL;
nbd->tag_set.timeout = 0;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}
static void nbd_bdev_reset(struct block_device *bdev)
{
if (bdev->bd_openers > 1)
@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
}
kfree(config->socks);
}
nbd_reset(nbd);
kfree(nbd->config);
nbd->config = NULL;
nbd->tag_set.timeout = 0;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
nbd_reset(nbd);
add_disk(disk);
nbd_total_devices++;
return index;

View File

@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
op_type = OBJ_OP_DISCARD;
break;
case REQ_OP_WRITE:
@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_granularity = segment_size;
q->limits.discard_alignment = segment_size;
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

View File

@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
if (rc <= 0) {
DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
for (i = 0; i < bytes_to_write; i++) {
rc = wait_for_bulk_out_ready(dev);
if (rc <= 0) {
DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
if (rc <= 0) {
DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;

View File

@ -1097,12 +1097,16 @@ static void add_interrupt_bench(cycles_t start)
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
unsigned long flags;
if (regs == NULL)
return 0;
local_irq_save(flags);
if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
f->reg_idx = 0;
return *(ptr + f->reg_idx++);
ptr += f->reg_idx++;
local_irq_restore(flags);
return *ptr;
}
void add_interrupt_randomness(int irq, int irq_flags)

View File

@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
list_empty(&cpufreq_policy_list)) {
/* if all ->init() calls failed, unregister */
ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
driver_data->name);
goto err_if_unreg;

View File

@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
return PTR_ERR(priv.cpu_clk);
}
clk_prepare_enable(priv.cpu_clk);
err = clk_prepare_enable(priv.cpu_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare cpuclk\n");
return err;
}
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
goto out_cpu;
}
clk_prepare_enable(priv.ddr_clk);
err = clk_prepare_enable(priv.ddr_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare ddrclk\n");
goto out_cpu;
}
kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
err = PTR_ERR(priv.powersave_clk);
goto out_ddr;
}
clk_prepare_enable(priv.powersave_clk);
err = clk_prepare_enable(priv.powersave_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare powersave clk\n");
goto out_ddr;
}
of_node_put(np);
np = NULL;

View File

@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
struct dma_device dma_dev;
bool m2m;
int (*hw_setup)(struct ep93xx_dma_chan *);
void (*hw_synchronize)(struct ep93xx_dma_chan *);
void (*hw_shutdown)(struct ep93xx_dma_chan *);
void (*hw_submit)(struct ep93xx_dma_chan *);
int (*hw_interrupt)(struct ep93xx_dma_chan *);
@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
| M2P_CONTROL_ENABLE;
m2p_set_control(edmac, control);
edmac->buffer = 0;
return 0;
}
@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
unsigned long flags;
u32 control;
spin_lock_irqsave(&edmac->lock, flags);
control = readl(edmac->regs + M2P_CONTROL);
control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
m2p_set_control(edmac, control);
spin_unlock_irqrestore(&edmac->lock, flags);
while (m2p_channel_state(edmac) >= M2P_STATE_ON)
cpu_relax();
schedule();
}
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
m2p_set_control(edmac, 0);
while (m2p_channel_state(edmac) == M2P_STATE_STALL)
cpu_relax();
while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@ -1160,6 +1169,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL;
}
/**
* ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
* current context.
* @chan: channel
*
* Synchronizes the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
* descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
*/
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
if (edmac->edma->hw_synchronize)
edmac->edma->hw_synchronize(edmac);
}
/**
* ep93xx_dma_terminate_all - terminate all transactions
* @chan: channel
@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
dma_dev->device_config = ep93xx_dma_slave_config;
dma_dev->device_synchronize = ep93xx_dma_synchronize;
dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
dma_dev->device_tx_status = ep93xx_dma_tx_status;
@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
} else {
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
edma->hw_synchronize = m2p_hw_synchronize;
edma->hw_setup = m2p_hw_setup;
edma->hw_shutdown = m2p_hw_shutdown;
edma->hw_submit = m2p_hw_submit;
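
The new hw_synchronize/device_synchronize hook backs the generic dmaengine_synchronize() helper; below is a minimal consumer-side sketch of the termination sequence the kernel-doc above describes. The function and variable names are hypothetical, but dmaengine_terminate_async(), dmaengine_synchronize() and dma_release_channel() are existing dmaengine APIs:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch: tear down a slave channel safely. Once dmaengine_synchronize()
 * returns, no descriptor completion callback can still be running, so the
 * memory those callbacks touch may be freed.
 */
static void example_channel_teardown(struct dma_chan *chan, struct device *dev,
				     void *buf, dma_addr_t dma_addr, size_t len)
{
	dmaengine_terminate_async(chan);	/* stop the hardware */
	dmaengine_synchronize(chan);		/* wait for callbacks to finish */

	dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
	kfree(buf);
	dma_release_channel(chan);
}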

View File

@ -161,6 +161,7 @@ struct mv_xor_v2_device {
struct mv_xor_v2_sw_desc *sw_desq;
int desc_size;
unsigned int npendings;
unsigned int hw_queue_idx;
};
/**
@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
}
}
/*
* Return the next available index in the DESQ.
*/
static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
{
/* read the index for the next available descriptor in the DESQ */
u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
}
/*
* notify the engine of new descriptors, and update the available index.
*/
@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
return MV_XOR_V2_EXT_DESC_SIZE;
}
/*
* Set the IMSG threshold
*/
static inline
void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
{
u32 reg;
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
}
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
struct mv_xor_v2_device *xor_dev = data;
@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
if (!ndescs)
return IRQ_NONE;
/*
* Update IMSG threshold, to disable new IMSG interrupts until
* end of the tasklet
*/
mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
/* schedule a tasklet to handle descriptors callbacks */
tasklet_schedule(&xor_dev->irq_tasklet);
@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
int desq_ptr;
void *dest_hw_desc;
dma_cookie_t cookie;
struct mv_xor_v2_sw_desc *sw_desc =
@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
/* get the next available slot in the DESQ */
desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
/* copy the HW descriptor from the SW descriptor to the DESQ */
dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
xor_dev->npendings++;
xor_dev->hw_queue_idx++;
if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
xor_dev->hw_queue_idx = 0;
spin_unlock_bh(&xor_dev->lock);
@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
struct mv_xor_v2_sw_desc *sw_desc;
bool found = false;
/* Lock the channel */
spin_lock_bh(&xor_dev->lock);
@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
return NULL;
}
/* get a free SW descriptor from the SW DESQ */
sw_desc = list_first_entry(&xor_dev->free_sw_desc,
struct mv_xor_v2_sw_desc, free_list);
list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
if (async_tx_test_ack(&sw_desc->async_tx)) {
found = true;
break;
}
}
if (!found) {
spin_unlock_bh(&xor_dev->lock);
return NULL;
}
list_del(&sw_desc->free_list);
/* Release the channel */
spin_unlock_bh(&xor_dev->lock);
/* set the async tx descriptor */
dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
async_tx_ack(&sw_desc->async_tx);
return sw_desc;
}
@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
__func__, len, &src, &dest, flags);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
if (!sw_desc)
return NULL;
sw_desc->async_tx.flags = flags;
@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
__func__, src_cnt, len, &dest, flags);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
if (!sw_desc)
return NULL;
sw_desc->async_tx.flags = flags;
@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
container_of(chan, struct mv_xor_v2_device, dmachan);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
if (!sw_desc)
return NULL;
/* set the HW descriptor */
hw_descriptor = &sw_desc->hw_desc;
@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
{
struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
int pending_ptr, num_of_pending, i;
struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
/* get the pending descriptors parameters */
num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
/* next HW descriptor */
next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
/* loop over free descriptors */
for (i = 0; i < num_of_pending; i++) {
if (pending_ptr > MV_XOR_V2_DESC_NUM)
pending_ptr = 0;
if (next_pending_sw_desc != NULL)
next_pending_hw_desc++;
struct mv_xor_v2_descriptor *next_pending_hw_desc =
xor_dev->hw_desq_virt + pending_ptr;
/* get the SW descriptor related to the HW descriptor */
next_pending_sw_desc =
@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
/* increment the next descriptor */
pending_ptr++;
if (pending_ptr >= MV_XOR_V2_DESC_NUM)
pending_ptr = 0;
}
if (num_of_pending != 0) {
/* free the descriptors */
mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
}
/* Update IMSG threshold, to enable new IMSG interrupts */
mv_xor_v2_set_imsg_thrd(xor_dev, 0);
}
/*
@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
/* enable the DMA engine */
writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
/*
* This is a temporary solution, until we activate the
* SMMU. Set the attributes for reading & writing data buffers
@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
/* enable the DMA engine */
writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
return 0;
}
@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xor_dev);
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
/* add all SW descriptors to the free list */
for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
xor_dev->sw_desq[i].idx = i;
list_add(&xor_dev->sw_desq[i].free_list,
struct mv_xor_v2_sw_desc *sw_desc =
xor_dev->sw_desq + i;
sw_desc->idx = i;
dma_async_tx_descriptor_init(&sw_desc->async_tx,
&xor_dev->dmachan);
sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
async_tx_ack(&sw_desc->async_tx);
list_add(&sw_desc->free_list,
&xor_dev->free_sw_desc);
}

View File

@ -3008,6 +3008,7 @@ static int pl330_remove(struct amba_device *adev)
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = adev->irq[i];
if (irq)
devm_free_irq(&adev->dev, irq, pl330);
}

View File

@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
if (desc->hwdescs.use) {
dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
if (dptr == 0)
dptr = desc->nchunks;
dptr--;
WARN_ON(dptr >= desc->nchunks);
} else {
running = desc->running;

View File

@ -117,7 +117,7 @@ struct usb_dmac {
#define USB_DMASWR 0x0008
#define USB_DMASWR_SWR (1 << 0)
#define USB_DMAOR 0x0060
#define USB_DMAOR_AE (1 << 2)
#define USB_DMAOR_AE (1 << 1)
#define USB_DMAOR_DME (1 << 0)
#define USB_DMASAR 0x0000

View File

@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY);
DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);

View File

@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
break;
case 2: /* Base Board Information */
dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);

View File

@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
if (acpi_disabled)
return;
if (!efi_enabled(EFI_BOOT))
return;
if (table->length < sizeof(bgrt_tab)) {
pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
table->length, sizeof(bgrt_tab));

View File

@ -16,10 +16,10 @@
/* BIOS variables */
static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
static const efi_char16_t const efi_SecureBoot_name[] = {
static const efi_char16_t efi_SecureBoot_name[] = {
'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
};
static const efi_char16_t const efi_SetupMode_name[] = {
static const efi_char16_t efi_SetupMode_name[] = {
'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
};

View File

@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
amdgpu_vram_mgr_init,
amdgpu_vram_mgr_fini,
amdgpu_vram_mgr_new,
amdgpu_vram_mgr_del,
amdgpu_vram_mgr_debug
.init = amdgpu_vram_mgr_init,
.takedown = amdgpu_vram_mgr_fini,
.get_node = amdgpu_vram_mgr_new,
.put_node = amdgpu_vram_mgr_del,
.debug = amdgpu_vram_mgr_debug
};

View File

@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 v;
mutex_lock(&adev->grbm_idx_mutex);
if (adev->vce.harvest_config == 0 ||
adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring == &adev->vce.ring[0])
return RREG32(mmVCE_RB_RPTR);
v = RREG32(mmVCE_RB_RPTR);
else if (ring == &adev->vce.ring[1])
return RREG32(mmVCE_RB_RPTR2);
v = RREG32(mmVCE_RB_RPTR2);
else
return RREG32(mmVCE_RB_RPTR3);
v = RREG32(mmVCE_RB_RPTR3);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return v;
}
/**
@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 v;
mutex_lock(&adev->grbm_idx_mutex);
if (adev->vce.harvest_config == 0 ||
adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring == &adev->vce.ring[0])
return RREG32(mmVCE_RB_WPTR);
v = RREG32(mmVCE_RB_WPTR);
else if (ring == &adev->vce.ring[1])
return RREG32(mmVCE_RB_WPTR2);
v = RREG32(mmVCE_RB_WPTR2);
else
return RREG32(mmVCE_RB_WPTR3);
v = RREG32(mmVCE_RB_WPTR3);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return v;
}
/**
@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
mutex_lock(&adev->grbm_idx_mutex);
if (adev->vce.harvest_config == 0 ||
adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring == &adev->vce.ring[0])
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else if (ring == &adev->vce.ring[1])
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@ -231,6 +267,16 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int idx, r;
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
/* Program instance 0 reg space for two instances or instance 0 case
program instance 1 reg space for only instance 1 available case */
if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
ring = &adev->vce.ring[0];
WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
@ -251,13 +297,8 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
}
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

View File

@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
static struct phm_master_table_item
vega10_thermal_start_thermal_controller_master_list[] = {
{NULL, tf_vega10_thermal_initialize},
{NULL, tf_vega10_thermal_set_temperature_range},
{NULL, tf_vega10_thermal_enable_alert},
{ .tableFunction = tf_vega10_thermal_initialize },
{ .tableFunction = tf_vega10_thermal_set_temperature_range },
{ .tableFunction = tf_vega10_thermal_enable_alert },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
{NULL, tf_vega10_thermal_setup_fan_table},
{NULL, tf_vega10_thermal_start_smc_fan_control},
{NULL, NULL}
{ .tableFunction = tf_vega10_thermal_setup_fan_table },
{ .tableFunction = tf_vega10_thermal_start_smc_fan_control },
{ }
};
static struct phm_master_table_header
@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
static struct phm_master_table_item
vega10_thermal_set_temperature_range_master_list[] = {
{NULL, tf_vega10_thermal_disable_alert},
{NULL, tf_vega10_thermal_set_temperature_range},
{NULL, tf_vega10_thermal_enable_alert},
{NULL, NULL}
{ .tableFunction = tf_vega10_thermal_disable_alert },
{ .tableFunction = tf_vega10_thermal_set_temperature_range },
{ .tableFunction = tf_vega10_thermal_enable_alert },
{ }
};
struct phm_master_table_header

View File

@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
return 0;
}
EXPORT_SYMBOL(drm_dp_stop_crc);
struct dpcd_quirk {
u8 oui[3];
bool is_branch;
u32 quirks;
};
#define OUI(first, second, third) { (first), (second), (third) }
static const struct dpcd_quirk dpcd_quirk_list[] = {
/* Analogix 7737 needs reduced M and N at HBR2 link rates */
{ OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
};
#undef OUI
/*
* Get a bit mask of DPCD quirks for the sink/branch device identified by
* ident. The quirk data is shared but it's up to the drivers to act on the
* data.
*
* For now, only the OUI (first three bytes) is used, but this may be extended
* to device identification string and hardware/firmware revisions later.
*/
static u32
drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
{
const struct dpcd_quirk *quirk;
u32 quirks = 0;
int i;
for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
quirk = &dpcd_quirk_list[i];
if (quirk->is_branch != is_branch)
continue;
if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
continue;
quirks |= quirk->quirks;
}
return quirks;
}
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
* @desc: Device descriptor to fill from DPCD
* @is_branch: true for branch devices, false for sink devices
*
* Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
* identification.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch)
{
struct drm_dp_dpcd_ident *ident = &desc->ident;
unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
int ret, dev_id_len;
ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
if (ret < 0)
return ret;
desc->quirks = drm_dp_get_quirks(ident, is_branch);
dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
is_branch ? "branch" : "sink",
(int)sizeof(ident->oui), ident->oui,
dev_id_len, ident->device_id,
ident->hw_rev >> 4, ident->hw_rev & 0xf,
ident->sw_major_rev, ident->sw_minor_rev,
desc->quirks);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
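
A short sketch of how a display driver might consume the new descriptor: read it once per detect cycle and let the quirk bit drive the M/N reduction decision. The surrounding context (the aux pointer and detect path) is assumed; struct drm_dp_desc, drm_dp_read_desc() and DP_DPCD_QUIRK_LIMITED_M_N come from this patch, and later hunks in this commit thread the resulting flag into intel_link_compute_m_n():

/* Sketch: assumes "aux" is the connector's struct drm_dp_aux. */
struct drm_dp_desc desc;
bool reduce_m_n = false;

if (drm_dp_read_desc(aux, &desc, false /* sink, not a branch device */) == 0)
	reduce_m_n = desc.quirks & BIT(DP_DPCD_QUIRK_LIMITED_M_N);

/* reduce_m_n can then be passed as the new last argument of
 * intel_link_compute_m_n() when computing DP link M/N values.
 */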

View File

@ -82,14 +82,9 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
exynos_drm_subdrv_close(dev, file);
}
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
exynos_drm_subdrv_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
.gem_free_object_unlocked = exynos_drm_gem_free_object,

View File

@ -160,12 +160,9 @@ struct exynos_drm_clk {
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
* @enabled: if the crtc is enabled or not
* @event: vblank event that is currently queued for flip
* @wait_update: wait all pending planes updates to finish
* @pending_update: number of pending plane updates in this crtc
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
* @pipe_clk: A pointer to the crtc's pipeline clock.
*/
struct exynos_drm_crtc {
struct drm_crtc base;

View File

@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
{
struct device *dev = dsi->dev;
struct device_node *node = dev->of_node;
struct device_node *ep;
int ret;
ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
if (!ep) {
dev_err(dev, "no output port with endpoint specified\n");
return -EINVAL;
}
ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
&dsi->burst_clk_rate);
if (ret < 0)
goto end;
return ret;
ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
&dsi->esc_clk_rate);
if (ret < 0)
goto end;
of_node_put(ep);
return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
if (!dsi->bridge_node)
return -EINVAL;
end:
of_node_put(ep);
return ret;
return 0;
}
static int exynos_dsi_bind(struct device *dev, struct device *master,
@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
static int exynos_dsi_remove(struct platform_device *pdev)
{
struct exynos_dsi *dsi = platform_get_drvdata(pdev);
of_node_put(dsi->bridge_node);
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &exynos_dsi_component_ops);

View File

@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
unsigned int tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&vgpu->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
free_workload(pos);
}
}
}
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
}
@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
unsigned int tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
/* free the unsubmited workload in the queue */
list_for_each_entry_safe(pos, n,
&vgpu->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
free_workload(pos);
}
clean_workloads(vgpu, engine_mask);
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
}

View File

@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t reg = {.reg = offset};
u32 v = *(u32 *)p_data;
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return intel_vgpu_default_mmio_write(vgpu,
offset, p_data, bytes);
switch (offset) {
case 0x4ddc:
vgpu_vreg(vgpu, offset) = 0x8000003c;
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
I915_WRITE(reg, vgpu_vreg(vgpu, offset));
/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
break;
case 0x42080:
vgpu_vreg(vgpu, offset) = 0x8000;
/* WaCompressedResourceDisplayNewHashMode:skl */
I915_WRITE(reg, vgpu_vreg(vgpu, offset));
/* bypass WaCompressedResourceDisplayNewHashMode */
vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
break;
case 0xe194:
/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
break;
case 0x7014:
/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
break;
default:
return -EINVAL;
@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
skl_misc_ctl_write);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
skl_misc_ctl_write);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

View File

@ -1272,10 +1272,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_priv->ipc_enabled = false;
/* Everything is in place, we can now relax! */
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))

View File

@ -562,7 +562,8 @@ struct intel_link_m_n {
void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
struct intel_link_m_n *m_n,
bool reduce_m_n);
/* Interface history:
*

View File

@ -2313,7 +2313,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
appgtt->base.allocate_va_range) {
ret = appgtt->base.allocate_va_range(&appgtt->base,
vma->node.start,
vma->node.size);
vma->size);
if (ret)
goto err_pages;
}

View File

@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
return;
mutex_unlock(&dev->struct_mutex);
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(dev_priv);
synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
return freed;
}

View File

@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
u32 val;
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
WARN_ON(dev_priv->irq_mask != ~0);
val = (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT);
enable_mask |= val;
dev_priv->irq_mask = ~enable_mask;
GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);

View File

@ -8280,7 +8280,7 @@ enum {
/* MIPI DSI registers */
#define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)

View File

@ -6101,7 +6101,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
link_bw, &pipe_config->fdi_m_n, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@ -6277,7 +6277,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
}
static void compute_m_n(unsigned int m, unsigned int n,
uint32_t *ret_m, uint32_t *ret_n)
uint32_t *ret_m, uint32_t *ret_n,
bool reduce_m_n)
{
/*
* Reduce M/N as much as possible without loss in precision. Several DP
@ -6285,10 +6286,12 @@ static void compute_m_n(unsigned int m, unsigned int n,
* values. The passed in values are more likely to have the least
* significant bits zero than M after rounding below, so do this first.
*/
if (reduce_m_n) {
while ((m & 1) == 0 && (n & 1) == 0) {
m >>= 1;
n >>= 1;
}
}
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((uint64_t) m * *ret_n, n);
@ -6298,16 +6301,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n)
struct intel_link_m_n *m_n,
bool reduce_m_n)
{
m_n->tu = 64;
compute_m_n(bits_per_pixel * pixel_clock,
link_clock * nlanes * 8,
&m_n->gmch_m, &m_n->gmch_n);
&m_n->gmch_m, &m_n->gmch_n,
reduce_m_n);
compute_m_n(pixel_clock, link_clock,
&m_n->link_m, &m_n->link_n);
&m_n->link_m, &m_n->link_n,
reduce_m_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
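For reference, a minimal standalone sketch of the M/N computation above, with the kernel helpers (min_t, div_u64, roundup_pow_of_two) replaced by plain C; the DATA_LINK_N_MAX value, the helper names (roundup_pow2, sketch_compute_m_n) and the clock numbers below are illustrative assumptions, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define DATA_LINK_N_MAX	0x800000	/* assumed limit, for illustration only */

static uint32_t roundup_pow2(uint32_t n)
{
	uint32_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static void sketch_compute_m_n(uint32_t m, uint32_t n,
			       uint32_t *ret_m, uint32_t *ret_n, int reduce_m_n)
{
	/* Pre-halving runs only when reduce_m_n is set, i.e. per the diff
	 * above, only for sinks with the DP_DPCD_QUIRK_LIMITED_M_N quirk. */
	if (reduce_m_n) {
		while ((m & 1) == 0 && (n & 1) == 0) {
			m >>= 1;
			n >>= 1;
		}
	}
	*ret_n = roundup_pow2(n);
	if (*ret_n > DATA_LINK_N_MAX)
		*ret_n = DATA_LINK_N_MAX;
	*ret_m = (uint32_t)(((uint64_t)m * *ret_n) / n);
}

int main(void)
{
	uint32_t gmch_m, gmch_n;

	/* e.g. 24 bpp, 4 lanes, 148.5 MHz pixel clock, 270 MHz link clock */
	sketch_compute_m_n(24 * 148500, 270000 * 4 * 8, &gmch_m, &gmch_n, 1);
	printf("gmch M/N = %u/%u\n", (unsigned)gmch_m, (unsigned)gmch_n);
	return 0;
}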

View File

@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
bool
__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
DP_SINK_OUI;
return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
sizeof(*desc);
}
bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
struct intel_dp_desc *desc = &intel_dp->desc;
bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
DP_OUI_SUPPORT;
int dev_id_len;
if (!__intel_dp_read_desc(intel_dp, desc))
return false;
dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
(int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
dev_id_len, desc->device_id,
desc->hw_rev >> 4, desc->hw_rev & 0xf,
desc->sw_major_rev, desc->sw_minor_rev);
return true;
}
static int rate_to_index(int find, const int *rates)
{
int i = 0;
@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int common_len;
uint8_t link_bw, rate_select;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
common_len = intel_dp_common_rates(intel_dp, common_rates);
@ -1753,7 +1724,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n);
&pipe_config->dp_m_n,
reduce_m_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@ -1761,7 +1733,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2);
&pipe_config->dp_m2_n2,
reduce_m_n);
}
/*
@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
intel_dp_read_desc(intel_dp);
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
intel_dp_read_desc(intel_dp);
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
intel_dp_configure_mst(intel_dp);
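A condensed sketch of the new descriptor/quirk flow used above (a fragment that assumes intel_dp is already set up; return values and error paths omitted):

	struct drm_dp_desc desc;
	bool reduce_m_n;

	/* Read the sink or branch descriptor once over AUX... */
	drm_dp_read_desc(&intel_dp->aux, &desc, drm_dp_is_branch(intel_dp->dpcd));

	/* ...then consult the cached copy instead of re-reading DPCD. */
	reduce_m_n = drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);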

View File

@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
int lane_count, slots;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
pipe_config->has_pch_encoder = false;
bpp = 24;
@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n);
&pipe_config->dp_m_n,
reduce_m_n);
pipe_config->dp_m_n.tu = slots;

View File

@ -906,14 +906,6 @@ enum link_m_n_set {
M2_N2
};
struct intel_dp_desc {
u8 oui[3];
u8 device_id[6];
u8 hw_rev;
u8 sw_major_rev;
u8 sw_minor_rev;
} __packed;
struct intel_dp_compliance_data {
unsigned long edid;
uint8_t video_pattern;
@ -957,7 +949,7 @@ struct intel_dp {
/* Max link BW for the sink as per DPCD registers */
int max_sink_link_bw;
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_desc desc;
struct drm_dp_aux aux;
enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
}
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool __intel_dp_read_desc(struct intel_dp *intel_dp,
struct intel_dp_desc *desc);
bool intel_dp_read_desc(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,

View File

@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
static void lpe_audio_irq_unmask(struct irq_data *d)
{
struct drm_i915_private *dev_priv = d->chip_data;
unsigned long irqflags;
u32 val = (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT);
if (IS_CHERRYVIEW(dev_priv))
val |= I915_LPE_PIPE_C_INTERRUPT;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->irq_mask &= ~val;
I915_WRITE(VLV_IIR, val);
I915_WRITE(VLV_IIR, val);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
POSTING_READ(VLV_IMR);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void lpe_audio_irq_mask(struct irq_data *d)
{
struct drm_i915_private *dev_priv = d->chip_data;
unsigned long irqflags;
u32 val = (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT);
if (IS_CHERRYVIEW(dev_priv))
val |= I915_LPE_PIPE_C_INTERRUPT;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->irq_mask |= val;
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IIR, val);
I915_WRITE(VLV_IIR, val);
POSTING_READ(VLV_IIR);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static struct irq_chip lpe_audio_irqchip = {
@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
desc = irq_to_desc(dev_priv->lpe_audio.irq);
lpe_audio_irq_mask(&desc->irq_data);
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);

View File

@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
ce->ring = ring;
ce->state = vma;
ce->initialised = engine->init_context == NULL;
ce->initialised |= engine->init_context == NULL;
return 0;

View File

@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
intel_dp_read_desc(dp);
drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;

View File

@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = NULL;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
if (dw == 0) {
if (!obj) {
obj = create_test_object(ctx, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
goto out_unlock;
}
if (++dw == max_dwords(obj))
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
ndwords++;
}
ncontexts++;

View File

@ -13,6 +13,7 @@ config DRM_MSM
select QCOM_SCM
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
default y
help
DRM/KMS driver for MSM/snapdragon.

View File

@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
static struct irq_domain_ops mdss_hw_irqdomain_ops = {
static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
.map = mdss_hw_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};

View File

@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
if (!mdp5_state)
return NULL;
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
return &mdp5_state->base;
}
@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
mdp5_pipe_release(state->state, old_hwpipe);
mdp5_pipe_release(state->state, old_right_hwpipe);
}
} else {
mdp5_pipe_release(state->state, mdp5_state->hwpipe);
mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
return 0;

View File

@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,

View File

@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);

View File

@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
}
struct msm_fence {
struct msm_fence_context *fctx;
struct dma_fence base;
struct msm_fence_context *fctx;
};
static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
return fence_completed(f->fctx, f->base.seqno);
}
static void msm_fence_release(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
.wait = dma_fence_default_wait,
.release = msm_fence_release,
.release = dma_fence_free,
};
struct dma_fence *

View File

@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_gem_object *msm_obj;
bool use_vram = false;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
mutex_lock(&dev->struct_mutex);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
mutex_unlock(&dev->struct_mutex);
if (ret)
goto fail;

View File

@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
return msm_obj->resv;
}

View File

@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!in_fence)
return -EINVAL;
/* TODO if we get an array-fence due to userspace merging multiple
* fences, we need a way to determine if all the backing fences
* are from our own context..
/*
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
if (in_fence->context != gpu->fctx->context) {
if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
if ((submit_cmd.size + submit_cmd.submit_offset) >=
msm_obj->base.size) {
if (!submit_cmd.size ||
((submit_cmd.size + submit_cmd.submit_offset) >
msm_obj->base.size)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;

View File

@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
gpu->grp_clks[i] = get_clock(dev, name);
/* Remember the key clocks that we need to control later */
if (!strcmp(name, "core"))
if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
gpu->core_clk = gpu->grp_clks[i];
else if (!strcmp(name, "rbbmtimer"))
else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
gpu->rbbmtimer_clk = gpu->grp_clks[i];
++i;

View File

@ -275,10 +275,12 @@ config HID_EMS_FF
- Trio Linker Plus II
config HID_ELECOM
tristate "ELECOM BM084 bluetooth mouse"
tristate "ELECOM HID devices"
depends on HID
---help---
Support for the ELECOM BM084 (bluetooth mouse).
Support for ELECOM devices:
- BM084 Bluetooth Mouse
- DEFT Trackball (Wired and wireless)
config HID_ELO
tristate "ELO USB 4000/4500 touchscreen"

View File

@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_IS_MULTITOUCH BIT(3)
#define QUIRK_NO_CONSUMER_USAGES BIT(4)
#define QUIRK_USE_KBD_BACKLIGHT BIT(5)
#define QUIRK_T100_KEYBOARD BIT(6)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
drvdata->kbd_backlight->removed = true;
cancel_work_sync(&drvdata->kbd_backlight->work);
}
hid_hw_stop(hdev);
}
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
*rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
}
return rdesc;
}
@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);

View File

@ -1855,6 +1855,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@ -1891,6 +1892,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },

View File

@ -1,10 +1,8 @@
/*
* HID driver for Elecom BM084 (bluetooth mouse).
* Removes a non-existing horizontal wheel from
* the HID descriptor.
* (This module is based on "hid-ortek".)
*
* HID driver for ELECOM devices.
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
*/
/*
@ -23,15 +21,61 @@
static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_ELECOM_BM084:
/* The BM084 Bluetooth mouse includes a non-existing horizontal
* wheel in the HID descriptor. */
if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
rdesc[47] = 0x00;
}
break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
/* The DEFT trackball has eight buttons, but its descriptor only
* reports five, disabling the three Fn buttons on the top of
* the mouse.
*
* Apply the following diff to the descriptor:
*
* Collection (Physical), Collection (Physical),
* Report ID (1), Report ID (1),
* Report Count (5), -> Report Count (8),
* Report Size (1), Report Size (1),
* Usage Page (Button), Usage Page (Button),
* Usage Minimum (01h), Usage Minimum (01h),
* Usage Maximum (05h), -> Usage Maximum (08h),
* Logical Minimum (0), Logical Minimum (0),
* Logical Maximum (1), Logical Maximum (1),
* Input (Variable), Input (Variable),
* Report Count (1), -> Report Count (0),
* Report Size (3), Report Size (3),
* Input (Constant), Input (Constant),
* Report Size (16), Report Size (16),
* Report Count (2), Report Count (2),
* Usage Page (Desktop), Usage Page (Desktop),
* Usage (X), Usage (X),
* Usage (Y), Usage (Y),
* Logical Minimum (-32768), Logical Minimum (-32768),
* Logical Maximum (32767), Logical Maximum (32767),
* Input (Variable, Relative), Input (Variable, Relative),
* End Collection, End Collection,
*/
if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
rdesc[13] = 8; /* Button/Variable Report Count */
rdesc[21] = 8; /* Button/Variable Usage Maximum */
rdesc[29] = 0; /* Button/Constant Report Count */
}
break;
}
return rdesc;
}
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
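The fixup above only takes effect once it is hooked into the driver's hid_driver ops; that wiring is not shown in this hunk, so the following is an assumed-typical sketch of it rather than the file's literal code:

static struct hid_driver elecom_driver = {
	.name = "elecom",
	.id_table = elecom_devices,
	.report_fixup = elecom_report_fixup,
};
module_hid_driver(elecom_driver);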

View File

@ -173,6 +173,7 @@
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0
#define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585
#define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
@ -358,6 +359,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004

View File

@ -349,6 +349,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
magicmouse_emit_buttons(msc, clicks & 3);
input_mt_report_pointer_emulation(input, true);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@ -388,6 +389,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__clear_bit(BTN_RIGHT, input->keybit);
__clear_bit(BTN_MIDDLE, input->keybit);
__set_bit(BTN_MOUSE, input->keybit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
__set_bit(BTN_TOOL_FINGER, input->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
@ -395,9 +399,6 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(BTN_TOOL_QUINTTAP, input->keybit);
__set_bit(BTN_TOUCH, input->keybit);
__set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
__set_bit(EV_ABS, input->evbit);

View File

@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
return 0;
}
static void i2c_hid_acpi_fix_up_power(struct device *dev)
{
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
if (handle && acpi_bus_get_device(handle, &adev) == 0)
acpi_device_fix_up_power(adev);
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
{"ACPI0C50", 0 },
{"PNP0C50", 0 },
@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
{
return -ENODEV;
}
static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
#endif
#ifdef CONFIG_OF
@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client,
if (ret < 0)
goto err_regulator;
i2c_hid_acpi_fix_up_power(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);

View File

@ -1571,10 +1571,15 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
unsigned char *data = wacom->data;
if (wacom->pen_input)
if (wacom->pen_input) {
dev_dbg(wacom->pen_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
else if (wacom->touch_input)
if (len == WACOM_PKGLEN_PENABLED ||
data[0] == WACOM_REPORT_PENABLED)
return wacom_tpc_pen(wacom);
}
else if (wacom->touch_input) {
dev_dbg(wacom->touch_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
@ -1585,9 +1590,6 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
case WACOM_PKGLEN_TPC2FG:
return wacom_tpc_mt_touch(wacom);
case WACOM_PKGLEN_PENABLED:
return wacom_tpc_pen(wacom);
default:
switch (data[0]) {
case WACOM_REPORT_TPC1FG:
@ -1600,8 +1602,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
case WACOM_REPORT_TPCMT2:
return wacom_mt_touch(wacom);
case WACOM_REPORT_PENABLED:
return wacom_tpc_pen(wacom);
}
}
}

View File

@ -343,6 +343,7 @@ config SENSORS_ASB100
config SENSORS_ASPEED
tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
select REGMAP
help
This driver provides support for ASPEED AST2400/AST2500 PWM
and Fan Tacho controllers.

View File

@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data
return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
}
static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
u8 fan_tach_ch)
{
u32 raw_data, tach_div, clk_source, sec, val;
@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
msleep(sec);
regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
if (!(val & RESULT_STATUS_MASK))
return -ETIMEDOUT;
raw_data = val & RESULT_VALUE_MASK;
tach_div = priv->type_fan_tach_clock_division[type];
tach_div = 0x4 << (tach_div * 2);
@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int index = sensor_attr->index;
u32 rpm;
int rpm;
struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
if (rpm < 0)
return rpm;
return sprintf(buf, "%u\n", rpm);
return sprintf(buf, "%d\n", rpm);
}
static umode_t pwm_is_visible(struct kobject *kobj,
@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj,
return a->mode;
}
static SENSOR_DEVICE_ATTR(pwm0, 0644,
show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm1, 0644,
show_pwm, set_pwm, 1);
show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, 0644,
show_pwm, set_pwm, 2);
show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, 0644,
show_pwm, set_pwm, 3);
show_pwm, set_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, 0644,
show_pwm, set_pwm, 4);
show_pwm, set_pwm, 3);
static SENSOR_DEVICE_ATTR(pwm5, 0644,
show_pwm, set_pwm, 5);
show_pwm, set_pwm, 4);
static SENSOR_DEVICE_ATTR(pwm6, 0644,
show_pwm, set_pwm, 6);
show_pwm, set_pwm, 5);
static SENSOR_DEVICE_ATTR(pwm7, 0644,
show_pwm, set_pwm, 6);
static SENSOR_DEVICE_ATTR(pwm8, 0644,
show_pwm, set_pwm, 7);
static struct attribute *pwm_dev_attrs[] = {
&sensor_dev_attr_pwm0.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = {
&sensor_dev_attr_pwm5.dev_attr.attr,
&sensor_dev_attr_pwm6.dev_attr.attr,
&sensor_dev_attr_pwm7.dev_attr.attr,
&sensor_dev_attr_pwm8.dev_attr.attr,
NULL,
};
@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = {
.is_visible = pwm_is_visible,
};
static SENSOR_DEVICE_ATTR(fan0_input, 0444,
show_rpm, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, 0444,
show_rpm, NULL, 1);
show_rpm, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, 0444,
show_rpm, NULL, 2);
show_rpm, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, 0444,
show_rpm, NULL, 3);
show_rpm, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, 0444,
show_rpm, NULL, 4);
show_rpm, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_input, 0444,
show_rpm, NULL, 5);
show_rpm, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_input, 0444,
show_rpm, NULL, 6);
show_rpm, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_input, 0444,
show_rpm, NULL, 7);
show_rpm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_input, 0444,
show_rpm, NULL, 8);
show_rpm, NULL, 7);
static SENSOR_DEVICE_ATTR(fan9_input, 0444,
show_rpm, NULL, 9);
show_rpm, NULL, 8);
static SENSOR_DEVICE_ATTR(fan10_input, 0444,
show_rpm, NULL, 10);
show_rpm, NULL, 9);
static SENSOR_DEVICE_ATTR(fan11_input, 0444,
show_rpm, NULL, 11);
show_rpm, NULL, 10);
static SENSOR_DEVICE_ATTR(fan12_input, 0444,
show_rpm, NULL, 12);
show_rpm, NULL, 11);
static SENSOR_DEVICE_ATTR(fan13_input, 0444,
show_rpm, NULL, 13);
show_rpm, NULL, 12);
static SENSOR_DEVICE_ATTR(fan14_input, 0444,
show_rpm, NULL, 14);
show_rpm, NULL, 13);
static SENSOR_DEVICE_ATTR(fan15_input, 0444,
show_rpm, NULL, 14);
static SENSOR_DEVICE_ATTR(fan16_input, 0444,
show_rpm, NULL, 15);
static struct attribute *fan_dev_attrs[] = {
&sensor_dev_attr_fan0_input.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = {
&sensor_dev_attr_fan13_input.dev_attr.attr,
&sensor_dev_attr_fan14_input.dev_attr.attr,
&sensor_dev_attr_fan15_input.dev_attr.attr,
&sensor_dev_attr_fan16_input.dev_attr.attr,
NULL
};
@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
if (ret)
return ret;
}
of_node_put(np);
priv->groups[0] = &pwm_dev_group;
priv->groups[1] = &fan_dev_group;

View File

@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
primary_path->packet_life_time =
cm_req_get_primary_local_ack_timeout(req_msg);
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
sa_path_set_service_id(primary_path, req_msg->service_id);
primary_path->service_id = req_msg->service_id;
if (req_msg->alt_local_lid) {
alt_path->dgid = req_msg->alt_local_gid;
@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
alt_path->packet_life_time =
cm_req_get_alt_local_ack_timeout(req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
sa_path_set_service_id(alt_path, req_msg->service_id);
alt_path->service_id = req_msg->service_id;
}
}

View File

@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
ib->sib_pkey = path->pkey;
ib->sib_flowinfo = path->flow_label;
memcpy(&ib->sib_addr, &path->sgid, 16);
ib->sib_sid = sa_path_get_service_id(path);
ib->sib_sid = path->service_id;
ib->sib_scope_id = 0;
} else {
ib->sib_pkey = listen_ib->sib_pkey;
@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
memcpy(&req->local_gid, &req_param->primary_path->sgid,
sizeof(req->local_gid));
req->has_gid = true;
req->service_id =
sa_path_get_service_id(req_param->primary_path);
req->service_id = req_param->primary_path->service_id;
req->pkey = be16_to_cpu(req_param->primary_path->pkey);
if (req->pkey != req_param->bth_pkey)
pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
struct rdma_route *rt;
const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
const __be64 service_id = sa_path_get_service_id(path);
const __be64 service_id =
ib_event->param.req_rcvd.primary_path->service_id;
int ret;
id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
sa_path_set_service_id(&path_rec,
rdma_get_service_id(&id_priv->id,
cma_dst_addr(id_priv)));
path_rec.service_id = rdma_get_service_id(&id_priv->id,
cma_dst_addr(id_priv));
comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |

View File

@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
int ib_sa_init(void);
void ib_sa_cleanup(void);
int ibnl_init(void);
void ibnl_cleanup(void);
/**
* Check if there are any listeners to the netlink group
* @group: the netlink group ID
* Returns 0 on success or a negative for no listeners.
*/
int ibnl_chk_listeners(unsigned int group);
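/*
 * Hedged usage sketch (not part of this hunk): callers gate netlink requests
 * on a listener being registered for the group, along the lines of
 *
 *	if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
 *		return -EPERM;
 *
 * The group constant and error code here are illustrative assumptions.
 */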
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
struct netlink_callback *cb);
int ib_nl_handle_set_timeout(struct sk_buff *skb,

View File

@ -37,6 +37,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
struct ibnl_client {
struct list_head list;
@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
return -1;
return 0;
}
EXPORT_SYMBOL(ibnl_chk_listeners);
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[])

View File

@ -194,7 +194,7 @@ static u32 tid;
.field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
{ PATH_REC_FIELD(ib.service_id),
{ PATH_REC_FIELD(service_id),
.offset_words = 0,
.offset_bits = 0,
.size_bits = 64 },
@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
.field_name = "sa_path_rec:" #field
static const struct ib_field opa_path_rec_table[] = {
{ OPA_PATH_REC_FIELD(opa.service_id),
{ OPA_PATH_REC_FIELD(service_id),
.offset_words = 0,
.offset_bits = 0,
.size_bits = 64 },
@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Now build the attributes */
if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
val64 = be64_to_cpu(sa_rec->service_id);
nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
sizeof(val64), &val64);
}

View File

@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
page = sg_page(sg);
if (umem->writable && dirty)
if (!PageDirty(page) && umem->writable && dirty)
set_page_dirty_lock(page);
put_page(page);
}

View File

@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
struct vm_area_struct *vma;
struct hstate *h;
down_read(&mm->mmap_sem);
vma = find_vma(mm, ib_umem_start(umem));
if (!vma || !is_vm_hugetlb_page(vma))
if (!vma || !is_vm_hugetlb_page(vma)) {
up_read(&mm->mmap_sem);
return -EINVAL;
}
h = hstate_vma(vma);
umem->page_shift = huge_page_shift(h);
up_read(&mm->mmap_sem);
umem->hugetlb = 1;
} else {
umem->hugetlb = 0;

View File

@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
}
EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
struct sa_path_rec *src)
{
memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
dst->dlid = htons(ntohl(sa_path_get_dlid(src)));
dst->slid = htons(ntohl(sa_path_get_slid(src)));

View File

@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
kfree_skb(skb);
return 0;
}
@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
kfree_skb(skb);
return 0;
}
@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
pr_debug("%s rdev %p\n", __func__, rdev);
req->cmd = CPL_ABORT_NO_RST;
skb_get(skb);
ret = c4iw_ofld_send(rdev, skb);
if (ret) {
__state_set(&ep->com, DEAD);
queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
} else
kfree_skb(skb);
}
static int send_flowc(struct c4iw_ep *ep)
@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
sizeof(struct tcphdr) +
((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
child_ep->mtu = peer_mss + hdrs;

View File

@ -971,7 +971,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.lldi.sge_egrstatuspagesize);
devp->rdev.hw_queue.t4_eq_status_entries =
devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
devp->rdev.lldi.sge_egrstatuspagesize / 64;
devp->rdev.hw_queue.t4_max_eq_size = 65520;
devp->rdev.hw_queue.t4_max_iq_size = 65520;
devp->rdev.hw_queue.t4_max_rq_size = 8192 -

View File

@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
}
}
static void write_global_credit(struct hfi1_devdata *dd,
u8 vau, u16 total, u16 shared)
/*
* Set up allocation unit value.
*/
void set_up_vau(struct hfi1_devdata *dd, u8 vau)
{
write_csr(dd, SEND_CM_GLOBAL_CREDIT,
((u64)total <<
SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
((u64)shared <<
SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
/* do not modify other values in the register */
reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/*
* Set up initial VL15 credits of the remote. Assumes the rest of
* the CM credit registers are zero from a previous global or credit reset .
* the CM credit registers are zero from a previous global or credit reset.
* Shared limit for VL15 will always be 0.
*/
void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
{
/* leave shared count at zero for both global and VL15 */
write_global_credit(dd, vau, vl15buf, 0);
u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
/* set initial values for total and shared credit limit */
reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
/*
* Set total limit to be equal to VL15 credits.
* Leave shared limit at 0.
*/
reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
for (i = 0; i < TXE_NUM_DATA_VL; i++)
write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
write_csr(dd, SEND_CM_CREDIT_VL15, 0);
write_global_credit(dd, 0, 0, 0);
write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
/* reset the CM block */
pio_send_control(dd, PSC_CM_RESET);
/* reset cached value */
dd->vl15buf_cached = 0;
}
/* convert a vCU to a CU */
@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
{
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
link_up_work);
struct hfi1_devdata *dd = ppd->dd;
set_link_state(ppd, HLS_UP_INIT);
/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
read_ltp_rtt(ppd->dd);
read_ltp_rtt(dd);
/*
* OPA specifies that certain counters are cleared on a transition
* to link up, so do that.
*/
clear_linkup_counters(ppd->dd);
clear_linkup_counters(dd);
/*
* And (re)set link up default values.
*/
set_linkup_defaults(ppd);
/*
* Set VL15 credits. Use cached value from verify cap interrupt.
* In case of quick linkup or simulator, vl15 value will be set by
* handle_linkup_change. VerifyCap interrupt handler will not be
* called in those scenarios.
*/
if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
set_up_vl15(dd, dd->vl15buf_cached);
/* enforce link speed enabled */
if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
/* oops - current speed is not enabled, bounce */
dd_dev_err(ppd->dd,
dd_dev_err(dd,
"Link speed active 0x%x is outside enabled 0x%x, downing link\n",
ppd->link_speed_active, ppd->link_speed_enabled);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
*/
if (vau == 0)
vau = 1;
set_up_vl15(dd, vau, vl15buf);
set_up_vau(dd, vau);
/*
* Set VL15 credits to 0 in global credit register. Cache remote VL15
* credits value and wait for the link-up interrupt to set it.
*/
set_up_vl15(dd, 0);
dd->vl15buf_cached = vl15buf;
/* set up the LCB CRC mode */
crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;

View File

@ -839,7 +839,9 @@
#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0

View File

@ -1045,6 +1045,14 @@ struct hfi1_devdata {
/* initial vl15 credits to use */
u16 vl15_init;
/*
* Cached value for vl15buf, read during verify cap interrupt. VL15
* credits are to be kept at 0 and set when handling the link-up
* interrupt. This removes the possibility of receiving VL15 MAD
* packets before this HFI is ready.
*/
u16 vl15buf_cached;
/* Misc small ints */
u8 n_krcv_queues;
u8 qos_shift;
@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

View File

@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
* the remote values. Both sides must be using the values.
*/
if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
set_up_vl15(dd, dd->vau, dd->vl15_init);
set_up_vau(dd, dd->vau);
set_up_vl15(dd, dd->vl15_init);
assign_remote_cm_au_table(dd, dd->vcu);
}
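Taken together, the hfi1 hunks above split the CM credit programming into two steps; a condensed summary of the resulting order (paraphrasing this diff, not new driver code):
/*
 * verify-cap interrupt (handle_verify_cap):
 *	set_up_vau(dd, vau);
 *	set_up_vl15(dd, 0);              keep VL15 credits at 0 for now
 *	dd->vl15buf_cached = vl15buf;    remember the remote's value
 *
 * link-up (handle_link_up), unless quick linkup or simulator:
 *	set_up_vl15(dd, dd->vl15buf_cached);
 *
 * quick linkup / functional simulator (handle_linkup_change):
 *	set_up_vau(dd, dd->vau);
 *	set_up_vl15(dd, dd->vl15_init);
 */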

View File

@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
/*
* Save BARs and command to rewrite after device reset.
*/
dd->pcibar0 = addr;
dd->pcibar1 = addr >> 32;
pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);

Some files were not shown because too many files have changed in this diff.