mirror of https://gitee.com/openkylin/linux.git
commit 8cb38e9921
@@ -1,12 +1,16 @@
 * Freescale Smart Direct Memory Access (SDMA) Controller for i.MX

 Required properties:
-- compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma",
-  "fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma",
-  "fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or
-  "fsl,imx6q-sdma". The -to variants should be preferred since they
-  allow determining the correct ROM script addresses needed for
-  the driver to work without additional firmware.
+- compatible : Should be one of
+      "fsl,imx25-sdma"
+      "fsl,imx31-sdma", "fsl,imx31-to1-sdma", "fsl,imx31-to2-sdma"
+      "fsl,imx35-sdma", "fsl,imx35-to1-sdma", "fsl,imx35-to2-sdma"
+      "fsl,imx51-sdma"
+      "fsl,imx53-sdma"
+      "fsl,imx6q-sdma"
+  The -to variants should be preferred since they allow determining the
+  correct ROM script addresses needed for the driver to work without
+  additional firmware.
 - reg : Should contain SDMA registers location and length
 - interrupts : Should contain SDMA interrupt
 - #dma-cells : Must be <3>.
MAINTAINERS
@@ -538,7 +538,7 @@ F: arch/alpha/
 ALTERA UART/JTAG UART SERIAL DRIVERS
 M:  Tobias Klauser <tklauser@distanz.ch>
 L:  linux-serial@vger.kernel.org
-L:  nios2-dev@sopc.et.ntust.edu.tw (moderated for non-subscribers)
+L:  nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:  Maintained
 F:  drivers/tty/serial/altera_uart.c
 F:  drivers/tty/serial/altera_jtaguart.c

@@ -2611,9 +2611,9 @@ DC395x SCSI driver
 M:  Oliver Neukum <oliver@neukum.org>
 M:  Ali Akcaagac <aliakc@web.de>
 M:  Jamie Lenehan <lenehan@twibble.org>
-W:  http://twibble.org/dist/dc395x/
 L:  dc395x@twibble.org
-L:  http://lists.twibble.org/mailman/listinfo/dc395x/
+W:  http://twibble.org/dist/dc395x/
+W:  http://lists.twibble.org/mailman/listinfo/dc395x/
 S:  Maintained
 F:  Documentation/scsi/dc395x.txt
 F:  drivers/scsi/dc395x.*

@@ -2848,12 +2848,22 @@ F: lib/kobj*
 DRM DRIVERS
 M:  David Airlie <airlied@linux.ie>
 L:  dri-devel@lists.freedesktop.org
-T:  git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T:  git git://people.freedesktop.org/~airlied/linux
 S:  Maintained
 F:  drivers/gpu/drm/
 F:  include/drm/
 F:  include/uapi/drm/

+RADEON DRM DRIVERS
+M:  Alex Deucher <alexander.deucher@amd.com>
+M:  Christian König <christian.koenig@amd.com>
+L:  dri-devel@lists.freedesktop.org
+T:  git git://people.freedesktop.org/~agd5f/linux
+S:  Supported
+F:  drivers/gpu/drm/radeon/
+F:  include/drm/radeon*
+F:  include/uapi/drm/radeon*
+
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:  Daniel Vetter <daniel.vetter@ffwll.ch>
 M:  Jani Nikula <jani.nikula@linux.intel.com>

@@ -5501,6 +5511,11 @@ W: http://www.kernel.org/doc/man-pages
 L:  linux-man@vger.kernel.org
 S:  Maintained

+MARVELL ARMADA DRM SUPPORT
+M:  Russell King <rmk+kernel@arm.linux.org.uk>
+S:  Maintained
+F:  drivers/gpu/drm/armada/
+
 MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
 M:  Mirko Lindner <mlindner@marvell.com>
 M:  Stephen Hemminger <stephen@networkplumber.org>

@@ -8443,8 +8458,8 @@ TARGET SUBSYSTEM
 M:  Nicholas A. Bellinger <nab@linux-iscsi.org>
-L:  linux-scsi@vger.kernel.org
+L:  target-devel@vger.kernel.org
-L:  http://groups.google.com/group/linux-iscsi-target-dev
 W:  http://www.linux-iscsi.org
+W:  http://groups.google.com/group/linux-iscsi-target-dev
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 S:  Supported
 F:  drivers/target/
Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Shuffling Zombie Juror

 # *DOCUMENTATION*

@@ -605,10 +605,11 @@ endif
 ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
   stackp-flag := -fstack-protector
   ifeq ($(call cc-option, $(stackp-flag)),)
-    $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \
-             -fstack-protector not supported by compiler))
+    $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \
+             -fstack-protector not supported by compiler)
   endif
-else ifdef CONFIG_CC_STACKPROTECTOR_STRONG
+else
+ifdef CONFIG_CC_STACKPROTECTOR_STRONG
   stackp-flag := -fstack-protector-strong
   ifeq ($(call cc-option, $(stackp-flag)),)
     $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \

@@ -618,6 +619,7 @@ else
   # Force off for distro compilers that enable stack protector by default.
   stackp-flag := $(call cc-option, -fno-stack-protector)
 endif
+endif
 KBUILD_CFLAGS += $(stackp-flag)

 # This warning generated too much noise in a regular build.
@@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                                    unsigned long cmd,
                                    void *v)
 {
-   if (cmd == CPU_PM_EXIT) {
+   if (cmd == CPU_PM_EXIT &&
+       __hyp_get_vectors() == hyp_default_vectors) {
        cpu_init_hyp_mode(NULL);
        return NOTIFY_OK;
    }
@@ -220,6 +220,10 @@ after_vfp_restore:
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
  * passed in r0 and r1.
  *
+ * A function pointer with a value of 0xffffffff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
+ * arch/arm/kernel/hyp_stub.S.
+ *
  * The calling convention follows the standard AAPCS:
  *   r0 - r3: caller save
  *   r12:     caller save

@@ -363,6 +367,11 @@ hyp_hvc:
 host_switch_to_hyp:
    pop {r0, r1, r2}

+   /* Check for __hyp_get_vectors */
+   cmp r0, #-1
+   mrceq   p15, 4, r0, c12, c0, 0  @ get HVBAR
+   beq 1f
+
    push    {lr}
    mrs lr, SPSR
    push    {lr}

@@ -378,7 +387,7 @@ THUMB( orr lr, #1)
    pop {lr}
    msr SPSR_csxf, lr
    pop {lr}
-   eret
+1: eret

 guest_trap:
    load_vcpu   @ Load VCPU pointer to r0
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H

+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
    asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");

@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()

+#else  /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x)   do { } while (0)
+
+#endif /* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>

 #endif /* __ASM_PERCPU_H */
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)   (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte)     (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte)     (pte_val(pte) & PTE_AF)
-#define pte_special(pte)   (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)     (pte_val(pte) & PTE_WRITE)
+#define pte_present(pte)   (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte)     (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte)     (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte)   (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte)     (!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)      (!(pte_val(pte) & PTE_UXN))

 #define pte_valid_user(pte) \
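The !! normalization above matters because these flag bits live high in a 64-bit PTE value; the raw masked result can be silently truncated to zero when narrowed to a smaller type. A minimal user-space sketch of that failure mode (the bit position is illustrative, not the kernel's exact layout):

#include <stdint.h>
#include <stdio.h>

#define PTE_FLAG (UINT64_C(1) << 57)    /* hypothetical high flag bit */

#define pte_flag_raw(v)     ((v) & PTE_FLAG)        /* 0 or 1<<57 */
#define pte_flag_norm(v)    (!!((v) & PTE_FLAG))    /* always 0 or 1 */

int main(void)
{
    uint64_t pte = PTE_FLAG;
    int raw  = pte_flag_raw(pte);   /* truncated to 32 bits: becomes 0 */
    int norm = pte_flag_norm(pte);  /* stays 1 */
    printf("raw=%d norm=%d\n", raw, norm);
    return 0;
}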
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)

    frame->sp = fp + 0x10;
    frame->fp = *(unsigned long *)(fp);
-   frame->pc = *(unsigned long *)(fp + 8);
+   /*
+    * -4 here because we care about the PC at time of bl,
+    * not where the return will go.
+    */
+   frame->pc = *(unsigned long *)(fp + 8) - 4;

    return 0;
 }
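The value at fp + 8 is the saved link register, which a bl sets to the address of the following instruction; backing up one fixed-width instruction recovers the call site itself. A tiny sketch of the arithmetic (the address is made up):

#include <stdint.h>
#include <stdio.h>

/* On a fixed 4-byte-instruction ISA, bl at address X stores X + 4 in LR. */
static uint64_t call_site_from_lr(uint64_t lr)
{
    return lr - 4;
}

int main(void)
{
    uint64_t lr = 0x8001004;    /* hypothetical saved return address */
    printf("call site: %#llx\n",
           (unsigned long long)call_site_from_lr(lr));
    return 0;
}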
@@ -694,6 +694,24 @@ __hyp_panic_str:

    .align  2

+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed).  The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
 ENTRY(kvm_call_hyp)
    hvc #0
    ret

@@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2
    pop x2, x3
    pop x0, x1

-   push    lr, xzr
+   /* Check for __hyp_get_vectors */
+   cbnz    x0, 1f
+   mrs x0, vbar_el2
+   b   2f
+
+1: push    lr, xzr

    /*
     * Compute the function address in EL2, and shuffle the parameters.

@@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2
    blr lr

    pop lr, xzr
-   eret
+2: eret

 el1_trap:
    /*
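Both the 32-bit and 64-bit hunks implement the same convention: a magic function-pointer value (0xffffffff on arm, 0 on arm64) makes the hypercall return the current exception vector base instead of calling through it. A hedged C rendering of the arm64 dispatch (read_vbar_el2 and the addresses are stand-ins, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*hyp_fn_t)(uint64_t, uint64_t, uint64_t);

/* Stand-in for reading VBAR_EL2; the real thing is an mrs instruction. */
static uint64_t read_vbar_el2(void)
{
    return 0xffff000008001000ULL;   /* hypothetical vector base */
}

/* NULL means "__hyp_get_vectors": return the EL2 vector base;
 * anything else is a Hyp-mode function to call with up to 3 args. */
static uint64_t hyp_dispatch(hyp_fn_t fn, uint64_t a0, uint64_t a1, uint64_t a2)
{
    if (!fn)
        return read_vbar_el2();
    return fn(a0, a1, a2);
}

static uint64_t demo_fn(uint64_t a, uint64_t b, uint64_t c)
{
    return a + b + c;
}

int main(void)
{
    printf("vectors: %#llx\n",
           (unsigned long long)hyp_dispatch(NULL, 0, 0, 0));
    printf("call:    %llu\n",
           (unsigned long long)hyp_dispatch(demo_fn, 1, 2, 3));
    return 0;
}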
@@ -1,4 +1,4 @@
+generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
 generic-y += cputime.h

@@ -6,6 +6,7 @@ generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h

@@ -18,6 +19,7 @@ generic-y += local.h
 generic-y += mman.h
 generic-y += mutex.h
 generic-y += percpu.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h

@@ -31,5 +33,3 @@ generic-y += trace_clock.h
 generic-y += types.h
 generic-y += word-at-a-time.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
@@ -1,8 +0,0 @@
-#ifndef _M68K_BARRIER_H
-#define _M68K_BARRIER_H
-
-#define nop()  do { asm volatile ("nop"); barrier(); } while (0)
-
-#include <asm-generic/barrier.h>
-
-#endif /* _M68K_BARRIER_H */
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>


-#define NR_syscalls        349
+#define NR_syscalls        351

 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
@@ -354,5 +354,7 @@
 #define __NR_process_vm_writev 346
 #define __NR_kcmp      347
 #define __NR_finit_module  348
+#define __NR_sched_setattr 349
+#define __NR_sched_getattr 350

 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
@@ -369,4 +369,6 @@ ENTRY(sys_call_table)
    .long sys_process_vm_writev
    .long sys_kcmp
    .long sys_finit_module
+   .long sys_sched_setattr
+   .long sys_sched_getattr     /* 350 */

@@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)

    /*
     * We can't access below the stack pointer in the 32bit ABI and
-    * can access 288 bytes in the 64bit ABI
+    * can access 288 bytes in the 64bit big-endian ABI,
+    * or 512 bytes with the new ELFv2 little-endian ABI.
     */
    if (!is_32bit_task())
-       usp -= 288;
+       usp -= USER_REDZONE_SIZE;

    return (void __user *) (usp - len);
 }
@@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);

-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
-int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);

 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
               uint32_t addr, uint32_t data, uint32_t sz);
@@ -28,11 +28,23 @@

 #ifdef __powerpc64__

+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer.  This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE  512
+#define KERNEL_REDZONE_SIZE    288
+
 #define STACK_FRAME_OVERHEAD   112 /* size of minimum stack frame */
 #define STACK_FRAME_LR_SAVE    2   /* Location of LR in stack frame */
 #define STACK_FRAME_REGS_MARKER    ASM_CONST(0x7265677368657265)
 #define STACK_INT_FRAME_SIZE   (sizeof(struct pt_regs) + \
-                STACK_FRAME_OVERHEAD + 288)
+                STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
 #define STACK_FRAME_MARKER 12

 /* Size of dummy stack frame allocated when calling signal handler. */

@@ -41,6 +53,8 @@

 #else /* __powerpc64__ */

+#define USER_REDZONE_SIZE  0
+#define KERNEL_REDZONE_SIZE    0
 #define STACK_FRAME_OVERHEAD   16  /* size of minimum stack frame */
 #define STACK_FRAME_LR_SAVE    1   /* Location of LR in stack frame */
 #define STACK_FRAME_REGS_MARKER    ASM_CONST(0x72656773)
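The redzone is the area below the stack pointer that the ABI guarantees signal handlers and interrupts will not scribble on, so any code carving out user stack space has to skip it first. A quick numeric sketch of the allocation pattern (the size is from the comment above; the stack address is made up):

#include <stdio.h>

#define USER_REDZONE_SIZE   512 /* ELFv2 little-endian redzone */

/* Reserve len bytes on a user stack without touching the redzone. */
static unsigned long alloc_below_user_sp(unsigned long usp, long len)
{
    usp -= USER_REDZONE_SIZE;
    return usp - len;
}

int main(void)
{
    unsigned long usp = 0x7fffffffe000UL;   /* hypothetical stack pointer */
    printf("allocation at %#lx\n", alloc_below_user_sp(usp, 128));
    return 0;
}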
@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
            size_t csize, unsigned long offset, int userbuf)
 {
    void  *vaddr;
+   phys_addr_t paddr;

    if (!csize)
        return 0;

    csize = min_t(size_t, csize, PAGE_SIZE);
+   paddr = pfn << PAGE_SHIFT;

-   if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
-       vaddr = __va(pfn << PAGE_SHIFT);
+   if (memblock_is_region_memory(paddr, csize)) {
+       vaddr = __va(paddr);
        csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
    } else {
-       vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+       vaddr = __ioremap(paddr, PAGE_SIZE, 0);
        csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
        iounmap(vaddr);
    }
@@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
  */
 static int test_24bit_addr(unsigned long ip, unsigned long addr)
 {
+   addr = ppc_function_entry((void *)addr);

    /* use the create_branch to verify that this offset can be branched */
    return create_branch((unsigned int *)ip, addr, 0);
@@ -65,8 +65,8 @@ struct rt_sigframe {
    struct siginfo __user *pinfo;
    void __user *puc;
    struct siginfo info;
-   /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
-   char abigap[288];
+   /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
+   char abigap[USER_REDZONE_SIZE];
 } __attribute__ ((aligned (16)));

 static const char fmt32[] = KERN_INFO \
@@ -114,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
            ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
 #endif /* CONFIG_DEBUG_FS */

+
 /**
  * ioda_eeh_post_init - Chip dependent post initialization
  * @hose: PCI controller

@@ -221,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
    return ret;
 }

+static void ioda_eeh_phb_diag(struct pci_controller *hose)
+{
+   struct pnv_phb *phb = hose->private_data;
+   long rc;
+
+   rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+                    PNV_PCI_DIAG_BUF_SIZE);
+   if (rc != OPAL_SUCCESS) {
+       pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
+              __func__, hose->global_number, rc);
+       return;
+   }
+
+   pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
+}
+
 /**
  * ioda_eeh_get_state - Retrieve the state of PE
  * @pe: EEH PE

@@ -272,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
            result |= EEH_STATE_DMA_ACTIVE;
            result |= EEH_STATE_MMIO_ENABLED;
            result |= EEH_STATE_DMA_ENABLED;
+       } else if (!(pe->state & EEH_PE_ISOLATED)) {
+           eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+           ioda_eeh_phb_diag(hose);
        }

        return result;

@@ -315,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
            __func__, fstate, hose->global_number, pe_no);
    }

+   /* Dump PHB diag-data for frozen PE */
+   if (result != EEH_STATE_NOT_SUPPORT &&
+       (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
+       (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
+       !(pe->state & EEH_PE_ISOLATED)) {
+       eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+       ioda_eeh_phb_diag(hose);
+   }
+
    return result;
 }

@@ -529,42 +558,6 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
    return ret;
 }

-/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: EEH PE
- * @severity: Severity level of the log
- * @drv_log: buffer to store the log
- * @len: space of the log buffer
- *
- * The function is used to retrieve error log from P7IOC.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
-               char *drv_log, unsigned long len)
-{
-   s64 ret;
-   unsigned long flags;
-   struct pci_controller *hose = pe->phb;
-   struct pnv_phb *phb = hose->private_data;
-
-   spin_lock_irqsave(&phb->lock, flags);
-
-   ret = opal_pci_get_phb_diag_data2(phb->opal_id,
-           phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
-   if (ret) {
-       spin_unlock_irqrestore(&phb->lock, flags);
-       pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
-              __func__, hose->global_number, pe->addr, ret);
-       return -EIO;
-   }
-
-   /* The PHB diag-data is always indicative */
-   pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-
-   spin_unlock_irqrestore(&phb->lock, flags);
-
-   return 0;
-}
-
 /**
  * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
  * @pe: EEH PE

@@ -646,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
    }
 }

-static void ioda_eeh_phb_diag(struct pci_controller *hose)
-{
-   struct pnv_phb *phb = hose->private_data;
-   long rc;
-
-   rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
-                    PNV_PCI_DIAG_BUF_SIZE);
-   if (rc != OPAL_SUCCESS) {
-       pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
-              __func__, hose->global_number, rc);
-       return;
-   }
-
-   pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-}
-
 static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
                   struct eeh_pe **pe)
 {

@@ -834,6 +811,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
                __func__, err_type);
        }

+       /*
+        * The EEH core will try to recover from a fenced PHB or a
+        * frozen PE.  For a frozen PE, the EEH core enables the IO
+        * path before collecting logs, which destroys the evidence,
+        * so we have to dump the log in advance here.
+        */
+       if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+           ret == EEH_NEXT_ERR_FENCED_PHB) &&
+           !((*pe)->state & EEH_PE_ISOLATED)) {
+           eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+           ioda_eeh_phb_diag(hose);
+       }
+
        /*
         * If we have no errors on the specific PHB or only
         * informative error there, we continue poking it.

@@ -852,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = {
    .set_option     = ioda_eeh_set_option,
    .get_state      = ioda_eeh_get_state,
    .reset          = ioda_eeh_reset,
-   .get_log        = ioda_eeh_get_log,
    .configure_bridge   = ioda_eeh_configure_bridge,
    .next_error     = ioda_eeh_next_error
 };
@@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc)
    }
 }

-static u64 opal_scom_unmangle(u64 reg)
+static u64 opal_scom_unmangle(u64 addr)
 {
    /*
     * XSCOM indirect addresses have the top bit set. Additionally
-    * the reset of the top 3 nibbles is always 0.
+    * the rest of the top 3 nibbles is always 0.
     *
     * Because the debugfs interface uses signed offsets and shifts
     * the address left by 3, we basically cannot use the top 4 bits

@@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg)
     * conversion here. To leave room for further xscom address
     * expansion, we only clear out the top byte
     *
+    * For in-kernel use, we also support the real indirect bit, so
+    * we test for any of the top 5 bits
+    *
     */
-   if (reg & (1ull << 59))
-       reg = (reg & ~(0xffull << 56)) | (1ull << 63);
-   return reg;
+   if (addr & (0x1full << 59))
+       addr = (addr & ~(0xffull << 56)) | (1ull << 63);
+   return addr;
 }

 static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)

@@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
    int64_t rc;
    __be64 v;

-   reg = opal_scom_unmangle(reg);
-   rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+   reg = opal_scom_unmangle(m->addr + reg);
+   rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v));
    *value = be64_to_cpu(v);
    return opal_xscom_err_xlate(rc);
 }

@@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value)
    struct opal_scom_map *m = map;
    int64_t rc;

-   reg = opal_scom_unmangle(reg);
-   rc = opal_xscom_write(m->chip, m->addr + reg, value);
+   reg = opal_scom_unmangle(m->addr + reg);
+   rc = opal_xscom_write(m->chip, reg, value);
    return opal_xscom_err_xlate(rc);
 }
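The unmangle step is pure bit arithmetic and easy to check in isolation; a stand-alone sketch with the constants from the hunk above:

#include <stdint.h>
#include <stdio.h>

/* If any of the top 5 bits (63..59) is set, treat the address as an
 * indirect XSCOM address: clear the top byte and set the real
 * indirect bit (bit 63). */
static uint64_t scom_unmangle(uint64_t addr)
{
    if (addr & (0x1fULL << 59))
        addr = (addr & ~(0xffULL << 56)) | (1ULL << 63);
    return addr;
}

int main(void)
{
    uint64_t mangled = 1ULL << 59;  /* hypothetical mangled address */
    printf("%#llx -> %#llx\n", (unsigned long long)mangled,
           (unsigned long long)scom_unmangle(mangled));
    return 0;
}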
@@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
    pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
        hose->global_number, common->version);

-   pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
-   pr_info(" portStatusReg: %08x\n", data->portStatusReg);
-   pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
-   pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
-   pr_info(" deviceStatus: %08x\n", data->deviceStatus);
-   pr_info(" slotStatus: %08x\n", data->slotStatus);
-   pr_info(" linkStatus: %08x\n", data->linkStatus);
-   pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
-   pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
-   pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
-   pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
-   pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
-   pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
-   pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
-   pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
-   pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
-   pr_info(" sourceId: %08x\n", data->sourceId);
-   pr_info(" errorClass: %016llx\n", data->errorClass);
-   pr_info(" correlator: %016llx\n", data->correlator);
-   pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
-   pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
-   pr_info(" lemFir: %016llx\n", data->lemFir);
-   pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
-   pr_info(" lemWOF: %016llx\n", data->lemWOF);
-   pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
-   pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
-   pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
-   pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
-   pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
-   pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
-   pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
-   pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
-   pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
-   pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
-   pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
-   pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
-   pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
-   pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
-   pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
-   pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+   if (data->brdgCtl)
+       pr_info(" brdgCtl: %08x\n",
+           data->brdgCtl);
+   if (data->portStatusReg || data->rootCmplxStatus ||
+       data->busAgentStatus)
+       pr_info(" UtlSts: %08x %08x %08x\n",
+           data->portStatusReg, data->rootCmplxStatus,
+           data->busAgentStatus);
+   if (data->deviceStatus || data->slotStatus ||
+       data->linkStatus || data->devCmdStatus ||
+       data->devSecStatus)
+       pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
+           data->deviceStatus, data->slotStatus,
+           data->linkStatus, data->devCmdStatus,
+           data->devSecStatus);
+   if (data->rootErrorStatus || data->uncorrErrorStatus ||
+       data->corrErrorStatus)
+       pr_info(" RootErrSts: %08x %08x %08x\n",
+           data->rootErrorStatus, data->uncorrErrorStatus,
+           data->corrErrorStatus);
+   if (data->tlpHdr1 || data->tlpHdr2 ||
+       data->tlpHdr3 || data->tlpHdr4)
+       pr_info(" RootErrLog: %08x %08x %08x %08x\n",
+           data->tlpHdr1, data->tlpHdr2,
+           data->tlpHdr3, data->tlpHdr4);
+   if (data->sourceId || data->errorClass ||
+       data->correlator)
+       pr_info(" RootErrLog1: %08x %016llx %016llx\n",
+           data->sourceId, data->errorClass,
+           data->correlator);
+   if (data->p7iocPlssr || data->p7iocCsr)
+       pr_info(" PhbSts: %016llx %016llx\n",
+           data->p7iocPlssr, data->p7iocCsr);
+   if (data->lemFir || data->lemErrorMask ||
+       data->lemWOF)
+       pr_info(" Lem: %016llx %016llx %016llx\n",
+           data->lemFir, data->lemErrorMask,
+           data->lemWOF);
+   if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+       data->phbErrorLog0 || data->phbErrorLog1)
+       pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
+           data->phbErrorStatus, data->phbFirstErrorStatus,
+           data->phbErrorLog0, data->phbErrorLog1);
+   if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+       data->mmioErrorLog0 || data->mmioErrorLog1)
+       pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
+           data->mmioErrorStatus, data->mmioFirstErrorStatus,
+           data->mmioErrorLog0, data->mmioErrorLog1);
+   if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+       data->dma0ErrorLog0 || data->dma0ErrorLog1)
+       pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
+           data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+           data->dma0ErrorLog0, data->dma0ErrorLog1);
+   if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+       data->dma1ErrorLog0 || data->dma1ErrorLog1)
+       pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
+           data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+           data->dma1ErrorLog0, data->dma1ErrorLog1);

    for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
        if ((data->pestA[i] >> 63) == 0 &&
            (data->pestB[i] >> 63) == 0)
            continue;

-       pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
-       pr_info("          PESTB: %016llx\n", data->pestB[i]);
+       pr_info(" PE[%3d] A/B: %016llx %016llx\n",
+           i, data->pestA[i], data->pestB[i]);
    }
 }

@@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
    data = (struct OpalIoPhb3ErrorData*)common;
    pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
        hose->global_number, common->version);

-   pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
-   pr_info(" portStatusReg: %08x\n", data->portStatusReg);
-   pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
-   pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
-   pr_info(" deviceStatus: %08x\n", data->deviceStatus);
-   pr_info(" slotStatus: %08x\n", data->slotStatus);
-   pr_info(" linkStatus: %08x\n", data->linkStatus);
-   pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
-   pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
-   pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
-   pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
-   pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
-   pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
-   pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
-   pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
-   pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
-   pr_info(" sourceId: %08x\n", data->sourceId);
-   pr_info(" errorClass: %016llx\n", data->errorClass);
-   pr_info(" correlator: %016llx\n", data->correlator);
-
-   pr_info(" nFir: %016llx\n", data->nFir);
-   pr_info(" nFirMask: %016llx\n", data->nFirMask);
-   pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
-   pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
-   pr_info(" PhbCsr: %016llx\n", data->phbCsr);
-   pr_info(" lemFir: %016llx\n", data->lemFir);
-   pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
-   pr_info(" lemWOF: %016llx\n", data->lemWOF);
-   pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
-   pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
-   pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
-   pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
-   pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
-   pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
-   pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
-   pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
-   pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
-   pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
-   pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
-   pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
-   pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
-   pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
-   pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
-   pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+   if (data->brdgCtl)
+       pr_info(" brdgCtl: %08x\n",
+           data->brdgCtl);
+   if (data->portStatusReg || data->rootCmplxStatus ||
+       data->busAgentStatus)
+       pr_info(" UtlSts: %08x %08x %08x\n",
+           data->portStatusReg, data->rootCmplxStatus,
+           data->busAgentStatus);
+   if (data->deviceStatus || data->slotStatus ||
+       data->linkStatus || data->devCmdStatus ||
+       data->devSecStatus)
+       pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
+           data->deviceStatus, data->slotStatus,
+           data->linkStatus, data->devCmdStatus,
+           data->devSecStatus);
+   if (data->rootErrorStatus || data->uncorrErrorStatus ||
+       data->corrErrorStatus)
+       pr_info(" RootErrSts: %08x %08x %08x\n",
+           data->rootErrorStatus, data->uncorrErrorStatus,
+           data->corrErrorStatus);
+   if (data->tlpHdr1 || data->tlpHdr2 ||
+       data->tlpHdr3 || data->tlpHdr4)
+       pr_info(" RootErrLog: %08x %08x %08x %08x\n",
+           data->tlpHdr1, data->tlpHdr2,
+           data->tlpHdr3, data->tlpHdr4);
+   if (data->sourceId || data->errorClass ||
+       data->correlator)
+       pr_info(" RootErrLog1: %08x %016llx %016llx\n",
+           data->sourceId, data->errorClass,
+           data->correlator);
+   if (data->nFir || data->nFirMask ||
+       data->nFirWOF)
+       pr_info(" nFir: %016llx %016llx %016llx\n",
+           data->nFir, data->nFirMask,
+           data->nFirWOF);
+   if (data->phbPlssr || data->phbCsr)
+       pr_info(" PhbSts: %016llx %016llx\n",
+           data->phbPlssr, data->phbCsr);
+   if (data->lemFir || data->lemErrorMask ||
+       data->lemWOF)
+       pr_info(" Lem: %016llx %016llx %016llx\n",
+           data->lemFir, data->lemErrorMask,
+           data->lemWOF);
+   if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+       data->phbErrorLog0 || data->phbErrorLog1)
+       pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
+           data->phbErrorStatus, data->phbFirstErrorStatus,
+           data->phbErrorLog0, data->phbErrorLog1);
+   if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+       data->mmioErrorLog0 || data->mmioErrorLog1)
+       pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
+           data->mmioErrorStatus, data->mmioFirstErrorStatus,
+           data->mmioErrorLog0, data->mmioErrorLog1);
+   if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+       data->dma0ErrorLog0 || data->dma0ErrorLog1)
+       pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
+           data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+           data->dma0ErrorLog0, data->dma0ErrorLog1);
+   if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+       data->dma1ErrorLog0 || data->dma1ErrorLog1)
+       pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
+           data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+           data->dma1ErrorLog0, data->dma1ErrorLog1);

    for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
        if ((data->pestA[i] >> 63) == 0 &&
            (data->pestB[i] >> 63) == 0)
            continue;

-       pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
-       pr_info("          PESTB: %016llx\n", data->pestB[i]);
+       pr_info(" PE[%3d] A/B: %016llx %016llx\n",
+           i, data->pestA[i], data->pestB[i]);
    }
 }
@@ -35,12 +35,7 @@
 #include "offline_states.h"

 /* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
-   .token = RTAS_UNKNOWN_SERVICE,
-   .nargs = 0,
-   .nret = 1,
-   .rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

 static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
                            CPU_STATE_OFFLINE;

@@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)

 static void rtas_stop_self(void)
 {
-   struct rtas_args *args = &rtas_stop_self_args;
+   struct rtas_args args = {
+       .token = cpu_to_be32(rtas_stop_self_token),
+       .nargs = 0,
+       .nret = 1,
+       .rets = &args.args[0],
+   };

    local_irq_disable();

-   BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+   BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

    printk("cpu %u (hwid %u) Ready to die...\n",
           smp_processor_id(), hard_smp_processor_id());
-   enter_rtas(__pa(args));
+   enter_rtas(__pa(&args));

    panic("Alas, I survived.\n");
 }

@@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void)
        }
    }

-   rtas_stop_self_args.token = rtas_token("stop-self");
+   rtas_stop_self_token = rtas_token("stop-self");
    qcss_tok = rtas_token("query-cpu-stopped-state");

-   if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+   if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
            qcss_tok == RTAS_UNKNOWN_SERVICE) {
        printk(KERN_INFO "CPU Hotplug not supported by firmware "
                "- disabling.\n");
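The replacement builds the RTAS argument block on the stack for each call, so the token can be byte-swapped per call; note that .rets pointing into the very object being initialized is well-defined C, since the variable is in scope from its own declarator. A stand-alone sketch of the pattern (the struct layout is illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct rtas_args_like {
    uint32_t token;
    uint32_t nargs;
    uint32_t nret;
    uint32_t args[16];
    uint32_t *rets;
};

int main(void)
{
    struct rtas_args_like args = {
        .token = 42,            /* hypothetical token */
        .nargs = 0,
        .nret  = 1,
        .rets  = &args.args[0], /* points into args itself */
    };

    args.args[0] = 7;           /* pretend firmware wrote this */
    printf("ret=%u\n", *args.rets);
    return 0;
}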
@@ -1421,5 +1421,5 @@ ENTRY(sys_sched_setattr_wrapper)
 ENTRY(sys_sched_getattr_wrapper)
    lgfr    %r2,%r2         # pid_t
    llgtr   %r3,%r3         # const char __user *
-   llgfr   %r3,%r3         # unsigned int
+   llgfr   %r4,%r4         # unsigned int
    jg  sys_sched_getattr
@@ -206,11 +206,13 @@ static void dma_cleanup_tables(struct zpci_dev *zdev)
    zdev->dma_table = NULL;
 }

-static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
-                      int size)
+static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
+                      unsigned long start, int size)
 {
-   unsigned long boundary_size = 0x1000000;
+   unsigned long boundary_size;

+   boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
+                 PAGE_SIZE) >> PAGE_SHIFT;
    return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                start, size, 0, boundary_size, 0);
 }
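The boundary is now derived from the device's actual segment boundary instead of a hard-coded constant; the expression rounds the boundary up to a whole page and converts bytes to pages. A quick numeric check of that arithmetic (the values are examples):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned long seg_boundary = 0xffffffffUL;  /* typical 4G - 1 mask */
    unsigned long pages = ALIGN(seg_boundary + 1, PAGE_SIZE) >> PAGE_SHIFT;

    printf("boundary_size = %#lx pages\n", pages); /* prints 0x100000 */
    return 0;
}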
@@ -111,7 +111,7 @@ struct mem_vector {
 };

 #define MEM_AVOID_MAX 5
-struct mem_vector mem_avoid[MEM_AVOID_MAX];
+static struct mem_vector mem_avoid[MEM_AVOID_MAX];

 static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
 {

@@ -180,7 +180,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 }

 /* Does this memory vector overlap a known avoided area? */
-bool mem_avoid_overlap(struct mem_vector *img)
+static bool mem_avoid_overlap(struct mem_vector *img)
 {
    int i;

@@ -192,8 +192,9 @@ bool mem_avoid_overlap(struct mem_vector *img)
    return false;
 }

-unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
-unsigned long slot_max = 0;
+static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+              CONFIG_PHYSICAL_ALIGN];
+static unsigned long slot_max;

 static void slots_append(unsigned long addr)
 {
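For context, slots[] collects every load address the KASLR code finds acceptable and slot_max counts them; the final base is one random pick out of slot_max. A hedged sketch of that selection scheme (the RNG and sizes here are stand-ins for the boot-time ones):

#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS 64

static unsigned long slots[MAX_SLOTS];
static unsigned long slot_max;

static void slots_append(unsigned long addr)
{
    if (slot_max < MAX_SLOTS)
        slots[slot_max++] = addr;
}

int main(void)
{
    /* pretend these candidate bases passed the avoid-list checks */
    slots_append(0x1000000);
    slots_append(0x2000000);
    slots_append(0x3000000);

    unsigned long pick = slots[rand() % slot_max];  /* toy RNG */
    printf("chosen base: %#lx\n", pick);
    return 0;
}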
@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
    for (i = 0; i < cpuc->n_events; i++) {
        if (event == cpuc->event_list[i]) {

+           if (i >= cpuc->n_events - cpuc->n_added)
+               --cpuc->n_added;
+
            if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(cpuc, event);

@@ -279,5 +279,7 @@ void arch_crash_save_vmcoreinfo(void)
    VMCOREINFO_SYMBOL(node_data);
    VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
+   vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
+                 (unsigned long)&_text - __START_KERNEL);
 }

@@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
            break;
        }

+       drop_large_spte(vcpu, iterator.sptep);
        if (!is_shadow_present_pte(*iterator.sptep)) {
            u64 base_addr = iterator.addr;

@@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
        else if (is_page_fault(intr_info))
            return enable_ept;
        else if (is_no_device(intr_info) &&
-            !(nested_read_cr0(vmcs12) & X86_CR0_TS))
+            !(vmcs12->guest_cr0 & X86_CR0_TS))
            return 0;
        return vmcs12->exception_bitmap &
                (1u << (intr_info & INTR_INFO_VECTOR_MASK));
@@ -6186,7 +6186,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
        frag->len -= len;
    }

-   if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+   if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
        vcpu->mmio_needed = 0;

        /* FIXME: return into emulator if single-stepping. */
@@ -20,6 +20,7 @@ config XTENSA
    select HAVE_FUNCTION_TRACER
    select HAVE_IRQ_TIME_ACCOUNTING
    select HAVE_PERF_EVENTS
+   select COMMON_CLK
    help
      Xtensa processors are 32-bit RISC machines designed by Tensilica
      primarily for embedded systems.  These processors are both

@@ -80,7 +81,6 @@ choice
 config XTENSA_VARIANT_FSF
    bool "fsf - default (not generic) configuration"
    select MMU
-   select HAVE_XTENSA_GPIO32

 config XTENSA_VARIANT_DC232B
    bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"

@@ -135,7 +135,6 @@ config HAVE_SMP
 config SMP
    bool "Enable Symmetric multi-processing support"
    depends on HAVE_SMP
-   select USE_GENERIC_SMP_HELPERS
    select GENERIC_SMP_IDLE_THREAD
    help
      Enabled SMP Software; allows more than one CPU/CORE
@@ -35,6 +35,13 @@ pic: pic {
        interrupt-controller;
    };

+   clocks {
+       osc: main-oscillator {
+           #clock-cells = <0>;
+           compatible = "fixed-clock";
+       };
+   };
+
    serial0: serial@fd050020 {
        device_type = "serial";
        compatible = "ns16550a";

@@ -42,9 +49,7 @@ serial0: serial@fd050020 {
        reg = <0xfd050020 0x20>;
        reg-shift = <2>;
        interrupts = <0 1>; /* external irq 0 */
-       /* Filled in by platform_setup from FPGA register
-        * clock-frequency = <100000000>;
-        */
+       clocks = <&osc>;
    };

    enet0: ethoc@fd030000 {

@@ -52,5 +57,6 @@ enet0: ethoc@fd030000 {
        reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
        interrupts = <1 1>; /* external irq 1 */
        local-mac-address = [00 50 c2 13 6f 00];
+       clocks = <&osc>;
    };
 };
@@ -25,7 +25,7 @@

 #ifdef CONFIG_MMU

-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
 extern unsigned long xtensa_kio_paddr;

 static inline unsigned long xtensa_get_kio_paddr(void)
@@ -23,25 +23,37 @@ void secondary_trap_init(void);

 static inline void spill_registers(void)
 {
+#if XCHAL_NUM_AREGS > 16
    __asm__ __volatile__ (
-       "movi   a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
-       "mov    a12, a0\n\t"
-       "rsr    a13, sar\n\t"
-       "xsr    a14, ps\n\t"
-       "movi   a0, _spill_registers\n\t"
-       "rsync\n\t"
-       "callx0 a0\n\t"
-       "mov    a0, a12\n\t"
-       "wsr    a13, sar\n\t"
-       "wsr    a14, ps\n\t"
-       : :
-#if defined(CONFIG_FRAME_POINTER)
-       : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
-#else
-       : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
-#endif
-         "memory");
+       "   call12  1f\n"
+       "   _j  2f\n"
+       "   retw\n"
+       "   .align  4\n"
+       "1:\n"
+       "   _entry  a1, 48\n"
+       "   addi    a12, a0, 3\n"
+#if XCHAL_NUM_AREGS > 32
+       "   .rept   (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+       "   _entry  a1, 48\n"
+       "   mov a12, a0\n"
+       "   .endr\n"
+#endif
+       "   _entry  a1, 48\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+       "   mov a8, a8\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+       "   mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+       "   mov a4, a4\n"
+#endif
+       "   retw\n"
+       "2:\n"
+       : : : "a12", "a13", "memory");
+#else
+   __asm__ __volatile__ (
+       "   mov a12, a12\n"
+       : : : "memory");
+#endif
 }

 #endif /* _XTENSA_TRAPS_H */
@@ -25,7 +25,7 @@
 #define XCHAL_KIO_DEFAULT_PADDR    0xf0000000
 #define XCHAL_KIO_SIZE     0x10000000

-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
 #define XCHAL_KIO_PADDR        xtensa_get_kio_paddr()
 #else
 #define XCHAL_KIO_PADDR        XCHAL_KIO_DEFAULT_PADDR
@@ -734,7 +734,12 @@ __SYSCALL(332, sys_finit_module, 3)
 #define __NR_accept4               333
 __SYSCALL(333, sys_accept4, 4)

-#define __NR_syscall_count         334
+#define __NR_sched_setattr         334
+__SYSCALL(334, sys_sched_setattr, 2)
+#define __NR_sched_getattr         335
+__SYSCALL(335, sys_sched_getattr, 3)
+
+#define __NR_syscall_count         336

 /*
  * sysxtensa syscall handler
@@ -1081,34 +1081,202 @@ ENTRY(fast_syscall_spill_registers)

    rsr a0, sar
    s32i    a3, a2, PT_AREG3
+   s32i    a0, a2, PT_SAR
+
+   /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+
    s32i    a4, a2, PT_AREG4
-   s32i    a0, a2, PT_AREG5    # store SAR to PT_AREG5
-
-   /* The spill routine might clobber a7, a11, and a15. */
-
    s32i    a7, a2, PT_AREG7
+   s32i    a8, a2, PT_AREG8
    s32i    a11, a2, PT_AREG11
+   s32i    a12, a2, PT_AREG12
    s32i    a15, a2, PT_AREG15

-   call0   _spill_registers    # destroys a3, a4, and SAR
+   /*
+    * Rotate ws so that the current windowbase is at bit 0.
+    * Assume ws = xxxwww1yy (www1 current window frame).
+    * Rotate ws right so that a4 = yyxxxwww1.
+    */
+
+   rsr a0, windowbase
+   rsr a3, windowstart     # a3 = xxxwww1yy
+   ssr a0          # holds WB
+   slli    a0, a3, WSBITS
+   or  a3, a3, a0      # a3 = xxxwww1yyxxxwww1yy
+   srl a3, a3          # a3 = 00xxxwww1yyxxxwww1
+
+   /* We are done if there are no more than the current register frame. */
+
+   extui   a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
+   movi    a0, (1 << (WSBITS-1))
+   _beqz   a3, .Lnospill       # only one active frame? jump
+
+   /* We want 1 at the top, so that we return to the current windowbase */
+
+   or  a3, a3, a0      # 1yyxxxwww
+
+   /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+   wsr a3, windowstart     # save shifted windowstart
+   neg a0, a3
+   and a3, a0, a3      # first bit set from right: 000010000
+
+   ffs_ws  a0, a3          # a0: shifts to skip empty frames
+   movi    a3, WSBITS
+   sub a0, a3, a0      # WSBITS-a0:number of 0-bits from right
+   ssr a0          # save in SAR for later.
+
+   rsr a3, windowbase
+   add a3, a3, a0
+   wsr a3, windowbase
+   rsync
+
+   rsr a3, windowstart
+   srl a3, a3          # shift windowstart
+
+   /* WB is now just one frame below the oldest frame in the register
+      window. WS is shifted so the oldest frame is in bit 0, thus, WB
+      and WS differ by one 4-register frame. */
+
+   /* Save frames. Depending what call was used (call4, call8, call12),
+    * we have to save 4,8. or 12 registers.
+    */
+
+.Lloop: _bbsi.l a3, 1, .Lc4
+   _bbci.l a3, 2, .Lc12
+
+.Lc8:  s32e    a4, a13, -16
+   l32e    a4, a5, -12
+   s32e    a8, a4, -32
+   s32e    a5, a13, -12
+   s32e    a6, a13, -8
+   s32e    a7, a13, -4
+   s32e    a9, a4, -28
+   s32e    a10, a4, -24
+   s32e    a11, a4, -20
+   srli    a11, a3, 2      # shift windowbase by 2
+   rotw    2
+   _bnei   a3, 1, .Lloop
+   j   .Lexit
+
+.Lc4:  s32e    a4, a9, -16
+   s32e    a5, a9, -12
+   s32e    a6, a9, -8
+   s32e    a7, a9, -4
+
+   srli    a7, a3, 1
+   rotw    1
+   _bnei   a3, 1, .Lloop
+   j   .Lexit
+
+.Lc12: _bbci.l a3, 3, .Linvalid_mask   # bit 2 shouldn't be zero!
+
+   /* 12-register frame (call12) */
+
+   l32e    a0, a5, -12
+   s32e    a8, a0, -48
+   mov a8, a0
+
+   s32e    a9, a8, -44
+   s32e    a10, a8, -40
+   s32e    a11, a8, -36
+   s32e    a12, a8, -32
+   s32e    a13, a8, -28
+   s32e    a14, a8, -24
+   s32e    a15, a8, -20
+   srli    a15, a3, 3
+
+   /* The stack pointer for a4..a7 is out of reach, so we rotate the
+    * window, grab the stackpointer, and rotate back.
+    * Alternatively, we could also use the following approach, but that
+    * makes the fixup routine much more complicated:
+    * rotw 1
+    * s32e a0, a13, -16
+    * ...
+    * rotw 2
+    */
+
+   rotw    1
+   mov a4, a13
+   rotw    -1
+
+   s32e    a4, a8, -16
+   s32e    a5, a8, -12
+   s32e    a6, a8, -8
+   s32e    a7, a8, -4
+
+   rotw    3
+
+   _beqi   a3, 1, .Lexit
+   j   .Lloop
+
+.Lexit:
+
+   /* Done. Do the final rotation and set WS */
+
+   rotw    1
+   rsr a3, windowbase
+   ssl a3
+   movi    a3, 1
+   sll a3, a3
+   wsr a3, windowstart
+.Lnospill:

    /* Advance PC, restore registers and SAR, and return from exception. */

-   l32i    a3, a2, PT_AREG5
-   l32i    a4, a2, PT_AREG4
+   l32i    a3, a2, PT_SAR
    l32i    a0, a2, PT_AREG0
    wsr a3, sar
    l32i    a3, a2, PT_AREG3

    /* Restore clobbered registers. */

+   l32i    a4, a2, PT_AREG4
    l32i    a7, a2, PT_AREG7
+   l32i    a8, a2, PT_AREG8
    l32i    a11, a2, PT_AREG11
+   l32i    a12, a2, PT_AREG12
    l32i    a15, a2, PT_AREG15

    movi    a2, 0
    rfe

+.Linvalid_mask:
+
+   /* We get here because of an unrecoverable error in the window
+    * registers, so set up a dummy frame and kill the user application.
+    * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+    */
+
+   movi    a0, 1
+   movi    a1, 0
+
+   wsr a0, windowstart
+   wsr a1, windowbase
+   rsync
+
+   movi    a0, 0
+
+   rsr a3, excsave1
+   l32i    a1, a3, EXC_TABLE_KSTK
+
+   movi    a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+   wsr a4, ps
+   rsync
+
+   movi    a6, SIGSEGV
+   movi    a4, do_exit
+   callx4  a4
+
+   /* shouldn't return, so panic */
+
+   wsr a0, excsave1
+   movi    a0, unrecoverable_exception
+   callx0  a0      # should not return
+1: j   1b
+

 ENDPROC(fast_syscall_spill_registers)

 /* Fixup handler.
@@ -1117,6 +1285,13 @@ ENDPROC(fast_syscall_spill_registers)
  * We basically restore WINDOWBASE and WINDOWSTART to the condition when
  * we entered the spill routine and jump to the user exception handler.
  *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_register routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
  * a0: value of depc, original value in depc
  * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
  * a3: exctable, original value in excsave1

@@ -1131,10 +1306,15 @@ ENTRY(fast_syscall_spill_registers_fixup)
    /* We need to make sure the current registers (a0-a3) are preserved.
     * To do this, we simply set the bit for the current window frame
     * in WS, so that the exception handlers save them to the task stack.
+    *
+    * Note: we use a3 to set the windowbase, so we take a special care
+    * of it, saving it in the original _spill_registers frame across
+    * the exception handler call.
     */

    xsr a3, excsave1    # get spill-mask
    slli    a3, a3, 1   # shift left by one
+   addi    a3, a3, 1   # set the bit for the current window frame

    slli    a2, a3, 32-WSBITS
    src a2, a3, a2  # a2 = xxwww1yyxxxwww1yy......
@@ -1220,209 +1400,6 @@ ENTRY(fast_syscall_spill_registers_fixup_return)

 ENDPROC(fast_syscall_spill_registers_fixup_return)

-/*
- * spill all registers.
- *
- * This is not a real function. The following conditions must be met:
- *
- *  - must be called with call0.
- *  - uses a3, a4 and SAR.
- *  - the last 'valid' register of each frame are clobbered.
- *  - the caller must have registered a fixup handler
- *    (or be inside a critical section)
- *  - PS_EXCM must be set (PS_WOE cleared?)
- */
-
-ENTRY(_spill_registers)
-
-   /*
-    * Rotate ws so that the current windowbase is at bit 0.
-    * Assume ws = xxxwww1yy (www1 current window frame).
-    * Rotate ws right so that a4 = yyxxxwww1.
-    */
-
-   rsr a4, windowbase
-   rsr a3, windowstart     # a3 = xxxwww1yy
-   ssr a4          # holds WB
-   slli    a4, a3, WSBITS
-   or  a3, a3, a4      # a3 = xxxwww1yyxxxwww1yy
-   srl a3, a3          # a3 = 00xxxwww1yyxxxwww1
-
-   /* We are done if there are no more than the current register frame. */
-
-   extui   a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
-   movi    a4, (1 << (WSBITS-1))
-   _beqz   a3, .Lnospill       # only one active frame? jump
-
-   /* We want 1 at the top, so that we return to the current windowbase */
-
-   or  a3, a3, a4      # 1yyxxxwww
-
-   /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
-
-   wsr a3, windowstart     # save shifted windowstart
-   neg a4, a3
-   and a3, a4, a3      # first bit set from right: 000010000
-
-   ffs_ws  a4, a3          # a4: shifts to skip empty frames
-   movi    a3, WSBITS
-   sub a4, a3, a4      # WSBITS-a4:number of 0-bits from right
-   ssr a4          # save in SAR for later.
-
-   rsr a3, windowbase
-   add a3, a3, a4
-   wsr a3, windowbase
-   rsync
-
-   rsr a3, windowstart
-   srl a3, a3          # shift windowstart
-
-   /* WB is now just one frame below the oldest frame in the register
-      window. WS is shifted so the oldest frame is in bit 0, thus, WB
-      and WS differ by one 4-register frame. */
-
-   /* Save frames. Depending what call was used (call4, call8, call12),
-    * we have to save 4,8. or 12 registers.
-    */
-
-   _bbsi.l a3, 1, .Lc4
-   _bbsi.l a3, 2, .Lc8
-
-   /* Special case: we have a call12-frame starting at a4. */
-
-   _bbci.l a3, 3, .Lc12    # bit 3 shouldn't be zero! (Jump to Lc12 first)
-
-   s32e    a4, a1, -16 # a1 is valid with an empty spill area
-   l32e    a4, a5, -12
-   s32e    a8, a4, -48
-   mov a8, a4
-   l32e    a4, a1, -16
-   j   .Lc12c
-
-.Lnospill:
-   ret
-
-.Lloop: _bbsi.l a3, 1, .Lc4
-   _bbci.l a3, 2, .Lc12
-
-.Lc8:  s32e    a4, a13, -16
-   l32e    a4, a5, -12
-   s32e    a8, a4, -32
-   s32e    a5, a13, -12
-   s32e    a6, a13, -8
-   s32e    a7, a13, -4
-   s32e    a9, a4, -28
-   s32e    a10, a4, -24
-   s32e    a11, a4, -20
-
-   srli    a11, a3, 2      # shift windowbase by 2
-   rotw    2
-   _bnei   a3, 1, .Lloop
-
-.Lexit: /* Done. Do the final rotation, set WS, and return. */
-
-   rotw    1
-   rsr a3, windowbase
-   ssl a3
-   movi    a3, 1
-   sll a3, a3
-   wsr a3, windowstart
-   ret
-
-.Lc4:  s32e    a4, a9, -16
-   s32e    a5, a9, -12
-   s32e    a6, a9, -8
-   s32e    a7, a9, -4
-
-   srli    a7, a3, 1
-   rotw    1
-   _bnei   a3, 1, .Lloop
-   j   .Lexit
-
-.Lc12: _bbci.l a3, 3, .Linvalid_mask   # bit 2 shouldn't be zero!
-
-   /* 12-register frame (call12) */
-
-   l32e    a2, a5, -12
-   s32e    a8, a2, -48
-   mov a8, a2
-
-.Lc12c:    s32e    a9, a8, -44
-   s32e    a10, a8, -40
-   s32e    a11, a8, -36
-   s32e    a12, a8, -32
-   s32e    a13, a8, -28
-   s32e    a14, a8, -24
-   s32e    a15, a8, -20
-   srli    a15, a3, 3
-
-   /* The stack pointer for a4..a7 is out of reach, so we rotate the
-    * window, grab the stackpointer, and rotate back.
-    * Alternatively, we could also use the following approach, but that
-    * makes the fixup routine much more complicated:
-    * rotw 1
-    * s32e a0, a13, -16
-    * ...
-    * rotw 2
-    */
-
-   rotw    1
-   mov a5, a13
-   rotw    -1
-
-   s32e    a4, a9, -16
-   s32e    a5, a9, -12
-   s32e    a6, a9, -8
-   s32e    a7, a9, -4
-
-   rotw    3
-
-   _beqi   a3, 1, .Lexit
-   j   .Lloop
-
-.Linvalid_mask:
-
-   /* We get here because of an unrecoverable error in the window
-    * registers. If we are in user space, we kill the application,
-    * however, this condition is unrecoverable in kernel space.
-    */
-
-   rsr a0, ps
-   _bbci.l a0, PS_UM_BIT, 1f
-
-   /* User space: Setup a dummy frame and kill application.
-    * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
-    */
-
-   movi    a0, 1
-   movi    a1, 0
-
-   wsr a0, windowstart
-   wsr a1, windowbase
-   rsync
-
-   movi    a0, 0
-
-   rsr a3, excsave1
-   l32i    a1, a3, EXC_TABLE_KSTK
-
-   movi    a4, (1 << PS_WOE_BIT) | LOCKLEVEL
-   wsr a4, ps
-   rsync
-
-   movi    a6, SIGSEGV
-   movi    a4, do_exit
-   callx4  a4
-
-1: /* Kernel space: PANIC! */
-
-   wsr a0, excsave1
-   movi    a0, unrecoverable_exception
-   callx0  a0      # should not return
-1: j   1b
-
-ENDPROC(_spill_registers)
-
 #ifdef CONFIG_MMU
 /*
  * We should never get here. Bail out!
@@ -1794,6 +1771,43 @@ ENTRY(system_call)

ENDPROC(system_call)

/*
* Spill live registers on the kernel stack macro.
*
* Entry condition: ps.woe is set, ps.excm is cleared
* Exit condition: windowstart has single bit set
* May clobber: a12, a13
*/
.macro spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
call12 1f
_j 2f
retw
.align 4
1:
_entry a1, 48
addi a12, a0, 3
#if XCHAL_NUM_AREGS > 32
.rept (XCHAL_NUM_AREGS - 32) / 12
_entry a1, 48
mov a12, a0
.endr
#endif
_entry a1, 48
#if XCHAL_NUM_AREGS % 12 == 0
mov a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
mov a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
mov a4, a4
#endif
retw
2:
#else
mov a12, a12
#endif
.endm

/*
* Task switch.
@@ -1806,21 +1820,20 @@ ENTRY(_switch_to)

entry a1, 16

mov a12, a2 # preserve 'prev' (a2)
mov a13, a3 # and 'next' (a3)
mov a10, a2 # preserve 'prev' (a2)
mov a11, a3 # and 'next' (a3)

l32i a4, a2, TASK_THREAD_INFO
l32i a5, a3, TASK_THREAD_INFO

save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

s32i a0, a12, THREAD_RA # save return address
s32i a1, a12, THREAD_SP # save stack pointer
s32i a0, a10, THREAD_RA # save return address
s32i a1, a10, THREAD_SP # save stack pointer

/* Disable ints while we manipulate the stack pointer. */

movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
xsr a14, ps
rsil a14, LOCKLEVEL
rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */

@@ -1835,7 +1848,7 @@ ENTRY(_switch_to)

/* Flush register file. */

call0 _spill_registers # destroys a3, a4, and SAR
spill_registers_kernel

/* Set kernel stack (and leave critical section)
* Note: It's safe to set it here. The stack will not be overwritten

@@ -1851,13 +1864,13 @@ ENTRY(_switch_to)

/* restore context of the task 'next' */

l32i a0, a13, THREAD_RA # restore return address
l32i a1, a13, THREAD_SP # restore stack pointer
l32i a0, a11, THREAD_RA # restore return address
l32i a1, a11, THREAD_SP # restore stack pointer

load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

wsr a14, ps
mov a2, a12 # return 'prev'
mov a2, a10 # return 'prev'
rsync

retw
@@ -22,6 +22,7 @@
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>

@@ -276,6 +277,7 @@ void __init early_init_devtree(void *params)

static int __init xtensa_device_probe(void)
{
of_clk_init(NULL);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
@@ -30,6 +30,7 @@
#include <asm/platform.h>

unsigned long ccount_freq; /* ccount Hz */
EXPORT_SYMBOL(ccount_freq);

static cycle_t ccount_read(struct clocksource *cs)
{
@@ -235,7 +235,7 @@ ENTRY(_DoubleExceptionVector)

/* Check for overflow/underflow exception, jump if overflow. */

_bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow

/*
* Restart window underflow exception.
@@ -122,9 +122,7 @@ EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);

extern long common_exception_return;
extern long _spill_registers;
EXPORT_SYMBOL(common_exception_return);
EXPORT_SYMBOL(_spill_registers);

#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
@@ -90,7 +90,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)

/*
* Initialize the bootmem system and give it all the memory we have available.
* Initialize the bootmem system and give it all low memory we have available.
*/

void __init bootmem_init(void)

@@ -142,9 +142,14 @@ void __init bootmem_init(void)

/* Add all remaining memory pieces into the bootmem map */

for (i=0; i<sysmem.nr_banks; i++)
free_bootmem(sysmem.bank[i].start,
sysmem.bank[i].end - sysmem.bank[i].start);
for (i = 0; i < sysmem.nr_banks; i++) {
if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
unsigned long end = min(max_low_pfn << PAGE_SHIFT,
sysmem.bank[i].end);
free_bootmem(sysmem.bank[i].start,
end - sysmem.bank[i].start);
}
}

}
@@ -39,7 +39,7 @@ void init_mmu(void)
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
*/
@@ -135,11 +135,11 @@ static void __init update_local_mac(struct device_node *node)

static int __init machine_setup(void)
{
struct device_node *serial;
struct device_node *clock;
struct device_node *eth = NULL;

for_each_compatible_node(serial, NULL, "ns16550a")
update_clock_frequency(serial);
for_each_node_by_name(clock, "main-oscillator")
update_clock_frequency(clock);

if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);

@@ -290,6 +290,7 @@ static int __init xtavnet_init(void)
* knows whether they set it correctly on the DIP switches.
*/
pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;

return 0;
}
@@ -18,13 +18,6 @@
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */

/* Basic parameters of each coprocessor: */
#define XCHAL_CP7_NAME "XTIOP"
#define XCHAL_CP7_IDENT XTIOP
#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */

/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1

@@ -42,6 +35,8 @@
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
#define XCHAL_CP7_SA_SIZE 0
#define XCHAL_CP7_SA_ALIGN 1

/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
@@ -56,6 +56,12 @@ struct throttling_tstate {
int target_state; /* target T-state */
};

struct acpi_processor_throttling_arg {
struct acpi_processor *pr;
int target_state;
bool force;
};

#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)

@@ -1060,16 +1066,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}

static long acpi_processor_throttling_fn(void *data)
{
struct acpi_processor_throttling_arg *arg = data;
struct acpi_processor *pr = arg->pr;

return pr->throttling.acpi_processor_set_throttling(pr,
arg->target_state, arg->force);
}

int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
cpumask_var_t online_throttling_cpus;

if (!pr)
return -EINVAL;

@@ -1080,14 +1094,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;

if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
return -ENOMEM;

if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
free_cpumask_var(saved_mask);
return -ENOMEM;
}

if (cpu_is_offline(pr->id)) {
/*
* the cpu pointed by pr->id is offline. Unnecessary to change

@@ -1096,17 +1102,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return -ENODEV;
}

cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
cpumask_and(online_throttling_cpus, cpu_online_mask,
p_throttling->shared_cpu_map);

/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
for_each_cpu(i, online_throttling_cpus) {
for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);

@@ -1118,21 +1122,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
/* FIXME: use work_on_cpu() */
if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
/* Can't migrate to the pr->id CPU. Exit */
ret = -ENODEV;
goto exit;
}
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state, force);
arg.pr = pr;
arg.target_state = state;
arg.force = force;
ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* it is necessary to set T-state for every affected
* cpus.
*/
for_each_cpu(i, online_throttling_cpus) {
for_each_cpu_and(i, cpu_online_mask,
p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the

@@ -1153,13 +1154,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
"on CPU %d\n", i));
continue;
}
t_state.cpu = i;
/* FIXME: use work_on_cpu() */
if (set_cpus_allowed_ptr(current, cpumask_of(i)))
continue;
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state, force);

arg.pr = match_pr;
arg.target_state = state;
arg.force = force;
ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
&arg);
}
}
/*

@@ -1168,17 +1168,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
for (each_cpu loop removed)
for_each_cpu(i, online_throttling_cpus) {
for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
/* restore the previous state */
/* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, saved_mask);
exit:
free_cpumask_var(online_throttling_cpus);
free_cpumask_var(saved_mask);

return ret;
}
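The conversion above retires the old set_cpus_allowed_ptr() trick, which migrated the calling task onto the target CPU, in favor of work_on_cpu(): the T-state update runs in a kworker bound to pr->id and the caller simply sleeps until it returns. A minimal sketch of the same pattern, with hypothetical names (struct my_arg, my_cpu_fn, do_per_cpu_update), is:

#include <linux/smp.h>
#include <linux/workqueue.h>

struct my_arg { /* hypothetical argument bundle */
	int state;
};

static long my_cpu_fn(void *data)
{
	struct my_arg *arg = data;

	/* Runs in a kworker pinned to the chosen CPU, so any per-CPU
	 * register or MSR access below hits the right processor. */
	return do_per_cpu_update(arg->state); /* hypothetical helper */
}

static long set_state_on(int cpu, int state)
{
	struct my_arg arg = { .state = state };

	return work_on_cpu(cpu, my_cpu_fn, &arg); /* blocks until my_cpu_fn returns */
}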
@@ -1580,6 +1580,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
kill_requests_without_uevent();
device_cache_fw_images();
break;
@@ -39,9 +39,10 @@
#define BYT_TURBO_RATIOS 0x66c

#define FRAC_BITS 8
#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{

@@ -556,18 +557,20 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
u64 c0_pct;
int32_t core_pct;
int32_t c0_pct;

core_pct = div64_u64(sample->aperf * 100, sample->mperf);
core_pct = div_fp(int_tofp((sample->aperf)),
int_tofp((sample->mperf)));
core_pct = mul_fp(core_pct, int_tofp(100));
FP_ROUNDUP(core_pct);

c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));

c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
sample->freq = fp_toint(
mul_fp(int_tofp(cpu->pstate.max_pstate),
int_tofp(core_pct * 1000)));
mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));

sample->core_pct_busy = mul_fp(int_tofp(core_pct),
div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
sample->core_pct_busy = mul_fp(core_pct, c0_pct);
}

static inline void intel_pstate_sample(struct cpudata *cpu)

@@ -579,6 +582,10 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = native_read_tsc();

aperf = aperf >> FRAC_BITS;
mperf = mperf >> FRAC_BITS;
tsc = tsc >> FRAC_BITS;

cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
cpu->samples[cpu->sample_ptr].aperf = aperf;
cpu->samples[cpu->sample_ptr].mperf = mperf;

@@ -610,7 +617,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
return FP_ROUNDUP(core_busy);
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
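The intel_pstate hunks above move the busy and frequency math onto the driver's binary fixed-point helpers and shrink FRAC_BITS from 8 to 6 so the shifted aperf/mperf/tsc samples fit comfortably in the intermediate products. A stand-alone sketch of this kind of Q.6 arithmetic, assuming the same helper shapes the driver defines (the demo values are made up):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	/* the product carries 2*FRAC_BITS fraction bits; shift one set out */
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	/* pre-shift the dividend so the quotient keeps FRAC_BITS bits */
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	int32_t pct = mul_fp(div_fp(int_tofp(3), int_tofp(4)), int_tofp(100));

	printf("%d\n", fp_toint(pct)); /* 3/4 of 100 -> prints 75 */
	return 0;
}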
@@ -449,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
tasklet_schedule(&chan->cleanup_task);
if (test_bit(IOAT_RUN, &chan->state))
tasklet_schedule(&chan->cleanup_task);
}

writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);

@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioat_chan_common *chan = data;

tasklet_schedule(&chan->cleanup_task);
if (test_bit(IOAT_RUN, &chan->state))
tasklet_schedule(&chan->cleanup_task);

return IRQ_HANDLED;
}

@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
tasklet_disable(&chan->cleanup_task);
}

/**

@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

tasklet_enable(&chan->cleanup_task);
set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}

void ioat_stop(struct ioat_chan_common *chan)
{
struct ioatdma_device *device = chan->device;
struct pci_dev *pdev = device->pdev;
int chan_id = chan_num(chan);
struct msix_entry *msix;

/* 1/ stop irq from firing tasklets
* 2/ stop the tasklet from re-arming irqs
*/
clear_bit(IOAT_RUN, &chan->state);

/* flush inflight interrupts */
switch (device->irq_mode) {
case IOAT_MSIX:
msix = &device->msix_entries[chan_id];
synchronize_irq(msix->vector);
break;
case IOAT_MSI:
case IOAT_INTX:
synchronize_irq(pdev->irq);
break;
default:
break;
}

/* flush inflight timers */
del_timer_sync(&chan->timer);

/* flush inflight tasklet runs */
tasklet_kill(&chan->cleanup_task);

/* final cleanup now that everything is quiesced and can't re-arm */
device->cleanup_fn((unsigned long) &chan->common);
}

/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
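The new ioat_stop() encodes the canonical teardown order for an irq, tasklet and timer pipeline: first clear the run flag that every producer checks, then wait out in-flight interrupt handlers, then flush the timer and the tasklet, and only then run the final cleanup. A generic sketch of the same shutdown shape, with hypothetical names (struct my_chan, MY_RUN):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/timer.h>

#define MY_RUN 0 /* hypothetical state bit */

struct my_chan { /* hypothetical channel */
	unsigned long state;
	int irq;
	struct timer_list timer;
	struct tasklet_struct cleanup;
};

static void my_stop(struct my_chan *chan)
{
	clear_bit(MY_RUN, &chan->state); /* 1: producers stop re-arming work */
	synchronize_irq(chan->irq);      /* 2: no handler is still mid-flight */
	del_timer_sync(&chan->timer);    /* 3: the timer cannot fire again */
	tasklet_kill(&chan->cleanup);    /* 4: flush the last tasklet run */
}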
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
if (ioat->desccount == 0)
return;

tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
ioat1_cleanup(ioat);
ioat_stop(chan);

/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.

@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;

ioat1_cleanup(ioat);
if (!test_bit(IOAT_RUN, &chan->state))
return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;

ioat2_cleanup(ioat);
if (!test_bit(IOAT_RUN, &chan->state))
return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);

tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);

/* check that we got off the ground */

@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

if (is_ioat_active(status) || is_ioat_idle(status)) {
set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
if (!ioat->ring)
return;

tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
device->cleanup_fn((unsigned long) c);
ioat_stop(chan);
device->reset_hw(chan);
clear_bit(IOAT_RUN, &chan->state);

spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;

ioat3_cleanup(ioat);
if (!test_bit(IOAT_RUN, &chan->state))
return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
bool callback_active;
dma_async_tx_callback callback;
void *callback_param;

@@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data)
}

/* Callback to client */
callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;

@@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data)

spin_unlock_irqrestore(&d40c->lock, flags);

if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
if (callback_active && callback)
callback(callback_param);

return;
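The three ste_dma40 hunks are one logical fix: the client callback must run without the channel lock held, but by that point the descriptor may already have been recycled, so everything the callback path needs (here, whether DMA_PREP_INTERRUPT was set) has to be snapshotted while the lock is still held. A condensed, hypothetical sketch of the pattern (my_complete and the passed-in lock are illustrative, not the driver's real signature):

#include <linux/spinlock.h>
#include <linux/dmaengine.h>

static void my_complete(spinlock_t *lock, struct dma_async_tx_descriptor *txd)
{
	dma_async_tx_callback callback;
	void *param;
	bool callback_active;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* snapshot under the lock: txd may be recycled once we drop it */
	callback_active = !!(txd->flags & DMA_PREP_INTERRUPT);
	callback = txd->callback;
	param = txd->callback_param;
	spin_unlock_irqrestore(lock, flags);

	if (callback_active && callback)
		callback(param); /* txd must not be touched from here on */
}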
@@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)

/* Attempt to 'get' the MCH register we want */
pdev = NULL;
while (!pvt->pci_dev_16_1_fsb_addr_map ||
!pvt->pci_dev_16_2_fsb_err_regs) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
if (!pdev) {
/* End of list, leave */
i7300_printk(KERN_ERR,
"'system address,Process Bus' "
"device not found:"
"vendor 0x%x device 0x%x ERR funcs "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
goto error;
}

while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
pvt->pci_dev_16_1_fsb_addr_map = pdev;
if (!pvt->pci_dev_16_1_fsb_addr_map)
pvt->pci_dev_16_1_fsb_addr_map =
pci_dev_get(pdev);
break;
case 2:
pvt->pci_dev_16_2_fsb_err_regs = pdev;
if (!pvt->pci_dev_16_2_fsb_err_regs)
pvt->pci_dev_16_2_fsb_err_regs =
pci_dev_get(pdev);
break;
}
}

if (!pvt->pci_dev_16_1_fsb_addr_map ||
!pvt->pci_dev_16_2_fsb_err_regs) {
/* At least one device was not found */
i7300_printk(KERN_ERR,
"'system address,Process Bus' device not found:"
"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
goto error;
}

edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
@@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
}

if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
!pdev) {
pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
}

if (!pdev) {
if (*prev) {
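Both EDAC fixes revolve around the same pci_get_device() contract: the reference on the 'from' device passed in is always dropped, and the returned device carries a fresh reference. A caller that still needs 'from' after the lookup, as i7core does with *prev, must therefore take an extra reference first; a plain enumeration loop, by contrast, is refcount-neutral and needs no manual puts. A minimal sketch with placeholder vendor/device IDs:

#include <linux/pci.h>

static void walk_matching_devices(void)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() puts the reference on the device it was handed
	 * and returns the next match with a new reference taken.
	 * 0x8086/0x1234 are placeholders, not real IDs from this commit. */
	while ((pdev = pci_get_device(0x8086, 0x1234, pdev))) {
		dev_info(&pdev->dev, "found %04x:%04x\n",
			 pdev->vendor, pdev->device);
		/* to keep pdev past this iteration: pci_dev_get(pdev); */
	}
}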
@@ -222,27 +222,19 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct snd_soc_dapm_context *dapm = arizona->dapm;
int ret;

mutex_lock(&dapm->card->dapm_mutex);

ret = snd_soc_dapm_force_enable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);

mutex_unlock(&dapm->card->dapm_mutex);

snd_soc_dapm_sync(dapm);

if (!arizona->pdata.micd_force_micbias) {
mutex_lock(&dapm->card->dapm_mutex);

ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);

mutex_unlock(&dapm->card->dapm_mutex);

snd_soc_dapm_sync(dapm);
}
}

@@ -304,16 +296,12 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);

mutex_lock(&dapm->card->dapm_mutex);

ret = snd_soc_dapm_disable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
widget, ret);

mutex_unlock(&dapm->card->dapm_mutex);

snd_soc_dapm_sync(dapm);

if (info->micd_reva) {
@@ -27,7 +27,7 @@ FMC_PARAM_BUSID(fwe_drv);
/* The "file=" is like the generic "gateware=" used elsewhere */
static char *fwe_file[FMC_MAX_CARDS];
static int fwe_file_n;
module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444);
module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);

static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
int write)
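The one-character fix above is easy to underrate: the mode argument of module_param_array_named() is octal, so a bare 444 is parsed as decimal 444, which is 0674 in octal (rw-rwxr--), silently granting group write and execute on the sysfs node. The intended 0444 is world-readable only. The same class of fix appears further down for the OMAP IOMMU debugfs files, where 600/400 become 0600/0400.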
@@ -1774,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE41(rdev)) {
/* Don't share PLLs on DCE4.1 chips */
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
}
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:

@@ -1801,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
} else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
@@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
return !ASIC_IS_NODCE(rdev);
}

static void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable)
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable)
{
if (!pin)
return;

WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
AUDIO_ENABLED);
DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
enable ? AUDIO_ENABLED : 0);
}

static const u32 pin_offsets[7] =

@@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
/* disable audio. it will be set up later */
dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}

return 0;
@@ -5475,9 +5475,9 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;

/* disable audio prior to setting up hw */
if (ASIC_IS_DCE6(rdev)) {
dig->afmt->pin = dce6_audio_get_pin(rdev);
dce6_audio_enable(rdev, dig->afmt->pin, false);
} else {
dig->afmt->pin = r600_audio_get_pin(rdev);
r600_audio_enable(rdev, dig->afmt->pin, false);
}

evergreen_audio_set_dto(encoder, mode->clock);

WREG32(HDMI_VBI_PACKET_CONTROL + offset,

@@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);

/* enable audio after setting up hw */
if (ASIC_IS_DCE6(rdev))
dce6_audio_enable(rdev, dig->afmt->pin, true);
else
r600_audio_enable(rdev, dig->afmt->pin, true);
}

void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

@@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;

if (enable) {
if (ASIC_IS_DCE6(rdev))
dig->afmt->pin = dce6_audio_get_pin(rdev);
else
dig->afmt->pin = r600_audio_get_pin(rdev);
} else {
dig->afmt->pin = NULL;
}

dig->afmt->enabled = enable;

DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
}

/* enable the audio stream */
static void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable)
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable)
{
u32 value = 0;

if (!pin)
return;

if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */

@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}

/*

@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;

r600_audio_enable(rdev, &rdev->audio.pin[0], true);
/* disable audio. it will be set up later */
r600_audio_enable(rdev, &rdev->audio.pin[0], false);

return 0;
}
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;

/* XXX: setting this register causes hangs on some asics */
return;

list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);

@@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;

/* disable audio prior to setting up hw */
dig->afmt->pin = r600_audio_get_pin(rdev);
r600_audio_enable(rdev, dig->afmt->pin, false);

r600_audio_set_dto(encoder, mode->clock);

WREG32(HDMI0_VBI_PACKET_CONTROL + offset,

@@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);

r600_hdmi_audio_workaround(encoder);

/* enable audio after setting up hw */
r600_audio_enable(rdev, dig->afmt->pin, true);
}

/*

@@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;

if (enable)
dig->afmt->pin = r600_audio_get_pin(rdev);
else
dig->afmt->pin = NULL;

/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
@@ -2747,6 +2747,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable);
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable);

/*
* R600 vram scratch functions
@@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);

/* TODO: check version? */
printk("ATPX version %u\n", output.version);
printk("ATPX version %u, functions 0x%08x\n",
output.version, output.function_bits);

radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
@@ -537,6 +537,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)

radeon_vm_init(rdev, &fpriv->vm);

r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r)
return r;

/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,

@@ -544,6 +548,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);

radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
@@ -171,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)

radeon_bo_unref(&rdev->uvd.vcpu_bo);

radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

release_firmware(rdev->uvd_fw);
}
@@ -1955,9 +1955,9 @@ void rv770_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -104,7 +104,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_TEGRA_DRM_FBDEV
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;

tegra_fbdev_restore_mode(tegra->fbdev);
@@ -15,6 +15,7 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
bool enabled;

struct clk *clk_parent;
struct clk *clk;

@@ -89,6 +90,9 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;

if (rgb->enabled)
return 0;

tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));

value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;

@@ -122,6 +126,8 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

rgb->enabled = true;

return 0;
}

@@ -130,6 +136,9 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;

if (!rgb->enabled)
return 0;

value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);

@@ -144,6 +153,8 @@ static int tegra_output_rgb_disable(struct tegra_output *output)

tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));

rgb->enabled = false;

return 0;
}
@@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat {
/* Planar video formats. */
SVGA3D_YV12 = 121,

/* Shader constant formats. */
SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
SVGA3D_SURFACE_SHADERCONST_INT = 123,
SVGA3D_SURFACE_SHADERCONST_BOOL = 124,

SVGA3D_FORMAT_MAX = 125,
SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;

typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -40,7 +40,7 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20121114"
#define VMWGFX_DRIVER_DATE "20140228"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
@@ -188,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,

bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");

memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = type;
cmd->body.baseAddress = 0;
cmd->body.sizeInBytes = 0;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable "
"takedown.\n");
} else {
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = type;
cmd->body.baseAddress = 0;
cmd->body.sizeInBytes = 0;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

if (bo) {
int ret;

@@ -562,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
} else {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
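Both vmwgfx hunks are the same hardening. vmw_fifo_reserve() can return NULL under FIFO pressure, and the old code logged the failure but then wrote through cmd anyway, a guaranteed NULL dereference on the error path. After the fix, every store into the reserved space and the matching vmw_fifo_commit() live strictly inside the non-NULL branch, so a failed reservation degrades to a logged error instead of an oops.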
@@ -427,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vmw_bo->res_list);

ret = ttm_bo_init(bdev, &vmw_bo->base, size,
(user) ? ttm_bo_type_device :
ttm_bo_type_kernel, placement,
ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
@@ -538,7 +538,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)

g->base = job->gather_addr_phys[i];

for (j = 0; j < job->num_gathers; j++)
for (j = i + 1; j < job->num_gathers; j++)
if (job->gathers[j].bo == g->bo)
job->gathers[j].handled = true;
@@ -70,7 +70,7 @@ config IIO_ST_GYRO_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.

This driver can also be built as a module. If so, these modules
will be created:

@@ -19,7 +19,6 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"

@@ -167,11 +167,10 @@ static const struct st_sensors st_gyro_sensors[] = {
.wai = ST_GYRO_2_WAI_EXP,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
[1] = L3GD20H_GYRO_DEV_NAME,
[2] = LSM330D_GYRO_DEV_NAME,
[3] = LSM330DLC_GYRO_DEV_NAME,
[4] = L3G4IS_GYRO_DEV_NAME,
[5] = LSM330_GYRO_DEV_NAME,
[1] = LSM330D_GYRO_DEV_NAME,
[2] = LSM330DLC_GYRO_DEV_NAME,
[3] = L3G4IS_GYRO_DEV_NAME,
[4] = LSM330_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {

@@ -55,7 +55,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
{ L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},

@@ -54,7 +54,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
{ L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
@@ -103,13 +103,13 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
/**
* cm32181_read_als_it() - Get sensor integration time (ms)
* @cm32181: pointer of struct cm32181
* @val: pointer of int to load the als_it value.
* @val2: pointer of int to load the als_it value.
*
* Report the current integration time by millisecond.
*
* Return: IIO_VAL_INT for success, otherwise -EINVAL.
* Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
{
u16 als_it;
int i;

@@ -119,8 +119,8 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
if (als_it == als_it_bits[i]) {
*val = als_it_value[i];
return IIO_VAL_INT;
*val2 = als_it_value[i];
return IIO_VAL_INT_PLUS_MICRO;
}
}

@@ -221,7 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
*val = cm32181->calibscale;
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
ret = cm32181_read_als_it(cm32181, val);
ret = cm32181_read_als_it(cm32181, val2);
return ret;
}

@@ -240,7 +240,7 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
cm32181->calibscale = val;
return val;
case IIO_CHAN_INFO_INT_TIME:
ret = cm32181_write_als_it(cm32181, val);
ret = cm32181_write_als_it(cm32181, val2);
return ret;
}

@@ -264,7 +264,7 @@ static ssize_t cm32181_get_it_available(struct device *dev,

n = ARRAY_SIZE(als_it_value);
for (i = 0, len = 0; i < n; i++)
len += sprintf(buf + len, "%d ", als_it_value[i]);
len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
return len + sprintf(buf + len, "\n");
}

@@ -50,10 +50,10 @@
#define CM36651_CS_CONF2_DEFAULT_BIT 0x08

/* CS_CONF3 channel integration time */
#define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */
#define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */
#define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */
#define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */
#define CM36651_CS_IT1 0x00 /* Integration time 80 msec */
#define CM36651_CS_IT2 0x40 /* Integration time 160 msec */
#define CM36651_CS_IT3 0x80 /* Integration time 320 msec */
#define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */

/* PS_CONF1 command code */
#define CM36651_PS_ENABLE 0x00

@@ -64,10 +64,10 @@
#define CM36651_PS_PERS4 0x0C

/* PS_CONF1 command code: integration time */
#define CM36651_PS_IT1 0x00 /* Integration time 320 usec */
#define CM36651_PS_IT2 0x10 /* Integration time 420 usec */
#define CM36651_PS_IT3 0x20 /* Integration time 520 usec */
#define CM36651_PS_IT4 0x30 /* Integration time 640 usec */
#define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */
#define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */
#define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */
#define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */

/* PS_CONF1 command code: duty ratio */
#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */

@@ -93,8 +93,8 @@
#define CM36651_CLOSE_PROXIMITY 0x32
#define CM36651_FAR_PROXIMITY 0x33

#define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000"
#define CM36651_PS_INT_TIME_AVAIL "320 420 520 640"
#define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64"
#define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640"

enum cm36651_operation_mode {
CM36651_LIGHT_EN,

@@ -356,30 +356,30 @@ static int cm36651_read_channel(struct cm36651_data *cm36651,
}

static int cm36651_read_int_time(struct cm36651_data *cm36651,
struct iio_chan_spec const *chan, int *val)
struct iio_chan_spec const *chan, int *val2)
{
switch (chan->type) {
case IIO_LIGHT:
if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
*val = 80000;
*val2 = 80000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
*val = 160000;
*val2 = 160000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
*val = 320000;
*val2 = 320000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
*val = 640000;
*val2 = 640000;
else
return -EINVAL;
break;
case IIO_PROXIMITY:
if (cm36651->ps_int_time == CM36651_PS_IT1)
*val = 320;
*val2 = 320;
else if (cm36651->ps_int_time == CM36651_PS_IT2)
*val = 420;
*val2 = 420;
else if (cm36651->ps_int_time == CM36651_PS_IT3)
*val = 520;
*val2 = 520;
else if (cm36651->ps_int_time == CM36651_PS_IT4)
*val = 640;
*val2 = 640;
else
return -EINVAL;
break;

@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}

return IIO_VAL_INT;
return IIO_VAL_INT_PLUS_MICRO;
}

static int cm36651_write_int_time(struct cm36651_data *cm36651,

@@ -459,7 +459,8 @@ static int cm36651_read_raw(struct iio_dev *indio_dev,
ret = cm36651_read_channel(cm36651, chan, val);
break;
case IIO_CHAN_INFO_INT_TIME:
ret = cm36651_read_int_time(cm36651, chan, val);
*val = 0;
ret = cm36651_read_int_time(cm36651, chan, val2);
break;
default:
ret = -EINVAL;

@@ -479,7 +480,7 @@ static int cm36651_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;

if (mask == IIO_CHAN_INFO_INT_TIME) {
ret = cm36651_write_int_time(cm36651, chan, val);
ret = cm36651_write_int_time(cm36651, chan, val2);
if (ret < 0)
dev_err(&client->dev, "Integration time write failed\n");
}
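Both light-sensor patches above move integration time onto the IIO fractional convention: with IIO_VAL_INT_PLUS_MICRO the core renders *val as whole seconds and *val2 as microseconds, so "0 + 160000" shows up in sysfs as 0.160000, which is why cm36651_read_raw() must now zero *val before filling *val2. A minimal read_raw sketch of the convention, with a hypothetical driver state (struct my_state, it_us):

#include <linux/iio/iio.h>

struct my_state { /* hypothetical driver state */
	int it_us;   /* integration time in microseconds */
};

static int my_read_raw(struct iio_dev *indio_dev,
		       struct iio_chan_spec const *chan,
		       int *val, int *val2, long mask)
{
	struct my_state *st = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_INT_TIME:
		*val = 0;          /* whole seconds */
		*val2 = st->it_us; /* e.g. 160000 renders as "0.160000" */
		return IIO_VAL_INT_PLUS_MICRO;
	default:
		return -EINVAL;
	}
}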
@@ -37,7 +37,6 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
int ret;

if (!haptics->arizona->dapm) {

@@ -67,13 +66,10 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}

mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
mutex_unlock(dapm_mutex);
return;
}

@@ -81,21 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
mutex_unlock(dapm_mutex);
return;
}

mutex_unlock(dapm_mutex);

} else {
/* This disable sequence will be a noop if already enabled */
mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
mutex_unlock(dapm_mutex);
return;
}

@@ -103,12 +92,9 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
mutex_unlock(dapm_mutex);
return;
}

mutex_unlock(dapm_mutex);

ret = regmap_update_bits(arizona->regmap,
ARIZONA_HAPTICS_CONTROL_1,
ARIZONA_HAP_CTRL_MASK,

@@ -155,16 +141,11 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;

cancel_work_sync(&haptics->work);

mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

if (haptics->arizona->dapm)
snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");

mutex_unlock(dapm_mutex);
}

static int arizona_haptics_probe(struct platform_device *pdev)
@@ -354,8 +354,8 @@ DEBUG_FOPS(mem);
return -ENOMEM; \
}

#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)

static int iommu_debug_register(struct device *dev, void *data)
{
@@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
cpu = cpumask_any(cpumask);
cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];

metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

@@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
cpu = cpumask_any(cpumask);
cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
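Both Meta interrupt controllers previously took cpumask_any(cpumask), which is free to return a CPU that appears in the affinity mask but is offline; indexing cpu_2_hwthread_id with that retargets the IRQ at a dead hardware thread. cpumask_any_and() intersects with cpu_online_mask first. One subtlety worth knowing when reusing the idiom (the fallback policy below is a hypothetical illustration, not what these drivers do):

#include <linux/cpumask.h>

static unsigned int pick_target_cpu(const struct cpumask *affinity)
{
	unsigned int cpu = cpumask_any_and(affinity, cpu_online_mask);

	/* an empty intersection yields >= nr_cpu_ids, not an error code */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask); /* hypothetical fallback */

	return cpu;
}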
|
|
@ -289,6 +289,7 @@ struct per_bio_data {
|
|||
bool tick:1;
|
||||
unsigned req_nr:2;
|
||||
struct dm_deferred_entry *all_io_entry;
|
||||
struct dm_hook_info hook_info;
|
||||
|
||||
/*
|
||||
* writethrough fields. These MUST remain at the end of this
|
||||
|
@ -297,7 +298,6 @@ struct per_bio_data {
|
|||
*/
|
||||
struct cache *cache;
|
||||
dm_cblock_t cblock;
|
||||
struct dm_hook_info hook_info;
|
||||
struct dm_bio_details bio_details;
|
||||
};
|
||||
|
||||
|
@ -671,15 +671,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
|
|||
dm_cblock_t cblock)
|
||||
{
|
||||
sector_t bi_sector = bio->bi_iter.bi_sector;
|
||||
sector_t block = from_cblock(cblock);
|
||||
|
||||
bio->bi_bdev = cache->cache_dev->bdev;
|
||||
if (!block_size_is_power_of_two(cache))
|
||||
bio->bi_iter.bi_sector =
|
||||
(from_cblock(cblock) * cache->sectors_per_block) +
|
||||
(block * cache->sectors_per_block) +
|
||||
sector_div(bi_sector, cache->sectors_per_block);
|
||||
else
|
||||
bio->bi_iter.bi_sector =
|
||||
(from_cblock(cblock) << cache->sectors_per_block_shift) |
|
||||
(block << cache->sectors_per_block_shift) |
|
||||
(bi_sector & (cache->sectors_per_block - 1));
|
||||
}
|
||||
|
||||
|
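
remap_to_cache() derives the target sector from the cache-block number plus the offset within the block: a multiply/remainder in the general case, shift/mask when the block size is a power of two. The hunk hoists from_cblock(cblock) into a sector_t-sized local so the conversion happens once before the arithmetic. Both forms, verified in a standalone sketch (all values made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bi_sector = 1234567;  /* incoming sector */
            uint64_t block = 42;           /* cache block number */
            uint64_t spb = 8;              /* sectors per block (power of 2) */
            unsigned shift = 3;            /* log2(spb) */

            /* General case: block * spb + (bi_sector % spb). */
            uint64_t a = block * spb + bi_sector % spb;
            /* Power-of-two fast path: shift and mask, same result. */
            uint64_t b = (block << shift) | (bi_sector & (spb - 1));

            printf("%llu %llu\n",
                   (unsigned long long)a, (unsigned long long)b);
            return 0;
    }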
@@ -1010,13 +1011,15 @@ static void overwrite_endio(struct bio *bio, int err)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	unsigned long flags;
 
+	dm_unhook_bio(&pb->hook_info, bio);
+
 	if (err)
 		mg->err = true;
 
+	mg->requeue_holder = false;
+
 	spin_lock_irqsave(&cache->lock, flags);
 	list_add_tail(&mg->list, &cache->completed_migrations);
-	dm_unhook_bio(&pb->hook_info, bio);
-	mg->requeue_holder = false;
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	wake_worker(cache);
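
The overwrite_endio() hunk is an ordering fix: the bio is unhooked and requeue_holder cleared before the migration is linked onto completed_migrations. Once it is visible on that list, the worker may process it concurrently, so every update to the migration and the bio has to be finished first. The general publish-after-init pattern, sketched with pthreads (illustrative, not the dm-cache code; names invented):

    #include <pthread.h>
    #include <stdio.h>

    struct item { int ready; struct item *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *completed;

    static void complete(struct item *it)
    {
            it->ready = 1;              /* finish private updates first... */

            pthread_mutex_lock(&list_lock);
            it->next = completed;       /* ...then publish: from here on a */
            completed = it;             /* worker thread may pick it up    */
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            static struct item it;
            complete(&it);
            printf("published, ready=%d\n", completed->ready);
            return 0;
    }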
@@ -201,29 +201,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bio_get_page(struct dpages *dp,
-		 struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+			 unsigned long *len, unsigned *offset)
 {
-	struct bio *bio = dp->context_ptr;
-	struct bio_vec bvec = bio_iovec(bio);
-	*p = bvec.bv_page;
-	*len = bvec.bv_len;
-	*offset = bvec.bv_offset;
+	struct bio_vec *bvec = dp->context_ptr;
+	*p = bvec->bv_page;
+	*len = bvec->bv_len - dp->context_u;
+	*offset = bvec->bv_offset + dp->context_u;
 }
 
 static void bio_next_page(struct dpages *dp)
 {
-	struct bio *bio = dp->context_ptr;
-	struct bio_vec bvec = bio_iovec(bio);
-
-	bio_advance(bio, bvec.bv_len);
+	struct bio_vec *bvec = dp->context_ptr;
+	dp->context_ptr = bvec + 1;
+	dp->context_u = 0;
 }
 
 static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
 	dp->get_page = bio_get_page;
 	dp->next_page = bio_next_page;
-	dp->context_ptr = bio;
+	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+	dp->context_u = bio->bi_iter.bi_bvec_done;
 }
 
 /*
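
The dm-io rework walks the bio's vector array directly and records, in dp->context_u, how many bytes of the current bvec are already consumed (seeded from bi_iter.bi_bvec_done). That lets I/O start mid-bvec, and the same dpages state can be rebuilt for each destination. The bookkeeping, reduced to a userspace sketch (struct and field names invented; the page pointer and bv_offset are omitted for brevity):

    #include <stdio.h>

    struct seg { unsigned len; };        /* stands in for struct bio_vec */

    struct dpages_sketch {
            const struct seg *cur;       /* like context_ptr */
            unsigned done;               /* like context_u: bytes used */
    };

    static void get_page(struct dpages_sketch *dp,
                         unsigned *len, unsigned *off)
    {
            *len = dp->cur->len - dp->done;  /* what remains in this seg */
            *off = dp->done;                 /* where it starts */
    }

    static void next_page(struct dpages_sketch *dp)
    {
            dp->cur++;                       /* advance to the next seg */
            dp->done = 0;                    /* a fresh seg starts at 0 */
    }

    int main(void)
    {
            struct seg segs[] = { { 4096 }, { 4096 } };
            struct dpages_sketch dp = { segs, 512 };  /* resume 512B in */
            unsigned len, off;

            get_page(&dp, &len, &off);
            printf("len=%u off=%u\n", len, off);   /* len=3584 off=512 */
            next_page(&dp);
            get_page(&dp, &len, &off);
            printf("len=%u off=%u\n", len, off);   /* len=4096 off=0 */
            return 0;
    }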
@@ -1626,8 +1626,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
-		r = scsi_verify_blk_ioctl(NULL, cmd);
+	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+		int err = scsi_verify_blk_ioctl(NULL, cmd);
+		if (err)
+			r = err;
+	}
 
 	if (r == -ENOTCONN && !fatal_signal_pending(current))
 		queue_work(kmultipathd, &m->process_queued_ios);
@@ -1244,6 +1244,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 
 		dm_bio_restore(bd, bio);
 		bio_record->details.bi_bdev = NULL;
+
+		atomic_inc(&bio->bi_remaining);
+
 		queue_bio(ms, bio, rw);
 		return DM_ENDIO_INCOMPLETE;
 	}
@@ -483,7 +483,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 
 	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
 	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
-	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
 	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
 	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
 
@@ -651,7 +651,7 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
 {
 	int r;
 
-	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
+	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
 					  THIN_METADATA_CACHE_SIZE,
 					  THIN_MAX_CONCURRENT_LOCKS);
 	if (IS_ERR(pmd->bm)) {
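
Taken together, the two hunks above track a unit change: THIN_METADATA_BLOCK_SIZE goes from bytes (4096) to sectors, so each user adjusts accordingly. The superblock field stores the sector count directly, while dm_block_manager_create(), which appears to take a size in bytes, now shifts left by SECTOR_SHIFT. The arithmetic, assuming the usual 512-byte sector (a sketch):

    #include <stdio.h>

    #define SECTOR_SHIFT 9                             /* 512-byte sectors */
    #define BLOCK_SIZE_SECTORS (4096 >> SECTOR_SHIFT)  /* new-style value */

    int main(void)
    {
            printf("block size: %d sectors\n", BLOCK_SIZE_SECTORS);  /* 8 */
            printf("block size: %d bytes\n",
                   BLOCK_SIZE_SECTORS << SECTOR_SHIFT);          /* 4096 */
            return 0;
    }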
@@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
 	return r;
 }
 
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
+{
+	bool r = false;
+	struct dm_thin_device *td, *tmp;
+
+	down_read(&pmd->root_lock);
+	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+		if (td->changed) {
+			r = td->changed;
+			break;
+		}
+	}
+	up_read(&pmd->root_lock);
+
+	return r;
+}
+
 bool dm_thin_aborted_changes(struct dm_thin_device *td)
 {
 	bool r;
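
The new helper takes the pool's root_lock for reading and stops at the first thin device whose changed flag is set; one dirty device is enough to answer "yes". The same early-exit scan over a plain array (illustrative only, minus the locking):

    #include <stdbool.h>
    #include <stdio.h>

    struct thin_dev { bool changed; };

    /* In the kernel this walk runs under down_read(&pmd->root_lock). */
    static bool pool_changed(const struct thin_dev *devs, int n)
    {
            for (int i = 0; i < n; i++)
                    if (devs[i].changed)
                            return true;   /* first hit decides */
            return false;
    }

    int main(void)
    {
            struct thin_dev devs[] = { { false }, { true }, { false } };
            printf("%d\n", pool_changed(devs, 3));   /* 1 */
            return 0;
    }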
@@ -9,16 +9,14 @@
 
 #include "persistent-data/dm-block-manager.h"
 #include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-metadata.h"
 
-#define THIN_METADATA_BLOCK_SIZE 4096
+#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
 
 /*
  * The metadata device is currently limited in size.
- *
- * We have one block of index, which can hold 255 index entries. Each
- * index entry contains allocation info about 16k metadata blocks.
  */
-#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
 
 /*
  * A metadata device larger than 16GB triggers a warning.
@@ -161,6 +159,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
  */
 bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
 
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
+
 bool dm_thin_aborted_changes(struct dm_thin_device *td);
 
 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
@@ -1357,7 +1357,8 @@ static void process_deferred_bios(struct pool *pool)
 	bio_list_init(&pool->deferred_flush_bios);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
+	if (bio_list_empty(&bios) &&
+	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
 		return;
 
 	if (commit(pool)) {
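
The new process_deferred_bios() condition skips the periodic commit unless the transaction actually has changes: the early return fires when there are no deferred flush bios and it is not the case that both something changed and the commit interval expired. A truth-table sanity check (standalone sketch):

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_return_early(bool bios_empty, bool changed,
                                    bool time_up)
    {
            return bios_empty && !(changed && time_up);
    }

    int main(void)
    {
            /* No bios, nothing changed: skip even if the timer expired. */
            printf("%d\n", should_return_early(true, false, true));  /* 1 */
            /* No bios, but changes exist and time is up: go commit. */
            printf("%d\n", should_return_early(true, true, true));   /* 0 */
            return 0;
    }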
@@ -1999,16 +2000,27 @@ static void metadata_low_callback(void *context)
 	dm_table_event(pool->ti->table);
 }
 
-static sector_t get_metadata_dev_size(struct block_device *bdev)
+static sector_t get_dev_size(struct block_device *bdev)
+{
+	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static void warn_if_metadata_device_too_big(struct block_device *bdev)
 {
-	sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	sector_t metadata_dev_size = get_dev_size(bdev);
 	char buffer[BDEVNAME_SIZE];
 
-	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
 		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
 		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
-		metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
-	}
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+	sector_t metadata_dev_size = get_dev_size(bdev);
+
+	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
+		metadata_dev_size = THIN_METADATA_MAX_SECTORS;
 
 	return metadata_dev_size;
 }
@@ -2017,7 +2029,7 @@ static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
 {
 	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
 
-	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
 
 	return metadata_dev_size;
 }
@@ -2095,12 +2107,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->error = "Error opening metadata block device";
 		goto out_unlock;
 	}
-
-	/*
-	 * Run for the side-effect of possibly issuing a warning if the
-	 * device is too big.
-	 */
-	(void) get_metadata_dev_size(metadata_dev->bdev);
+	warn_if_metadata_device_too_big(metadata_dev->bdev);
 
 	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
 	if (r) {
@@ -2287,6 +2294,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
 		return -EINVAL;
 
 	} else if (metadata_dev_size > sb_metadata_dev_size) {
+		warn_if_metadata_device_too_big(pool->md_dev);
 		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
 		       dm_device_name(pool->pool_md),
 		       sb_metadata_dev_size, metadata_dev_size);
@@ -2894,6 +2902,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		ti->error = "Couldn't open thin device, Pool is in fail mode";
+		r = -EINVAL;
 		goto bad_thin_open;
 	}
 
@@ -2905,7 +2914,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
 	if (r)
-		goto bad_thin_open;
+		goto bad_target_max_io_len;
 
 	ti->num_flush_bios = 1;
 	ti->flush_supported = true;
@@ -2926,6 +2935,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	return 0;
 
+bad_target_max_io_len:
+	dm_pool_close_thin_device(tc->td);
 bad_thin_open:
 	__pool_dec(tc->pool);
 bad_pool_lookup:
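
The three thin_ctr() hunks repair the error path: by the time dm_set_target_max_io_len() can fail, the thin device has already been opened, so the new bad_target_max_io_len label closes it before falling through to the older labels. The stacked-goto unwind idiom in miniature (resource names invented, failure forced for demonstration):

    #include <stdio.h>

    static int open_pool(void)      { return 0; }   /* succeeds */
    static int set_max_io_len(void) { return -22; } /* forces failure */
    static void close_pool(void)    { puts("close_pool"); }

    static int ctr(void)
    {
            int r;

            r = open_pool();
            if (r)
                    goto bad_pool;

            r = set_max_io_len();
            if (r)
                    goto bad_max_io_len;  /* pool is open: close it */

            return 0;

            /* Labels run in reverse order of acquisition; each frees
             * exactly what was held when its goto was taken. */
    bad_max_io_len:
            close_pool();
    bad_pool:
            return r;
    }

    int main(void)
    {
            printf("ctr() = %d\n", ctr());
            return 0;
    }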
@@ -680,6 +680,8 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
 	if (r)
 		return r;
 
+	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
 	r = sm_ll_extend(&smm->ll, nr_blocks);
 	if (r)
 		return r;
@@ -9,6 +9,17 @@
 
 #include "dm-transaction-manager.h"
 
+#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
+
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about ~16k metadata blocks.
+ */
+#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))
+#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)
+
 /*
  * Unfortunately we have to use two-phase construction due to the cycle
  * between the tm and sm.
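
The new constants bound the metadata device: one index block holds 255 entries, each covering just under 16k blocks (the -64 accounts for bookkeeping), with blocks of 8 sectors (4096 bytes, assuming 512-byte sectors). Multiplying it out (sketch):

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define BLOCK_SIZE (4096 >> SECTOR_SHIFT)       /* 8 sectors */
    #define MAX_BLOCKS (255 * ((1 << 14) - 64))     /* 4,161,600 */
    #define MAX_SECTORS ((long long)MAX_BLOCKS * BLOCK_SIZE)

    int main(void)
    {
            printf("max blocks:  %d\n", MAX_BLOCKS);
            printf("max sectors: %lld\n", MAX_SECTORS);
            /* ~15 GiB, just under the 16GB warning threshold above. */
            printf("max size:    %lld GiB\n",
                   MAX_SECTORS * 512 / (1LL << 30));
            return 0;
    }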
@@ -1584,7 +1584,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 		}
 
 		if (mtd->ecc_stats.failed - ecc_failures) {
-			if (retry_mode + 1 <= chip->read_retries) {
+			if (retry_mode + 1 < chip->read_retries) {
 				retry_mode++;
 				ret = nand_setup_read_retry(mtd,
 							    retry_mode);
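
The nand_do_read_ops() change is an off-by-one fix: retry modes run 0..read_retries-1 with mode 0 being the initial read, so with `<=` the code could step to mode == read_retries, one past the last valid mode. Counting it out (sketch):

    #include <stdio.h>

    int main(void)
    {
            int read_retries = 4;   /* modes 0..3; mode 0 = initial read */

            for (int retry_mode = 0; ; retry_mode++) {
                    /* The old test (retry_mode + 1 <= read_retries) would
                     * still pass at retry_mode == 3 and bump the mode to
                     * 4, which does not exist. */
                    if (!(retry_mode + 1 < read_retries)) {
                            printf("giving up after mode %d\n", retry_mode);
                            break;
                    }
                    printf("retrying with mode %d\n", retry_mode + 1);
            }
            return 0;
    }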
@@ -1633,6 +1633,7 @@ static int omap_nand_probe(struct platform_device *pdev)
 	int i;
 	dma_cap_mask_t mask;
 	unsigned sig;
+	unsigned oob_index;
 	struct resource *res;
 	struct mtd_part_parser_data ppdata = {};
 
@@ -1826,11 +1827,14 @@ static int omap_nand_probe(struct platform_device *pdev)
 						(mtd->writesize /
 						nand_chip->ecc.size);
 		if (nand_chip->options & NAND_BUSWIDTH_16)
-			ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+			oob_index = BADBLOCK_MARKER_LENGTH;
 		else
-			ecclayout->eccpos[0] = 1;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-						ecclayout->eccbytes;
+			oob_index = 1;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+			ecclayout->eccpos[i] = oob_index;
+		/* no reserved-marker in ecclayout for this ecc-scheme */
+		ecclayout->oobfree->offset =
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		break;
 
 	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1847,9 +1851,15 @@ static int omap_nand_probe(struct platform_device *pdev)
 		ecclayout->eccbytes = nand_chip->ecc.bytes *
 						(mtd->writesize /
 						nand_chip->ecc.size);
-		ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-						ecclayout->eccbytes;
+		oob_index = BADBLOCK_MARKER_LENGTH;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+			ecclayout->eccpos[i] = oob_index;
+			if (((i + 1) % nand_chip->ecc.bytes) == 0)
+				oob_index++;
+		}
+		/* include reserved-marker in ecclayout->oobfree calculation */
+		ecclayout->oobfree->offset = 1 +
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		/* software bch library is used for locating errors */
 		nand_chip->ecc.priv = nand_bch_init(mtd,
 						nand_chip->ecc.size,
@@ -1883,9 +1893,12 @@ static int omap_nand_probe(struct platform_device *pdev)
 		ecclayout->eccbytes = nand_chip->ecc.bytes *
 						(mtd->writesize /
 						nand_chip->ecc.size);
-		ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-						ecclayout->eccbytes;
+		oob_index = BADBLOCK_MARKER_LENGTH;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+			ecclayout->eccpos[i] = oob_index;
+		/* reserved marker already included in ecclayout->eccbytes */
+		ecclayout->oobfree->offset =
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		/* This ECC scheme requires ELM H/W block */
 		if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
 			pr_err("nand: error: could not initialize ELM\n");
@@ -1913,9 +1926,15 @@ static int omap_nand_probe(struct platform_device *pdev)
 		ecclayout->eccbytes = nand_chip->ecc.bytes *
 						(mtd->writesize /
 						nand_chip->ecc.size);
-		ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-						ecclayout->eccbytes;
+		oob_index = BADBLOCK_MARKER_LENGTH;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+			ecclayout->eccpos[i] = oob_index;
+			if (((i + 1) % nand_chip->ecc.bytes) == 0)
+				oob_index++;
+		}
+		/* include reserved-marker in ecclayout->oobfree calculation */
+		ecclayout->oobfree->offset = 1 +
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		/* software bch library is used for locating errors */
 		nand_chip->ecc.priv = nand_bch_init(mtd,
 						nand_chip->ecc.size,
@@ -1956,9 +1975,12 @@ static int omap_nand_probe(struct platform_device *pdev)
 		ecclayout->eccbytes = nand_chip->ecc.bytes *
 						(mtd->writesize /
 						nand_chip->ecc.size);
-		ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-						ecclayout->eccbytes;
+		oob_index = BADBLOCK_MARKER_LENGTH;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+			ecclayout->eccpos[i] = oob_index;
+		/* reserved marker already included in ecclayout->eccbytes */
+		ecclayout->oobfree->offset =
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		break;
 #else
 		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1972,11 +1994,8 @@ static int omap_nand_probe(struct platform_device *pdev)
 		goto return_error;
 	}
 
-	/* populate remaining ECC layout data */
-	ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
-						ecclayout->eccbytes);
-	for (i = 1; i < ecclayout->eccbytes; i++)
-		ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+	/* all OOB bytes from oobfree->offset till end off OOB are free */
+	ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
 	/* check if NAND device's OOB is enough to store ECC signatures */
 	if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
 		pr_err("not enough OOB bytes required = %d, available=%d\n",
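
All of the omap_nand_probe() hunks rebuild ecclayout the same way: eccpos[] is filled by walking oob_index up from the bad-block marker, optionally skipping one reserved byte after every ecc.bytes-sized group, and oobfree then starts past the last ECC byte and runs to the end of the OOB area. The variant with the reserved-byte skip, as a standalone sketch (all parameter values are examples only):

    #include <stdio.h>

    #define BADBLOCK_MARKER_LENGTH 2

    int main(void)
    {
            int ecc_bytes = 7;      /* bytes per ECC step (example) */
            int steps = 4;          /* writesize / ecc step size */
            int eccbytes = ecc_bytes * steps;
            int oobsize = 64;
            int eccpos[64];
            int i, oob_index = BADBLOCK_MARKER_LENGTH;

            /* Walk the OOB, skipping one reserved byte per group. */
            for (i = 0; i < eccbytes; i++, oob_index++) {
                    eccpos[i] = oob_index;
                    if (((i + 1) % ecc_bytes) == 0)
                            oob_index++;
            }

            /* Free OOB starts past the last ECC byte and its reserved
             * marker; everything to the end of the OOB is free. */
            int oobfree_offset = 1 + eccpos[eccbytes - 1] + 1;
            printf("last eccpos=%d, oobfree at %d, length %d\n",
                   eccpos[eccbytes - 1], oobfree_offset,
                   oobsize - oobfree_offset);
            return 0;
    }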