mirror of https://gitee.com/openkylin/linux.git
ARC updates for 5.13-rc2

Merge tag 'arc-5.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:

 - PAE fixes

 - syscall num check off-by-one bug

 - misc fixes

* tag 'arc-5.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: mm: Use max_high_pfn as a HIGHMEM zone border
  ARC: mm: PAE: use 40-bit physical page mask
  ARC: entry: fix off-by-one error in syscall number validation
  ARC: kgdb: add 'fallthrough' to prevent a warning
  arc: Fix typos/spellos
commit f36edc5533
@@ -31,7 +31,7 @@ endif

 ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
 # any kernel headers, and missing the r25 global register
 # Can't do unconditionally because of recursive include issues
@@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  *
  * Technically the lock is also needed for UP (boils down to irq save/restore)
  * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
  * Other way around, xchg is one instruction anyways, so can't be interrupted
  * as such
  */
@@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
  * is incorrect. But here's the rationale:
  *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
  *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
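These two comment fixes sit in the section explaining why xchg() and the spinlock-based atomics must share one lock on !LLSC configurations. A minimal userspace analogy of that rule, assuming a pthreads mutex standing in for atomic_ops_lock() (ops_lock and my_xchg are illustrative names, not kernel API):

#include <pthread.h>

/* stand-in for atomic_ops_lock(): the single lock every lock-based
 * "atomic" helper must take, so xchg() cannot interleave with a
 * half-done atomic read-modify-write */
static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long my_xchg(volatile unsigned long *ptr, unsigned long val)
{
	unsigned long old;

	pthread_mutex_lock(&ops_lock);
	old = *ptr;		/* read ... */
	*ptr = val;		/* ... and replace, atomic w.r.t. the lock */
	pthread_mutex_unlock(&ops_lock);
	return old;
}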
@@ -7,6 +7,18 @@

 #include <uapi/asm/page.h>

+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	40
+#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	32
+#define PAGE_MASK_PHYS			PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
 #ifndef __ASSEMBLY__

 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
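The split into PAGE_MASK vs PAGE_MASK_PHYS exists because PAGE_MASK is computed in 32-bit arithmetic on ARC, so applying it to a 40-bit PAE40 physical address zero-extends the mask and silently clears bits [39:32]. A self-contained demo of that truncation, assuming the default 8 KiB ARC page size (hosted C, illustrative values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t paddr = 0x12ffffe000ULL;           /* 40-bit physical address */
	uint32_t page_mask = ~(uint32_t)(8192 - 1); /* PAGE_MASK on a 32-bit arch */
	uint64_t page_mask_phys = 0xff00000000ULL | page_mask;

	/* the 32-bit mask widens to 0x00000000ffffe000: bits [39:32] are lost */
	printf("PAGE_MASK:      %#llx\n", (unsigned long long)(paddr & page_mask));
	printf("PAGE_MASK_PHYS: %#llx\n", (unsigned long long)(paddr & page_mask_phys));
	return 0;
}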
@@ -107,8 +107,8 @@
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+						   _PAGE_SPECIAL)
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
 #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
 #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS	40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS	32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)

 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -33,5 +33,4 @@

 #define PAGE_MASK	(~(PAGE_SIZE-1))

-
 #endif /* _UAPI__ASM_ARC_PAGE_H */
@@ -177,7 +177,7 @@ tracesys:

 	; Do the Sys Call as we normally would.
 	; Validate the Sys Call number
-	cmp     r8,  NR_syscalls
+	cmp     r8,  NR_syscalls - 1
 	mov.hi  r0, -ENOSYS
 	bhi     tracesys_exit

@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
 	;============ Normal syscall case

 	; syscall num shd not exceed the total system calls avail
-	cmp     r8,  NR_syscalls
+	cmp     r8,  NR_syscalls - 1
 	mov.hi  r0, -ENOSYS
 	bhi     .Lret_from_system_call

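Both entry.S hunks fix the same off-by-one: valid syscall numbers run from 0 to NR_syscalls - 1, and bhi branches on unsigned-higher, so comparing against NR_syscalls let r8 == NR_syscalls fall through to an out-of-bounds table read. A C rendering of the before/after check (a sketch with a made-up three-entry table, not the kernel's dispatch code):

#include <errno.h>

#define NR_syscalls 3		/* illustrative */
typedef long (*syscall_fn)(void);
static long sys_a(void) { return 0; }
static long sys_b(void) { return 0; }
static long sys_c(void) { return 0; }
static const syscall_fn sys_call_table[NR_syscalls] = { sys_a, sys_b, sys_c };

long dispatch(unsigned int r8)
{
	/* buggy form: if (r8 > NR_syscalls) -- r8 == NR_syscalls passed the
	 * check and indexed sys_call_table[NR_syscalls], one past the end */
	if (r8 > NR_syscalls - 1)	/* i.e. r8 >= NR_syscalls */
		return -ENOSYS;
	return sys_call_table[r8]();
}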
@@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 		ptr = &remcomInBuffer[1];
 		if (kgdb_hex2long(&ptr, &addr))
 			regs->ret = addr;
+		fallthrough;

 	case 'D':
 	case 'k':
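No behavior change here: the 'c' (continue) handler intentionally drops into the 'D'/'k' cases, and GCC's -Wimplicit-fallthrough requires that intent to be annotated. A standalone illustration; the macro matches what the kernel's fallthrough expands to:

/* as in include/linux/compiler_attributes.h */
#define fallthrough __attribute__((__fallthrough__))

int handle(int cmd, int *resumed)
{
	switch (cmd) {
	case 'c':
		*resumed = 1;	/* the 'c'-specific work ... */
		fallthrough;	/* ... then share the code below, warning-free */
	case 'D':
	case 'k':
		return 0;
	default:
		return -1;
	}
}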
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	int ret;

 	/*
-	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
+	 * This is only for old cores lacking LLOCK/SCOND, which by definition
 	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
 	 * And this also helps reduce the overhead for serializing in
 	 * the UP case
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

-	/* Z indicates to userspace if operation succeded */
+	/* Z indicates to userspace if operation succeeded */
 	regs->status32 &= ~STATUS_Z_MASK;

 	ret = access_ok(uaddr, sizeof(*uaddr));
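For context, arc_usr_cmpxchg() emulates compare-and-exchange by syscall for cores without LLOCK/SCOND, and the Z flag in status32 is the success indicator userspace tests on return. A rough sketch of that protocol, with user-access and preemption handling omitted (STATUS_Z_MASK below uses bit 11, per the ARC STATUS32 layout):

#define STATUS_Z_MASK	(1UL << 11)	/* Z flag in status32 */

/* sketch: compare *uaddr with expected, store on match, report via Z */
static int cmpxchg_emulate(int *uaddr, int expected, int new_val,
			   unsigned long *status32)
{
	int uval = *uaddr;		/* real code uses get_user()/put_user() */

	*status32 &= ~STATUS_Z_MASK;	/* assume failure */
	if (uval == expected) {
		*uaddr = new_val;
		*status32 |= STATUS_Z_MASK;	/* Z set => succeeded */
	}
	return uval;			/* old value goes back to the caller */
}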
@@ -107,7 +107,7 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)

 void arch_cpu_idle(void)
 {
-	/* Re-enable interrupts <= default irq priority before commiting SLEEP */
+	/* Re-enable interrupts <= default irq priority before committing SLEEP */
 	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

 	__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)

 void arch_cpu_idle(void)
 {
-	/* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
 	__asm__ __volatile__("sleep 0x3	\n");
 }

@@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	regs->r2 = (unsigned long)&sf->uc;

 	/*
-	 * small optim to avoid unconditonally calling do_sigaltstack
+	 * small optim to avoid unconditionally calling do_sigaltstack
 	 * in sigreturn path, now that we only have rt_sigreturn
 	 */
 	magic = MAGIC_SIGALTSTK;
@@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs)
 void do_notify_resume(struct pt_regs *regs)
 {
 	/*
-	 * ASM glue gaurantees that this is only called when returning to
+	 * ASM glue guarantees that this is only called when returning to
 	 * user mode
 	 */
 	if (test_thread_flag(TIF_NOTIFY_RESUME))
@@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
 	min_high_pfn = PFN_DOWN(high_mem_start);
 	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);

-	max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+	/*
+	 * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+	 * For HIGHMEM without PAE max_high_pfn should be less than
+	 * min_low_pfn to guarantee that these two regions don't overlap.
+	 * For PAE case highmem is greater than lowmem, so it is natural
+	 * to use max_high_pfn.
+	 *
+	 * In both cases, holes should be handled by pfn_valid().
+	 */
+	max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;

 	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
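The zone-border fix matters because lowmem and highmem occupy two separate apertures on ARC, and under PAE the high one sits above the low one, so capping ZONE_HIGHMEM at min_low_pfn left the zone unable to cover highmem at all. An illustrative pfn computation (made-up addresses, 8 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	13	/* 8 KiB pages, the ARC default */
#define PFN_DOWN(x)	((uint64_t)(x) >> PAGE_SHIFT)

int main(void)
{
	/* illustrative PAE40-style layout: lowmem at 2 GB, highmem above 4 GB */
	uint64_t low_start  = 0x80000000ULL;
	uint64_t high_start = 0x100000000ULL, high_sz = 0x40000000ULL;

	uint64_t min_low_pfn  = PFN_DOWN(low_start);
	uint64_t min_high_pfn = PFN_DOWN(high_start);
	uint64_t max_high_pfn = PFN_DOWN(high_start + high_sz);

	/* old border (min_low_pfn = 0x40000) lies below the first highmem pfn
	 * (0x80000); the new border (max_high_pfn = 0xa0000) spans highmem */
	printf("min_low_pfn=%#llx min_high_pfn=%#llx max_high_pfn=%#llx\n",
	       (unsigned long long)min_low_pfn,
	       (unsigned long long)min_high_pfn,
	       (unsigned long long)max_high_pfn);
	return 0;
}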
@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 			   unsigned long flags)
 {
+	unsigned int off;
 	unsigned long vaddr;
 	struct vm_struct *area;
-	phys_addr_t off, end;
+	phys_addr_t end;
 	pgprot_t prot = __pgprot(flags);

 	/* Don't allow wraparound, zero size */
|
@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
|
|||
|
||||
/* Mappings have to be page-aligned */
|
||||
off = paddr & ~PAGE_MASK;
|
||||
paddr &= PAGE_MASK;
|
||||
paddr &= PAGE_MASK_PHYS;
|
||||
size = PAGE_ALIGN(end + 1) - paddr;
|
||||
|
||||
/*
|
||||
|
|
|
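Here the same 40-bit mask shows up in ioremap_prot(): the sub-page offset is split off first, then the base is aligned with the physical-width mask so bits [39:32] of a PAE address survive. The alignment step in isolation (a sketch; masks assume 8 KiB pages and PAE40):

#include <stdint.h>

#define PAGE_SIZE	8192ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* 64-bit here, for clarity */
#define PAGE_MASK_PHYS	(0xff00000000ULL | 0xffffe000ULL)

/* split a physical address into a page-aligned base plus in-page offset */
static uint64_t page_align_phys(uint64_t paddr, uint64_t *off)
{
	*off = paddr & ~PAGE_MASK;	/* offset within the page */
	return paddr & PAGE_MASK_PHYS;	/* base, keeping bits [39:32] */
}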
@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
 	struct page *page = pfn_to_page(pte_pfn(*ptep));

 	create_tlb(vma, vaddr, ptep);