mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, memory hotplug: remove wrong -1 in calling init_memory_mapping()
  x86: keep the /proc/meminfo page count correct
  x86/uv: memory allocation at initialization
  xen: fix Xen domU boot with batched mprotect
commit f8245e91a5
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = alloc_bootmem_pages(bytes);
+	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
 
 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = alloc_bootmem_pages(bytes);
+	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_node_to_blade, 255, bytes);
 
 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_cpu_to_blade, 255, bytes);
 
 	blade = 0;
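
Note on the uv_system_init() hunks (the "x86/uv: memory allocation at initialization" fix, in what was then arch/x86/kernel/genx2apic_uv_x.c): uv_system_init() runs late enough in boot that the boot-time allocator is no longer the right tool, so the three UV lookup tables move from alloc_bootmem_pages() to kmalloc(GFP_KERNEL), and the first hunk drops the now-unneeded <linux/bootmem.h> include. A minimal kernel-style sketch of the new allocation pattern follows; uv_alloc_table() is a hypothetical helper, and the NULL check is an illustration not present in the patch.

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper mirroring the pattern above: allocate one of the
 * UV lookup tables with the slab allocator (valid here because the code
 * runs well after early boot) and poison it with the 255 filler used as
 * the "no blade" sentinel. */
static void *uv_alloc_table(size_t bytes)
{
	void *table = kmalloc(bytes, GFP_KERNEL);

	if (!table)		/* the patch itself does not check; added for illustration */
		return NULL;
	memset(table, 255, bytes);
	return table;
}
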
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * pagetable pages as RO. So assume someone who pre-setup
 		 * these mappings are more intelligent.
 		 */
-		if (pte_val(*pte))
+		if (pte_val(*pte)) {
+			pages++;
 			continue;
+		}
 
 		if (0)
 			printk(" pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_2M))
+			if (page_size_mask & (1 << PG_LEVEL_2M)) {
+				pages++;
 				continue;
+			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}
 
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_1G))
+			if (page_size_mask & (1 << PG_LEVEL_1G)) {
+				pages++;
 				continue;
+			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}
 
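
Note on the three arch/x86/mm/init_64.c hunks above (the "keep the /proc/meminfo page count correct" fix): when phys_pte_init(), phys_pmd_init() or phys_pud_init() find an entry that is already mapped the way page_size_mask asks for, they skip rewriting it, but they must still count it in `pages`; otherwise the totals handed back for the direct-map accounting visible in /proc/meminfo come up short whenever init_memory_mapping() revisits an already-mapped range. A small userspace model of that counting rule, with made-up names and data:

#include <stdio.h>

int main(void)
{
	/* 0 = slot not mapped yet, non-zero = already mapped (toy data) */
	unsigned long slot[8] = { 0, 3, 0, 0, 7, 0, 0, 9 };
	unsigned long pages = 0;

	for (int i = 0; i < 8; i++) {
		if (slot[i]) {
			pages++;	/* already present: skip the write but still count it */
			continue;
		}
		slot[i] = 1;		/* "create" the mapping */
		pages++;
	}
	printf("pages accounted: %lu\n", pages);	/* prints 8, not 5 */
	return 0;
}
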
@@ -831,7 +837,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size);
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
 
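
Note on the arch_add_memory() hunk (the "remove wrong -1" fix): init_memory_mapping() takes an exclusive end address, so the hot-added range is [start, start + size); the extra -1 shortened the range by one byte and could leave its last page unmapped. A standalone sketch of the arithmetic, using arbitrary example values for the base and size:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* arbitrary example values for a hot-added region */
	unsigned long start = 0x40000000UL;		/* 1 GiB base */
	unsigned long size  = 128UL << 20;		/* 128 MiB section */

	unsigned long pages_exclusive  = (start + size - start) >> PAGE_SHIFT;
	unsigned long pages_off_by_one = (start + size - 1 - start) >> PAGE_SHIFT;

	printf("[start, start + size)     -> %lu pages\n", pages_exclusive);	/* 32768 */
	printf("[start, start + size - 1) -> %lu pages\n", pages_off_by_one);	/* 32767: last page lost */
	return 0;
}
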
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
 	unsigned long address = (unsigned long)vaddr;
 	unsigned int level;
-	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & ~PAGE_MASK;
+	pte_t *pte;
+	unsigned offset;
+
+	/*
+	 * if the PFN is in the linear mapped vaddr range, we can just use
+	 * the (quick) virt_to_machine() p2m lookup
+	 */
+	if (virt_addr_valid(vaddr))
+		return virt_to_machine(vaddr);
 
+	/* otherwise we have to do a (slower) full page-table walk */
+
+	pte = lookup_address(address, &level);
 	BUG_ON(pte == NULL);
-
+	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
 
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
 	xen_mc_batch();
 
-	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
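
Note on the two arch/x86/xen/mmu.c hunks (the "fix Xen domU boot with batched mprotect" fix): xen_ptep_modify_prot_commit() may be handed a pte pointer that is not covered by the kernel's linear mapping (for instance a pte page reached via kmap_atomic() or vmalloc()), so translating it with virt_to_machine() produced a bogus machine address and broke the batched mprotect path during domU boot. arbitrary_virt_to_machine() now keeps a virt_addr_valid() fast path for linear-map addresses and falls back to a lookup_address() page-table walk for everything else. A tiny userspace model of the underlying phys-to-machine idea that virt_to_machine() and pte_mfn() rely on, with made-up table contents:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* toy pfn -> mfn table standing in for the real p2m map */
static const unsigned long p2m[] = { 0x1a0, 0x1a1, 0x07f, 0x233 };

static unsigned long to_machine(unsigned long paddr)
{
	unsigned long pfn    = paddr >> PAGE_SHIFT;
	unsigned long offset = paddr & (PAGE_SIZE - 1);

	return (p2m[pfn] << PAGE_SHIFT) + offset;	/* machine frame + same in-page offset */
}

int main(void)
{
	printf("maddr = %#lx\n", to_machine(0x2abc));	/* frame 2 -> mfn 0x7f -> 0x7fabc */
	return 0;
}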