/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways.  First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes.  We call the first kernel the Host,
 * and the others the Guests.  The program which sets up and configures Guests
 * (such as the example in tools/lguest/lguest.c) is called the Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels: setting
 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
 * how to be a Guest at boot time.  This means that you can use the same kernel
 * you boot normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest?  We'll see that later, but let's
 * just say that we end up here, where we replace the native functions in the
 * various "paravirt" structures with our Guest versions, then boot like
 * normal.
:*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/virtio_pci.h>
#include <asm/acpi.h>
#include <asm/apic.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>
#include <asm/fpu/api.h>
#include <asm/stackprotector.h>
#include <asm/reboot.h>		/* for struct machine_ops */
#include <asm/kvm_para.h>
#include <asm/pci_x86.h>
#include <asm/pci-direct.h>

/*G:010
 * Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways.  In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code).
:*/

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_iret = (u32)lguest_noirq_iret,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = IA32_SYSCALL_VECTOR,
};
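
/*
 * Every hypercall in this file funnels through hcall(), which lives in
 * asm/lguest_hcall.h rather than here.  As a hedged sketch (from memory, so
 * treat the exact register bindings as an assumption), it is one software
 * trap into the Host with the call number and arguments in registers:
 *
 *	static inline unsigned long
 *	hcall(unsigned long call, unsigned long arg1, unsigned long arg2,
 *	      unsigned long arg3, unsigned long arg4)
 *	{
 *		// LGUEST_TRAP_ENTRY is the trap vector lguest reserves.
 *		asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
 *			     : "=a"(call)
 *			     : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3),
 *			       "S"(arg4)
 *			     : "memory");
 *		return call;
 *	}
 */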

/*G:037
 * async_hcall() is pretty simple: I'm quite proud of it really.  We have a
 * ring buffer of stored hypercalls which the Host will run through next time
 * we do a normal hypercall.  Each entry in the ring has 5 slots for the
 * hypercall arguments, and a "hcall_status" word which is 0 if the call is
 * ready to go, and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly.  This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time!
 */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/*
	 * Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one!
	 */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}
|
2007-11-02 23:43:10 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*G:035
|
|
|
|
* Notice the lazy_hcall() above, rather than hcall(). This is our first real
|
|
|
|
* optimization trick!
|
2007-11-05 18:55:57 +08:00
|
|
|
*
|
|
|
|
* When lazy_mode is set, it means we're allowed to defer all hypercalls and do
|
|
|
|
* them as a batch when lazy_mode is eventually turned off. Because hypercalls
|
|
|
|
* are reasonably expensive, batching them up makes sense. For example, a
|
|
|
|
* large munmap might update dozens of page table entries: that code calls
|
|
|
|
* paravirt_enter_lazy_mmu(), does the dozen updates, then calls
|
|
|
|
* lguest_leave_lazy_mode().
|
|
|
|
*
|
|
|
|
* So, when we're in lazy mode, we call async_hcall() to store the call for
|
2009-07-31 06:03:45 +08:00
|
|
|
* future processing:
|
|
|
|
*/
|
2010-04-15 11:43:54 +08:00
|
|
|
static void lazy_hcall1(unsigned long call, unsigned long arg1)
|
2009-03-14 23:37:52 +08:00
|
|
|
{
|
|
|
|
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(call, arg1, 0, 0, 0);
|
2009-03-14 23:37:52 +08:00
|
|
|
else
|
2009-06-13 12:27:07 +08:00
|
|
|
async_hcall(call, arg1, 0, 0, 0);
|
2009-03-14 23:37:52 +08:00
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
|
2009-03-14 23:37:52 +08:00
|
|
|
static void lazy_hcall2(unsigned long call,
|
2010-04-15 11:43:54 +08:00
|
|
|
unsigned long arg1,
|
|
|
|
unsigned long arg2)
|
2009-03-14 23:37:52 +08:00
|
|
|
{
|
|
|
|
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(call, arg1, arg2, 0, 0);
|
2009-03-14 23:37:52 +08:00
|
|
|
else
|
2009-06-13 12:27:07 +08:00
|
|
|
async_hcall(call, arg1, arg2, 0, 0);
|
2009-03-14 23:37:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void lazy_hcall3(unsigned long call,
|
2010-04-15 11:43:54 +08:00
|
|
|
unsigned long arg1,
|
|
|
|
unsigned long arg2,
|
|
|
|
unsigned long arg3)
|
2007-11-02 23:43:10 +08:00
|
|
|
{
|
|
|
|
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(call, arg1, arg2, arg3, 0);
|
2007-11-02 23:43:10 +08:00
|
|
|
else
|
2009-06-13 12:27:07 +08:00
|
|
|
async_hcall(call, arg1, arg2, arg3, 0);
|
|
|
|
}
|
|
|
|
|
2009-06-13 12:27:07 +08:00
|
|
|
#ifdef CONFIG_X86_PAE
|
2009-06-13 12:27:07 +08:00
|
|
|
static void lazy_hcall4(unsigned long call,
|
2010-04-15 11:43:54 +08:00
|
|
|
unsigned long arg1,
|
|
|
|
unsigned long arg2,
|
|
|
|
unsigned long arg3,
|
|
|
|
unsigned long arg4)
|
2009-06-13 12:27:07 +08:00
|
|
|
{
|
|
|
|
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(call, arg1, arg2, arg3, arg4);
|
2009-06-13 12:27:07 +08:00
|
|
|
else
|
|
|
|
async_hcall(call, arg1, arg2, arg3, arg4);
|
2007-11-02 23:43:10 +08:00
|
|
|
}
|
2009-06-13 12:27:07 +08:00
|
|
|
#endif
|
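
/*
 * To see the batching in action, here is a hedged sketch of the pattern the
 * mm code follows (illustrative only; the real entry points are the generic
 * arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() wrappers, which end
 * up in the paravirt hooks used above):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (each page in the munmap'd range)
 *		set_pte_at(mm, addr, ptep, pte);  // queued via async_hcall()
 *	arch_leave_lazy_mmu_mode();  // LHCALL_FLUSH_ASYNC runs the batch
 */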

/*G:036
 * When lazy mode is turned off, we issue the do-nothing hypercall to
 * flush any stored calls, and call the generic helper to reset the
 * per-cpu lazy mode variable.
 */
static void lguest_leave_lazy_mmu_mode(void)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_leave_lazy_mmu();
}

/*
 * We also catch the end of context switch; we enter lazy mode for much of
 * that too, so again we need to flush here.
 *
 * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
 * mode, but unlike Xen, lguest doesn't care about the difference).
 */
static void lguest_end_context_switch(struct task_struct *next)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_end_context_switch(next);
}

/*G:032
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction.  The Host knows to
 * check there before it tries to deliver an interrupt.
 */

/*
 * save_flags() is expected to return the processor state (ie. "flags").  The
 * flags word contains all kinds of stuff, but in practice Linux only cares
 * about the interrupt flag.  Our "save_flags()" just returns that.
 */
asmlinkage __visible unsigned long lguest_save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* Interrupts go off... */
asmlinkage __visible void lguest_irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/*
 * Let's pause a moment.  Remember how I said these are called so often?
 * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
 * break some rules.  In particular, these functions are assumed to save their
 * own registers if they need to: normal C functions assume they can trash the
 * eax register.  To use normal C functions, we use
 * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
 * C function, then restores it.
 */
PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
/*:*/

/* These are in head_32.S */
extern void lg_irq_enable(void);
extern void lg_restore_fl(unsigned long flags);

/*M:003
 * We could be more efficient in our checking of outstanding interrupts, rather
 * than using a branch.  One way would be to put the "irq_enabled" field in a
 * page by itself, and have the Host write-protect it when an interrupt comes
 * in when irqs are disabled.  There will then be a page fault as soon as
 * interrupts are re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag.  If an interrupt does come
 * in, we then disable them for real.  This is uncommon, so we could simply use
 * a hypercall for interrupt control and not worry about efficiency.
:*/
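
/*
 * To make the soft-disable idea concrete, here is a minimal sketch (an
 * illustration only, not lguest code; soft_irq_disabled, irq_pending and
 * run_pending_handlers are hypothetical names):
 *
 *	static bool soft_irq_disabled, irq_pending;
 *
 *	static void soft_irq_disable(void)
 *	{
 *		soft_irq_disabled = true;	// just a flag: no hypercall
 *	}
 *
 *	static void soft_irq_enable(void)
 *	{
 *		soft_irq_disabled = false;
 *		if (irq_pending)		// something arrived meanwhile?
 *			run_pending_handlers();	// deliver it now
 *	}
 *
 *	// In the (rare) interrupt path:
 *	//	if (soft_irq_disabled) { irq_pending = true; hard_disable(); }
 *	//	else handle it normally.
 */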

/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
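
/*
 * For the curious, the 64 bits of a 32-bit interrupt gate break down roughly
 * like this (a sketch from the architecture manuals, not something this file
 * relies on):
 *
 *	Word 0 (low 32 bits):  bits  0-15  handler offset 15..0
 *	                       bits 16-31  code segment selector
 *	Word 1 (high 32 bits): bits  0-7   reserved/zero
 *	                       bits  8-15  type and attributes (P, DPL, type)
 *	                       bits 16-31  handler offset 31..16
 *
 * That's why lguest_write_idt_entry() below can simply hand the Host the
 * entry as two 32-bit chunks.
 */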
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	/*
	 * The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks.  The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * years.
	 */
	u32 *desc = (u32 *)g;
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
}

/*
 * Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them.
 */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT).  You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table.  There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is exactly like the IDT code.
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *gdt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
}

/*
 * For a single GDT entry which changes, we simply change our copy and
 * then tell the host about it.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
	      dt[entrynum].a, dt[entrynum].b, 0);
}

/*
 * There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  As an optimization, we have a hypercall
 * specifically for this case.
 *
 * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
 * which took a range of entries?
 */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway.
	 */
	lazy_load_gs(0);
	lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu);
}

/*G:038
 * That's enough excitement for now, back to ploughing through each of the
 * different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault.
 */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/*
 * This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment.  Some comments scattered through
 * the kernel code indicate that this was used for task switching in ages
 * past, along with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops.  So we
 * override the native version with a do-nothing version.
 */
static void lguest_load_tr_desc(void)
{
}

/*
 * The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features.  It was introduced before the
 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
 *
 * As you might imagine, after a decade and a half of this treatment, it is
 * now a giant ball of hair.  Its entry in the current Intel manual runs to
 * 28 pages.
 *
 * This instruction even has its own Wikipedia entry.  The Wikipedia entry
 * has been translated into 6 languages.  I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction.  Then I pretty much turned
 * off feature bits until the Guest booted.  (Don't say that: you'll damage
 * lguest sales!)  Shut up, inner voice!  (Hey, just pointing out that this is
 * hardly future proof.)  No one's listening!  They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged.  So we try not to get
 * too worked up about it.
 */
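
/*
 * A quick demonstration of that last point, as a hedged userspace sketch
 * (nothing to do with this file's build; compile it as an ordinary program):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int ax = 1, bx, cx, dx;
 *
 *		// Raw cpuid: unprivileged, so the Host never sees it and
 *		// none of the masking in lguest_cpuid() below applies.
 *		asm volatile("cpuid"
 *			     : "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx)
 *			     : "a"(ax));
 *		printf("features: cx=%08x dx=%08x\n", cx, dx);
 *		return 0;
 *	}
 */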
static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
			 unsigned int *cx, unsigned int *dx)
{
	int function = *ax;

	native_cpuid(ax, bx, cx, dx);
	switch (function) {
	/*
	 * CPUID 0 gives the highest legal CPUID number (and the ID string).
	 * We futureproof our code a little by sticking to known CPUID values.
	 */
	case 0:
		if (*ax > 5)
			*ax = 5;
		break;

	/*
	 * CPUID 1 is a basic feature request.
	 *
	 * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3
	 * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE.
	 */
	case 1:
		*cx &= 0x00002201;
		*dx &= 0x07808151;
		/*
		 * The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set.
		 */
		*dx |= 0x00002000;
		/*
		 * We also lie, and say we're family id 5.  6 or greater
		 * leads to a rdmsr in early_init_intel which we can't handle.
		 * Family ID is returned as bits 8-12 in ax.
		 */
		*ax &= 0xFFFFF0FF;
		*ax |= 0x00000500;
		break;

	/*
	 * This is used to detect if we're running under KVM.  We might be,
	 * but that's a Host matter, not us.  So say we're not.
	 */
	case KVM_CPUID_SIGNATURE:
		*bx = *cx = *dx = 0;
		break;

	/*
	 * 0x80000000 returns the highest Extended Function, so we futureproof
	 * like we do above by limiting it to known fields.
	 */
	case 0x80000000:
		if (*ax > 0x80000008)
			*ax = 0x80000008;
		break;

	/*
	 * PAE systems can mark pages as non-executable.  Linux calls this the
	 * NX bit.  Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
	 * Virus Protection).  We just switch it off here, since we don't
	 * support it.
	 */
	case 0x80000001:
		*dx &= ~(1 << 20);
		break;
	}
}

/*
 * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it.  The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8).
 *
 * What does the TS bit do?  Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used.  Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 locally because the Host never changes it.  The Guest sometimes
 * wants to read it and we'd prefer not to bother the Host unnecessarily.
 */
static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/*
 * Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it.  This "clts" instruction is faster, because all
 * the vowels have been optimized out.
 */
static void lguest_clts(void)
{
	lazy_hcall1(LHCALL_TS, 0);
	current_cr0 &= ~X86_CR0_TS;
}

/*
 * cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads.  The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there.
 */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* See lguest_set_pte() below. */
static bool cr3_changed = false;
static unsigned long current_cr3;

/*
 * cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0.  Keep a local copy, and tell the Host when it changes.
 */
static void lguest_write_cr3(unsigned long cr3)
{
	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
	current_cr3 = cr3;

	/* These two page tables are simple, linear, and used during boot */
	if (cr3 != __pa_symbol(swapper_pg_dir) &&
	    cr3 != __pa_symbol(initial_page_table))
		cr3_changed = true;
}

static unsigned long lguest_read_cr3(void)
{
	return current_cr3;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant.  The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each.  The CPU
 * maps virtual addresses to physical addresses using "page tables".  We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables.  But since most virtual addresses
 * are unused, we use a two level index which saves space.  The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages.  Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *          |  --------->+---------+
 *          |         |  | PADDR1  |
 *        Mid-level   |  | PADDR2  |
 *        (PMD) page  |  |         |
 *          |         |  Lower-level |
 *          |         |  (PTE) page  |
 *          |         |  |         |
 *            ....          ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page.  If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second     Offset within page
 *  page directory page    pagetable page
 *
 * Now, unfortunately, this isn't the whole story: Intel added Physical Address
 * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits).
 * These are held in 64-bit page table entries, so we can now only fit 512
 * entries in a page, and the neat three-level tree breaks down.
 *
 * The result is a four level page table:
 *
 * cr3 --> [ 4 Upper  ]
 *         [   Level  ]
 *         [  Entries ]
 *         [(PUD Page)]---> +---------+
 *                          |  --------->+---------+
 *                          |         |  | PADDR1  |
 *                        Mid-level   |  | PADDR2  |
 *                        (PMD) page  |  |         |
 *                          |         |  Lower-level |
 *                          |         |  (PTE) page  |
 *                          |         |  |         |
 *                            ....          ....
 *
 *
 * And the virtual address is decoded as:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>|
 *  Index into   Index into mid   Index into lower   Offset within page
 *  top entries  directory page   pagetable page
 *
 * It's too hard to switch between these two formats at runtime, so Linux only
 * supports one or the other depending on whether CONFIG_X86_PAE is set.  Many
 * distributions turn it on, and not just for people with silly amounts of
 * memory: the larger PTE entries allow room for the NX bit, which lets the
 * kernel disable execution of pages and increase security.
 *
 * This was a problem for lguest, which couldn't run on these distributions;
 * then Matias Zabaljauregui figured it all out and implemented it, and only a
 * handful of puppies were crushed in the process!
 *
 * Back to our point: the kernel spends a lot of time changing both the
 * top-level page directory and lower-level pagetable pages.  The Guest doesn't
 * know physical addresses, so while it maintains these page tables exactly
 * like normal, it also needs to keep the Host informed whenever it makes a
 * change: the Host will create the real page tables based on the Guests'.
 */
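
/*
 * A worked example of that address split, as a sketch (illustration only;
 * the kernel's real accessors are pgd_index(), pmd_index() and pte_index()):
 *
 *	// Classic two-level (non-PAE) decode of a 32-bit virtual address:
 *	unsigned int pgd_idx = (vaddr >> 22) & 0x3FF;  // top 10 bits
 *	unsigned int pte_idx = (vaddr >> 12) & 0x3FF;  // next 10 bits
 *	unsigned int offset  = vaddr & 0xFFF;          // low 12 bits
 *
 *	// PAE decode of the same address:
 *	unsigned int pud_idx = (vaddr >> 30) & 0x3;    // top 2 bits
 *	unsigned int pmd_idx = (vaddr >> 21) & 0x1FF;  // 9 bits
 *	unsigned int pte2idx = (vaddr >> 12) & 0x1FF;  // 9 bits
 *	                                               // low 12 bits as before
 *
 * So 0xC0401000, say, has pgd_idx 0x301, pte_idx 0x001 and offset 0x000.
 */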

/*
 * The Guest calls this after it has set a second-level entry (pte), ie. to map
 * a page into a process' address space.  We tell the Host the toplevel and
 * address this corresponds to.  The Guest uses one pagetable per process, so
 * we need to tell the Host which one we're changing (mm->pgd).
 */
static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
#ifdef CONFIG_X86_PAE
	/* PAE needs to hand a 64 bit page table entry, so it uses two args. */
	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
		    ptep->pte_low, ptep->pte_high);
#else
	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
#endif
}

/* This is the "set and update" combo-meal-deal version. */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	lguest_pte_update(mm, addr, ptep);
}

/*
 * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
 * to set a middle-level entry when PAE is activated.
 *
 * Again, we set the entry then tell the Host which page we changed,
 * and the index of the entry we changed.
 */
#ifdef CONFIG_X86_PAE
static void lguest_set_pud(pud_t *pudp, pud_t pudval)
{
	native_set_pud(pudp, pudval);

	/* 32 bytes aligned pdpt address and the index. */
	lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
		    (__pa(pudp) & 0x1F) / sizeof(pud_t));
}

static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
		    (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#else

/* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
		    (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#endif

/*
 * There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more.  This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow: 48 seconds!  So we don't even tell
 * the Host anything changed until we've done the first real page table switch,
 * which brings boot back to 4.3 seconds.
 */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

#ifdef CONFIG_X86_PAE
/*
 * With 64-bit PTE values, we need to be careful setting them: if we set 32
 * bits at a time, the hardware could see a weird half-set entry.  These
 * versions ensure we update all 64 bits at once.
 */
static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte_atomic(ptep, pte);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	native_pte_clear(mm, addr, ptep);
	lguest_pte_update(mm, addr, ptep);
}

static void lguest_pmd_clear(pmd_t *pmdp)
{
	lguest_set_pmd(pmdp, __pmd(0));
}
#endif

/*
 * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present).  Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero).
 */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was not, it will fault back in. */
	lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
}

/*
 * This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET.
 */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 0);
}

/*
 * This is called when the kernel page tables have changed.  That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above.
 */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking through routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host what interrupts we want blocked ready for using the
 * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is
 * as simple as setting a bit.  We don't actually "ack" interrupts as such, we
 * just mask and unmask them.  I wonder if we should be cleverer?
 */
static void disable_lguest_irq(struct irq_data *data)
{
	set_bit(data->irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(struct irq_data *data)
{
	clear_bit(data->irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.irq_mask	= disable_lguest_irq,
	.irq_mask_ack	= disable_lguest_irq,
	.irq_unmask	= enable_lguest_irq,
};

/*
 * Interrupt descriptors are allocated as-needed, but low-numbered ones are
 * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
 * tells us the irq is already used: other errors (ie. ENOMEM) we take
 * seriously.
 */
static int lguest_setup_irq(unsigned int irq)
{
	struct irq_desc *desc;
	int err;

	/* Returns -ve error or vector number. */
	err = irq_alloc_desc_at(irq, 0);
	if (err < 0 && err != -EEXIST)
		return err;

	/*
	 * Tell the Linux infrastructure that the interrupt is
	 * controlled by our level-based lguest interrupt controller.
	 */
	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");

	/* Some systems map "vectors" to interrupts weirdly.  Not us! */
	desc = irq_to_desc(irq);
	__this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
	return 0;
}

static int lguest_enable_irq(struct pci_dev *dev)
{
	int err;
	u8 line = 0;

	/* We literally use the PCI interrupt line as the irq number. */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
	err = lguest_setup_irq(line);
	if (!err)
		dev->irq = line;
	return err;
}

/* We don't do hotplug PCI, so this shouldn't be called. */
static void lguest_disable_irq(struct pci_dev *dev)
{
	WARN_ON(1);
}

/*
 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls).
 */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
		if (i != IA32_SYSCALL_VECTOR)
			set_intr_gate(i, irq_entries_start +
				      8 * (i - FIRST_EXTERNAL_VECTOR));
	}

	/*
	 * This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts.
	 */
	irq_ctx_init(smp_processor_id());
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static void lguest_get_wallclock(struct timespec *now)
{
	*now = lguest_data.time;
}

/*
 * The TSC is an Intel thing called the Time Stamp Counter.  The Host tells us
 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
 * This matches what we want here: if we return 0 from this function, the x86
 * TSC clock will give up and not register itself.
 */
static unsigned long lguest_tsc_khz(void)
{
	return lguest_data.tsc_khz;
}

/*
 * If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host.
 */
static cycle_t lguest_clock_read(struct clocksource *cs)
{
	unsigned long sec, nsec;

	/*
	 * Since the time is in two parts (seconds and nanoseconds), we risk
	 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
	 * and getting 99 and 0.  As Linux tends to come apart under the stress
	 * of time travel, we must be careful:
	 */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/*
		 * This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on.
		 */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 200,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* We also need a "struct clock_event_device": Linux asks us to set it to go
|
2007-07-19 16:49:23 +08:00
|
|
|
* off some time in the future. Actually, James Morris figured all this out, I
|
2009-07-31 06:03:45 +08:00
|
|
|
* just applied the patch.
|
|
|
|
*/
|
2007-07-19 16:49:23 +08:00
|
|
|
static int lguest_clockevent_set_next_event(unsigned long delta,
|
|
|
|
struct clock_event_device *evt)
|
|
|
|
{
|
2008-03-29 00:05:53 +08:00
|
|
|
/* FIXME: I don't think this can ever happen, but James tells me he had
|
|
|
|
* to put this code in. Maybe we should remove it now. Anyone? */
|
2007-07-19 16:49:23 +08:00
|
|
|
if (delta < LG_CLOCK_MIN_DELTA) {
|
|
|
|
if (printk_ratelimit())
|
|
|
|
printk(KERN_DEBUG "%s: small delta %lu ns\n",
|
2008-03-04 03:37:23 +08:00
|
|
|
__func__, delta);
|
2007-07-19 16:49:23 +08:00
|
|
|
return -ETIME;
|
|
|
|
}
|
2008-03-29 00:05:53 +08:00
|
|
|
|
|
|
|
/* Please wake us this far in the future. */
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
|
2007-07-19 16:49:23 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-07-16 18:58:46 +08:00
|
|
|
static int lguest_clockevent_shutdown(struct clock_event_device *evt)
|
2007-07-19 16:49:23 +08:00
|
|
|
{
|
2015-07-16 18:58:46 +08:00
|
|
|
/* A 0 argument shuts the clock down. */
|
|
|
|
hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
|
|
|
|
return 0;
|
2007-07-19 16:49:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* This describes our primitive timer chip. */
|
|
|
|
static struct clock_event_device lguest_clockevent = {
|
|
|
|
.name = "lguest",
|
|
|
|
.features = CLOCK_EVT_FEAT_ONESHOT,
|
|
|
|
.set_next_event = lguest_clockevent_set_next_event,
|
2015-07-16 18:58:46 +08:00
|
|
|
.set_state_shutdown = lguest_clockevent_shutdown,
|
2007-07-19 16:49:23 +08:00
|
|
|
.rating = INT_MAX,
|
|
|
|
.mult = 1,
|
|
|
|
.shift = 0,
|
|
|
|
.min_delta_ns = LG_CLOCK_MIN_DELTA,
|
|
|
|
.max_delta_ns = LG_CLOCK_MAX_DELTA,
|
|
|
|
};
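/*
 * Why mult = 1 and shift = 0: the clockevent core turns a nanosecond
 * delta into device ticks with roughly (ns * mult) >> shift, so the
 * identity pair means lguest_clockevent_set_next_event() receives plain
 * nanoseconds, which is exactly what LHCALL_SET_CLOCKEVENT wants.  A
 * hedged sketch of that conversion (hypothetical name, not part of
 * this file):
 */
static inline unsigned long example_ns_to_ticks(u64 ns, u32 mult, u32 shift)
{
	return (unsigned long)((ns * mult) >> shift);	/* 1:1 for us */
}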
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* This is the Guest timer interrupt handler (hardware interrupt 0). We just
|
|
|
|
* call the clockevent infrastructure and it does whatever needs doing.
|
|
|
|
*/
|
2015-09-14 16:42:37 +08:00
|
|
|
static void lguest_time_irq(struct irq_desc *desc)
|
2007-07-19 16:49:22 +08:00
|
|
|
{
|
2007-07-19 16:49:23 +08:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/* Don't interrupt us while this is running. */
|
|
|
|
local_irq_save(flags);
|
|
|
|
lguest_clockevent.event_handler(&lguest_clockevent);
|
|
|
|
local_irq_restore(flags);
|
2007-07-19 16:49:22 +08:00
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* At some point in the boot process, we get asked to set up our timing
|
2007-07-27 01:41:02 +08:00
|
|
|
* infrastructure. The kernel doesn't expect timer interrupts before this, but
|
|
|
|
* we cleverly initialized the "blocked_interrupts" field of "struct
|
2009-07-31 06:03:45 +08:00
|
|
|
* lguest_data" so that timer interrupts were blocked until now.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
static void lguest_time_init(void)
|
|
|
|
{
|
2007-07-27 01:41:02 +08:00
|
|
|
/* Set up the timer interrupt (0) to go to our simple timer routine */
|
2015-08-04 12:32:55 +08:00
|
|
|
if (lguest_setup_irq(0) != 0)
|
|
|
|
panic("Could not set up timer irq");
|
2011-03-12 19:20:43 +08:00
|
|
|
irq_set_handler(0, lguest_time_irq);
|
2007-07-19 16:49:22 +08:00
|
|
|
|
2010-04-27 10:03:05 +08:00
|
|
|
clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
|
2007-07-19 16:49:23 +08:00
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* We can't set cpumask in the initializer: damn C limitations! Set it
|
|
|
|
* here and register our timer device. */
|
2008-12-13 18:50:26 +08:00
|
|
|
lguest_clockevent.cpumask = cpumask_of(0);
|
2007-07-19 16:49:23 +08:00
|
|
|
clockevents_register_device(&lguest_clockevent);
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* Finally, we unblock the timer interrupt. */
|
2010-12-17 07:03:13 +08:00
|
|
|
clear_bit(0, lguest_data.blocked_interrupts);
|
2007-07-19 16:49:22 +08:00
|
|
|
}
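/*
 * The "blocked_interrupts" field is a plain bitmap: while bit N is set,
 * the Host won't deliver interrupt N.  A hedged sketch of the same
 * unblock operation done by hand (the kernel's clear_bit() used above
 * is the real, atomic version; this name is hypothetical):
 */
static inline void example_unblock_irq(unsigned long *bitmap, int irq)
{
	bitmap[irq / BITS_PER_LONG] &= ~(1UL << (irq % BITS_PER_LONG));
}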
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/*
|
|
|
|
* Miscellaneous bits and pieces.
|
|
|
|
*
|
|
|
|
* Here is an oddball collection of functions which the Guest needs for things
|
|
|
|
* to work. They're pretty simple.
|
|
|
|
*/
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* The Guest needs to tell the Host what stack it expects traps to use. For
|
2007-07-27 01:41:02 +08:00
|
|
|
* native hardware, this is part of the Task State Segment mentioned above in
|
|
|
|
* lguest_load_tr_desc(), but to help hypervisors there's this special call.
|
|
|
|
*
|
|
|
|
* We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
|
|
|
|
* segment), the privilege level (we're privilege level 1, the Host is 0 and
|
|
|
|
* will not tolerate us trying to use that), the stack pointer, and the number
|
2009-07-31 06:03:45 +08:00
|
|
|
* of pages in the stack.
|
|
|
|
*/
|
2008-01-30 20:31:02 +08:00
|
|
|
static void lguest_load_sp0(struct tss_struct *tss,
|
2008-03-29 00:05:53 +08:00
|
|
|
struct thread_struct *thread)
|
2007-07-19 16:49:22 +08:00
|
|
|
{
|
2009-03-14 23:37:52 +08:00
|
|
|
lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
|
|
|
|
THREAD_SIZE / PAGE_SIZE);
|
2015-03-06 11:19:02 +08:00
|
|
|
tss->x86_tss.sp0 = thread->sp0;
|
2007-07-19 16:49:22 +08:00
|
|
|
}
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* Let's just say, I wouldn't do debugging under a Guest. */
|
2013-09-05 16:15:54 +08:00
|
|
|
static unsigned long lguest_get_debugreg(int regno)
|
|
|
|
{
|
|
|
|
/* FIXME: Implement */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-07-19 16:49:22 +08:00
|
|
|
static void lguest_set_debugreg(int regno, unsigned long value)
|
|
|
|
{
|
|
|
|
/* FIXME: Implement */
|
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* There are times when the kernel wants to make sure that no memory writes are
|
2007-07-27 01:41:02 +08:00
|
|
|
* caught in the cache (that they've all reached real hardware devices). This
|
|
|
|
* doesn't matter for the Guest, which has virtual hardware.
|
|
|
|
*
|
|
|
|
* On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
|
|
|
|
* (clflush) instruction is available and the kernel uses that. Otherwise, it
|
|
|
|
* uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
|
|
|
|
* Unlike clflush, wbinvd can only be run at privilege level 0. So we can
|
|
|
|
* ignore clflush, but replace wbinvd.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
static void lguest_wbinvd(void)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* If the Guest expects to have an Advanced Programmable Interrupt Controller,
|
2007-07-27 01:41:02 +08:00
|
|
|
* we play dumb by ignoring writes and returning 0 for reads. So it's no
|
|
|
|
* longer Programmable nor Controlling anything, and I don't think 8 lines of
|
|
|
|
* code qualifies for Advanced. It will also never interrupt anything. It
|
2009-07-31 06:03:45 +08:00
|
|
|
* does, however, allow us to get through the Linux boot code.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
2008-07-12 04:11:56 +08:00
|
|
|
static void lguest_apic_write(u32 reg, u32 v)
|
2007-07-19 16:49:22 +08:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2008-07-12 04:11:56 +08:00
|
|
|
static u32 lguest_apic_read(u32 reg)
|
2007-07-19 16:49:22 +08:00
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
2008-07-15 00:49:14 +08:00
|
|
|
|
|
|
|
static u64 lguest_apic_icr_read(void)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void lguest_apic_icr_write(u32 low, u32 id)
|
|
|
|
{
|
|
|
|
/* Warn to see if there are any stray references */
|
|
|
|
WARN_ON(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void lguest_apic_wait_icr_idle(void)
|
|
|
|
{
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 lguest_apic_safe_wait_icr_idle(void)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-02-17 15:02:14 +08:00
|
|
|
static void set_lguest_basic_apic_ops(void)
|
|
|
|
{
|
|
|
|
apic->read = lguest_apic_read;
|
|
|
|
apic->write = lguest_apic_write;
|
|
|
|
apic->icr_read = lguest_apic_icr_read;
|
|
|
|
apic->icr_write = lguest_apic_icr_write;
|
|
|
|
apic->wait_icr_idle = lguest_apic_wait_icr_idle;
|
|
|
|
apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
|
2008-07-15 00:49:14 +08:00
|
|
|
};
|
2007-07-19 16:49:22 +08:00
|
|
|
#endif
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* STOP! Until an interrupt comes in. */
|
2007-07-19 16:49:22 +08:00
|
|
|
static void lguest_safe_halt(void)
|
|
|
|
{
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(LHCALL_HALT, 0, 0, 0, 0);
|
2007-07-19 16:49:22 +08:00
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* The SHUTDOWN hypercall takes a string to describe what's happening, and
|
2008-03-29 00:05:53 +08:00
|
|
|
* an argument which says whether this is to restart (reboot) the Guest or not.
|
2007-07-27 01:41:02 +08:00
|
|
|
*
|
|
|
|
* Note that the Host always prefers that the Guest speak in physical addresses
|
2009-07-31 06:03:45 +08:00
|
|
|
* rather than virtual addresses, so we use __pa() here.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
static void lguest_power_off(void)
|
|
|
|
{
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(LHCALL_SHUTDOWN, __pa("Power down"),
|
|
|
|
LGUEST_SHUTDOWN_POWEROFF, 0, 0);
|
2007-07-19 16:49:22 +08:00
|
|
|
}
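/*
 * A hedged sketch of what __pa() boils down to for directly-mapped
 * kernel addresses on 32-bit x86: the virtual address minus
 * PAGE_OFFSET.  (The real macro has more going on; this covers only the
 * lowmem case, which is what string literals like the one above are.
 * The name is hypothetical.)
 */
static inline unsigned long example_pa(const void *virt)
{
	return (unsigned long)virt - PAGE_OFFSET;
}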
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/*
|
|
|
|
* Panicking.
|
|
|
|
*
|
|
|
|
* Don't. But if you did, this is what happens.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
|
|
|
|
{
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
|
2007-07-27 01:41:02 +08:00
|
|
|
/* The hcall won't return, but to keep gcc happy, we're "done". */
|
2007-07-19 16:49:22 +08:00
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block paniced = {
|
|
|
|
.notifier_call = lguest_panic
|
|
|
|
};
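/*
 * A hedged sketch of how the panic path reaches us once lguest_init()
 * below registers this block on panic_notifier_list: panic() walks the
 * chain and hands each callback the panic message, which lguest_panic()
 * forwards to the Host.  Roughly (hypothetical wrapper, not part of
 * this file):
 */
static void example_panic_path(const char *msg)
{
	/* panic() in kernel/panic.c does approximately this: */
	atomic_notifier_call_chain(&panic_notifier_list, 0, (void *)msg);
}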
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* Setting up memory is fairly easy. */
|
2007-07-19 16:49:22 +08:00
|
|
|
static __init char *lguest_memory_setup(void)
|
|
|
|
{
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
2011-07-22 13:09:50 +08:00
|
|
|
* The Linux bootloader header contains an "e820" memory map: the
|
2009-07-31 06:03:45 +08:00
|
|
|
* Launcher populated the first entry with our memory limit.
|
|
|
|
*/
|
2008-06-16 09:58:51 +08:00
|
|
|
e820_add_region(boot_params.e820_map[0].addr,
|
2007-10-16 08:13:22 +08:00
|
|
|
boot_params.e820_map[0].size,
|
|
|
|
boot_params.e820_map[0].type);
|
2007-07-27 01:41:02 +08:00
|
|
|
|
|
|
|
/* This string is for the boot messages. */
|
2007-07-19 16:49:22 +08:00
|
|
|
return "LGUEST";
|
|
|
|
}
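/*
 * For reference, boot_params.e820_map holds entries shaped like the
 * kernel's struct e820entry of this era; a mirrored sketch, renamed so
 * it reads standalone (the e820 header is authoritative):
 */
struct example_e820entry {
	__u64 addr;	/* start of memory segment */
	__u64 size;	/* size of memory segment */
	__u32 type;	/* type of segment, e.g. E820_RAM == 1 */
} __attribute__((packed));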
|
|
|
|
|
2015-02-11 12:56:01 +08:00
|
|
|
/* Offset within PCI config space of BAR access capability. */
|
|
|
|
static int console_cfg_offset = 0;
|
|
|
|
static int console_access_cap;
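/*
 * For reference, the layout behind the offsetof() calls below, per
 * <linux/virtio_pci.h> (mirrored here under another name so it reads as
 * a standalone sketch; the header is authoritative):
 */
struct example_virtio_pci_cap {
	__u8 cap_vndr;		/* Generic PCI field: PCI_CAP_ID_VNDR */
	__u8 cap_next;		/* Generic PCI field: next capability */
	__u8 cap_len;		/* Generic PCI field: capability length */
	__u8 cfg_type;		/* Identifies the structure, e.g. VIRTIO_PCI_CAP_PCI_CFG */
	__u8 bar;		/* Which BAR the window is in */
	__u8 padding[3];	/* Pad to a full dword */
	__le32 offset;		/* Offset within that BAR */
	__le32 length;		/* Length of the region */
};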
|
|
|
|
|
|
|
|
/* Set up the window so we access offset "off" in bar0 (on bus 0, device 1, function 0). */
|
|
|
|
static void set_cfg_window(u32 cfg_offset, u32 off)
|
|
|
|
{
|
|
|
|
write_pci_config_byte(0, 1, 0,
|
|
|
|
cfg_offset + offsetof(struct virtio_pci_cap, bar),
|
|
|
|
0);
|
|
|
|
write_pci_config(0, 1, 0,
|
|
|
|
cfg_offset + offsetof(struct virtio_pci_cap, length),
|
|
|
|
4);
|
|
|
|
write_pci_config(0, 1, 0,
|
|
|
|
cfg_offset + offsetof(struct virtio_pci_cap, offset),
|
|
|
|
off);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
|
|
|
|
{
|
2015-02-13 14:43:43 +08:00
|
|
|
/*
|
|
|
|
* We could set this up once, then leave it; nothing else in the
|
|
|
|
* kernel should touch these registers. But if it went wrong, that
|
|
|
|
* would be a horrible bug to find.
|
|
|
|
*/
|
2015-02-11 12:56:01 +08:00
|
|
|
set_cfg_window(cfg_offset, off);
|
|
|
|
write_pci_config(0, 1, 0,
|
|
|
|
cfg_offset + sizeof(struct virtio_pci_cap), val);
|
|
|
|
}
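/*
 * This file only ever needs to write through the window, but for
 * illustration here is the hedged read counterpart (not part of this
 * file): point the window at the offset, then read the data register
 * that follows the capability.
 */
static u32 example_read_bar_via_cfg(u32 cfg_offset, u32 off)
{
	set_cfg_window(cfg_offset, off);
	return read_pci_config(0, 1, 0,
			       cfg_offset + sizeof(struct virtio_pci_cap));
}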
|
|
|
|
|
|
|
|
static void probe_pci_console(void)
|
|
|
|
{
|
|
|
|
u8 cap, common_cap = 0, device_cap = 0;
|
2015-02-13 14:43:43 +08:00
|
|
|
/* Offset within BAR0 */
|
|
|
|
u32 device_offset;
|
|
|
|
u32 device_len;
|
2015-02-11 12:56:01 +08:00
|
|
|
|
|
|
|
/* Avoid recursive printk into here. */
|
|
|
|
console_cfg_offset = -1;
|
|
|
|
|
|
|
|
if (!early_pci_allowed()) {
|
|
|
|
printk(KERN_ERR "lguest: early PCI access not allowed!\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We expect a console PCI device at BUS0, slot 1. */
|
|
|
|
if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) {
|
|
|
|
printk(KERN_ERR "lguest: PCI device is %#x!\n",
|
|
|
|
read_pci_config(0, 1, 0, 0));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the capabilities we need (must be in bar0) */
|
|
|
|
cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST);
|
|
|
|
while (cap) {
|
|
|
|
u8 vndr = read_pci_config_byte(0, 1, 0, cap);
|
|
|
|
if (vndr == PCI_CAP_ID_VNDR) {
|
|
|
|
u8 type, bar;
|
2015-02-13 14:43:43 +08:00
|
|
|
u32 offset, length;
|
2015-02-11 12:56:01 +08:00
|
|
|
|
|
|
|
type = read_pci_config_byte(0, 1, 0,
|
|
|
|
cap + offsetof(struct virtio_pci_cap, cfg_type));
|
|
|
|
bar = read_pci_config_byte(0, 1, 0,
|
|
|
|
cap + offsetof(struct virtio_pci_cap, bar));
|
|
|
|
offset = read_pci_config(0, 1, 0,
|
|
|
|
cap + offsetof(struct virtio_pci_cap, offset));
|
2015-02-13 14:43:43 +08:00
|
|
|
length = read_pci_config(0, 1, 0,
|
|
|
|
cap + offsetof(struct virtio_pci_cap, length));
|
2015-02-11 12:56:01 +08:00
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case VIRTIO_PCI_CAP_DEVICE_CFG:
|
|
|
|
if (bar == 0) {
|
|
|
|
device_cap = cap;
|
|
|
|
device_offset = offset;
|
2015-02-13 14:43:43 +08:00
|
|
|
device_len = length;
|
2015-02-11 12:56:01 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIRTIO_PCI_CAP_PCI_CFG:
|
|
|
|
console_access_cap = cap;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT);
|
|
|
|
}
|
2015-02-13 14:43:43 +08:00
|
|
|
if (!device_cap || !console_access_cap) {
|
2015-02-11 12:56:01 +08:00
|
|
|
printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n",
|
|
|
|
common_cap, device_cap, console_access_cap);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-02-13 14:43:43 +08:00
|
|
|
/*
|
|
|
|
* Note that we can't check features until we've set the DRIVER
|
|
|
|
* status bit. We don't want to do that until we have a real driver,
|
|
|
|
* so we just check that the device-specific config has room for
|
|
|
|
* emerg_wr. If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
|
|
|
|
* it should ignore the access.
|
|
|
|
*/
|
|
|
|
if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
|
|
|
|
+ sizeof(u32))) {
|
|
|
|
printk(KERN_ERR "lguest: console missing emerg_wr field\n");
|
2015-02-11 12:56:01 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
console_cfg_offset = device_offset;
|
2015-02-13 14:43:43 +08:00
|
|
|
printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
|
2015-02-11 12:56:01 +08:00
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* We will eventually use the virtio console device to produce console output,
|
2015-02-11 12:56:01 +08:00
|
|
|
* but before that is set up we use the virtio PCI console's backdoor mmio
|
|
|
|
* access and the "emergency" write facility (which is legal even before the
|
|
|
|
* device is configured).
|
2009-07-31 06:03:45 +08:00
|
|
|
*/
|
2007-10-22 09:24:21 +08:00
|
|
|
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
|
|
|
|
{
|
2015-02-11 12:56:01 +08:00
|
|
|
/* If we couldn't find the PCI console, forget it. */
|
|
|
|
if (console_cfg_offset < 0)
|
|
|
|
return count;
|
2007-10-22 09:24:21 +08:00
|
|
|
|
2015-02-11 12:56:01 +08:00
|
|
|
if (unlikely(!console_cfg_offset)) {
|
|
|
|
probe_pci_console();
|
|
|
|
if (console_cfg_offset < 0)
|
|
|
|
return count;
|
|
|
|
}
|
2007-10-22 09:24:21 +08:00
|
|
|
|
2015-02-11 12:56:01 +08:00
|
|
|
write_bar_via_cfg(console_access_cap,
|
|
|
|
console_cfg_offset
|
|
|
|
+ offsetof(struct virtio_console_config, emerg_wr),
|
|
|
|
buf[0]);
|
|
|
|
return 1;
|
2007-10-22 09:24:21 +08:00
|
|
|
}
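/*
 * Because we emit at most one byte per call, a caller has to loop.  A
 * hedged sketch of how the early console code might drain a buffer
 * through us (hypothetical helper, not part of this file; terminates
 * because we return the full count when no console was found):
 */
static void __init example_emit(const char *buf, int count)
{
	while (count > 0) {
		int done = early_put_chars(0, buf, count);
		buf += done;
		count -= done;
	}
}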
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* Rebooting also tells the Host we're finished, but the RESTART flag tells the
|
|
|
|
* Launcher to reboot us.
|
|
|
|
*/
|
2008-03-29 00:05:53 +08:00
|
|
|
static void lguest_restart(char *reason)
|
|
|
|
{
|
2010-04-15 11:43:54 +08:00
|
|
|
hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
|
2008-03-29 00:05:53 +08:00
|
|
|
}
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/*G:050
|
|
|
|
* Patching (Powerfully Placating Performance Pedants)
|
|
|
|
*
|
2008-03-29 00:05:53 +08:00
|
|
|
* We have already seen that pv_ops structures let us replace simple native
|
|
|
|
* instructions with calls to the appropriate back end all throughout the
|
|
|
|
* kernel. This allows the same kernel to run as a Guest and as a native
|
2007-07-27 01:41:02 +08:00
|
|
|
* kernel, but it's slow because of all the indirect branches.
|
|
|
|
*
|
|
|
|
* Remember that David Wheeler quote about "Any problem in computer science can
|
|
|
|
* be solved with another layer of indirection"? The rest of that quote is
|
|
|
|
* "... But that usually will create another problem." This is the first of
|
|
|
|
* those problems.
|
|
|
|
*
|
|
|
|
* Our current solution is to allow the paravirt back end to optionally patch
|
|
|
|
* over the indirect calls to replace them with something more efficient. We
|
2009-06-13 12:27:02 +08:00
|
|
|
* patch two of the simplest of the most commonly called functions: disable
|
|
|
|
* interrupts and save the interrupt flag. We usually have 6 or 10 bytes to patch
|
|
|
|
* into: the Guest versions of these operations are small enough that we can
|
|
|
|
* fit comfortably.
|
2007-07-27 01:41:02 +08:00
|
|
|
*
|
|
|
|
* First we need assembly templates of each of the patchable Guest operations,
|
2015-03-24 09:21:38 +08:00
|
|
|
* and these are in head_32.S.
|
2009-07-31 06:03:45 +08:00
|
|
|
*/
|
2007-07-27 01:41:02 +08:00
|
|
|
|
|
|
|
/*G:060 We construct a table from the assembler templates: */
|
2007-07-19 16:49:22 +08:00
|
|
|
static const struct lguest_insns
|
|
|
|
{
|
|
|
|
const char *start, *end;
|
|
|
|
} lguest_insns[] = {
|
2007-10-17 02:51:29 +08:00
|
|
|
[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
|
|
|
|
[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
|
2007-07-19 16:49:22 +08:00
|
|
|
};
|
2007-07-27 01:41:02 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* Now our patch routine is fairly simple (based on the native one in
|
2007-07-27 01:41:02 +08:00
|
|
|
* paravirt.c). If we have a replacement, we copy it in and return how much of
|
2009-07-31 06:03:45 +08:00
|
|
|
* the available space we used.
|
|
|
|
*/
|
2007-08-11 04:31:03 +08:00
|
|
|
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
|
|
|
|
unsigned long addr, unsigned len)
|
2007-07-19 16:49:22 +08:00
|
|
|
{
|
|
|
|
unsigned int insn_len;
|
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* Don't do anything special if we don't have a replacement */
|
2007-07-19 16:49:22 +08:00
|
|
|
if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
|
2007-08-11 04:31:03 +08:00
|
|
|
return paravirt_patch_default(type, clobber, ibuf, addr, len);
|
2007-07-19 16:49:22 +08:00
|
|
|
|
|
|
|
insn_len = lguest_insns[type].end - lguest_insns[type].start;
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Similarly if it can't fit (doesn't happen, but let's be thorough). */
|
2007-07-19 16:49:22 +08:00
|
|
|
if (len < insn_len)
|
2007-08-11 04:31:03 +08:00
|
|
|
return paravirt_patch_default(type, clobber, ibuf, addr, len);
|
2007-07-19 16:49:22 +08:00
|
|
|
|
2007-07-27 01:41:02 +08:00
|
|
|
/* Copy in our instructions. */
|
2007-08-11 04:31:03 +08:00
|
|
|
memcpy(ibuf, lguest_insns[type].start, insn_len);
|
2007-07-19 16:49:22 +08:00
|
|
|
return insn_len;
|
|
|
|
}
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*G:029
|
|
|
|
* Once we get to lguest_init(), we know we're a Guest. The various
|
2008-03-29 00:05:53 +08:00
|
|
|
* pv_ops structures in the kernel provide points for (almost) every routine we
|
2009-07-31 06:03:45 +08:00
|
|
|
* have to override to avoid privileged instructions.
|
|
|
|
*/
|
2007-10-22 09:29:44 +08:00
|
|
|
__init void lguest_init(void)
|
2007-07-19 16:49:22 +08:00
|
|
|
{
|
2009-07-31 06:03:45 +08:00
|
|
|
/* We're under lguest. */
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_info.name = "lguest";
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Paravirt is enabled. */
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_info.paravirt_enabled = 1;
|
2009-07-31 06:03:45 +08:00
|
|
|
/* We're running at privilege level 1, not 0 as normal. */
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_info.kernel_rpl = 1;
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Everyone except Xen runs with this set. */
|
2009-06-13 12:27:07 +08:00
|
|
|
pv_info.shared_kernel_pmd = 1;
|
2015-12-11 22:07:53 +08:00
|
|
|
pv_info.features = 0;
|
2007-07-19 16:49:22 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* We set up all the lguest overrides for sensitive operations. These
|
|
|
|
* are detailed with the operations themselves.
|
|
|
|
*/
|
2007-10-17 02:51:29 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Interrupt-related operations */
|
2013-10-23 00:07:54 +08:00
|
|
|
pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
|
2009-06-13 12:27:03 +08:00
|
|
|
pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
|
2013-10-23 00:07:54 +08:00
|
|
|
pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
|
2009-06-13 12:27:03 +08:00
|
|
|
pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_irq_ops.safe_halt = lguest_safe_halt;
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Setup operations */
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_init_ops.patch = lguest_patch;
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Intercepts of various CPU instructions */
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_cpu_ops.load_gdt = lguest_load_gdt;
|
|
|
|
pv_cpu_ops.cpuid = lguest_cpuid;
|
|
|
|
pv_cpu_ops.load_idt = lguest_load_idt;
|
|
|
|
pv_cpu_ops.iret = lguest_iret;
|
2008-01-30 20:31:02 +08:00
|
|
|
pv_cpu_ops.load_sp0 = lguest_load_sp0;
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
|
|
|
|
pv_cpu_ops.set_ldt = lguest_set_ldt;
|
|
|
|
pv_cpu_ops.load_tls = lguest_load_tls;
|
2013-09-05 16:15:54 +08:00
|
|
|
pv_cpu_ops.get_debugreg = lguest_get_debugreg;
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_cpu_ops.set_debugreg = lguest_set_debugreg;
|
|
|
|
pv_cpu_ops.clts = lguest_clts;
|
|
|
|
pv_cpu_ops.read_cr0 = lguest_read_cr0;
|
|
|
|
pv_cpu_ops.write_cr0 = lguest_write_cr0;
|
|
|
|
pv_cpu_ops.read_cr4 = lguest_read_cr4;
|
|
|
|
pv_cpu_ops.write_cr4 = lguest_write_cr4;
|
|
|
|
pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
|
|
|
|
pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
|
|
|
|
pv_cpu_ops.wbinvd = lguest_wbinvd;
|
2009-02-19 03:18:57 +08:00
|
|
|
pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
|
|
|
|
pv_cpu_ops.end_context_switch = lguest_end_context_switch;
|
2007-10-17 02:51:29 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/* Pagetable management */
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_mmu_ops.write_cr3 = lguest_write_cr3;
|
|
|
|
pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
|
|
|
|
pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
|
|
|
|
pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
|
|
|
|
pv_mmu_ops.set_pte = lguest_set_pte;
|
|
|
|
pv_mmu_ops.set_pte_at = lguest_set_pte_at;
|
|
|
|
pv_mmu_ops.set_pmd = lguest_set_pmd;
|
2009-06-13 12:27:07 +08:00
|
|
|
#ifdef CONFIG_X86_PAE
|
|
|
|
pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
|
|
|
|
pv_mmu_ops.pte_clear = lguest_pte_clear;
|
|
|
|
pv_mmu_ops.pmd_clear = lguest_pmd_clear;
|
|
|
|
pv_mmu_ops.set_pud = lguest_set_pud;
|
|
|
|
#endif
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_mmu_ops.read_cr2 = lguest_read_cr2;
|
|
|
|
pv_mmu_ops.read_cr3 = lguest_read_cr3;
|
2007-10-17 02:51:29 +08:00
|
|
|
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
|
2009-02-18 15:46:21 +08:00
|
|
|
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
|
2013-03-23 21:36:36 +08:00
|
|
|
pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
|
2009-03-31 11:55:23 +08:00
|
|
|
pv_mmu_ops.pte_update = lguest_pte_update;
|
2007-10-17 02:51:29 +08:00
|
|
|
|
2007-07-19 16:49:22 +08:00
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
2009-07-31 06:03:45 +08:00
|
|
|
/* APIC read/write intercepts */
|
2009-02-17 15:02:14 +08:00
|
|
|
set_lguest_basic_apic_ops();
|
2007-07-19 16:49:22 +08:00
|
|
|
#endif
|
2007-10-17 02:51:29 +08:00
|
|
|
|
2009-08-20 16:19:54 +08:00
|
|
|
x86_init.resources.memory_setup = lguest_memory_setup;
|
2009-08-20 15:59:09 +08:00
|
|
|
x86_init.irqs.intr_init = lguest_init_IRQ;
|
2009-08-19 21:37:03 +08:00
|
|
|
x86_init.timers.timer_init = lguest_time_init;
|
2009-08-20 23:06:25 +08:00
|
|
|
x86_platform.calibrate_tsc = lguest_tsc_khz;
|
2009-09-10 10:48:56 +08:00
|
|
|
x86_platform.get_wallclock = lguest_get_wallclock;
|
2009-08-20 16:19:54 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* Now is a good time to look at the implementations of these functions
|
|
|
|
* before returning to the rest of lguest_init().
|
|
|
|
*/
|
2007-07-27 01:41:02 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*G:070
|
|
|
|
* Now we've seen all the paravirt_ops, we return to
|
2007-07-27 01:41:02 +08:00
|
|
|
* lguest_init() where the rest of the fairly chaotic boot setup
|
2009-07-31 06:03:45 +08:00
|
|
|
* occurs.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* The stack protector is a weird thing where gcc places a canary
|
2009-06-03 13:22:24 +08:00
|
|
|
* value on the stack and then checks it on return. This file is
|
|
|
|
* compiled with -fno-stack-protector, so we got this far without
|
|
|
|
* problems. The value of the canary is kept at offset 20 from the
|
|
|
|
* %gs register, so we need to set that up before calling C functions
|
2009-07-31 06:03:45 +08:00
|
|
|
* in other files.
|
|
|
|
*/
|
2009-06-03 13:22:24 +08:00
|
|
|
setup_stack_canary_segment(0);
|
2009-07-31 06:03:45 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We could just call load_stack_canary_segment(), but we might as well
|
|
|
|
* call switch_to_new_gdt() which loads the whole table and sets up the
|
|
|
|
* per-cpu segment descriptor register %fs as well.
|
|
|
|
*/
|
2009-06-03 13:22:24 +08:00
|
|
|
switch_to_new_gdt(0);
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* The Host<->Guest Switcher lives at the top of our address space, and
|
2008-03-29 00:05:53 +08:00
|
|
|
* the Host told us how big it is when we made the LGUEST_INIT hypercall:
|
2009-07-31 06:03:45 +08:00
|
|
|
* it put the answer in lguest_data.reserve_mem.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
reserve_top_address(lguest_data.reserve_mem);
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* If we don't initialize the lock dependency checker now, it crashes
|
2009-09-24 12:26:42 +08:00
|
|
|
* in atomic_notifier_chain_register, then in paravirt_disable_iospace.
|
2009-07-31 06:03:45 +08:00
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
lockdep_init();
|
|
|
|
|
2009-09-24 12:26:42 +08:00
|
|
|
/* Hook in our special panic hypercall code. */
|
|
|
|
atomic_notifier_chain_register(&panic_notifier_list, &paniced);
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* This is messy CPU setup stuff which the native boot code does before
|
|
|
|
* start_kernel, so we have to do it, too:
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
cpu_detect(&new_cpu_data);
|
|
|
|
/* head.S usually sets up the first capability word, so do it here. */
|
|
|
|
new_cpu_data.x86_capability[0] = cpuid_edx(1);
|
|
|
|
|
|
|
|
/* Math is always hard! */
|
2013-04-29 22:04:20 +08:00
|
|
|
set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
|
2007-07-19 16:49:22 +08:00
|
|
|
|
2008-03-29 00:05:53 +08:00
|
|
|
/* We don't have features. We have puppies! Puppies! */
|
2007-07-19 16:49:22 +08:00
|
|
|
#ifdef CONFIG_X86_MCE
|
2012-10-17 18:05:33 +08:00
|
|
|
mca_cfg.disabled = true;
|
2007-07-19 16:49:22 +08:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_ACPI
|
|
|
|
acpi_disabled = 1;
|
|
|
|
#endif
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* We set the preferred console to "hvc". This is the "hypervisor
|
2007-07-27 01:41:02 +08:00
|
|
|
* virtual console" driver written by the PowerPC people, which we also
|
2009-07-31 06:03:45 +08:00
|
|
|
* adapted for lguest's use.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
add_preferred_console("hvc", 0, NULL);
|
|
|
|
|
2007-10-22 09:24:21 +08:00
|
|
|
/* Register our very early console. */
|
|
|
|
virtio_cons_early_init(early_put_chars);
|
|
|
|
|
2015-02-11 12:45:10 +08:00
|
|
|
/* Don't let ACPI try to control our PCI interrupts. */
|
|
|
|
disable_acpi();
|
|
|
|
|
2015-02-11 12:45:10 +08:00
|
|
|
/* We control them ourselves, by overriding these two hooks. */
|
|
|
|
pcibios_enable_irq = lguest_enable_irq;
|
|
|
|
pcibios_disable_irq = lguest_disable_irq;
|
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* Last of all, we set the power management poweroff hook to point to
|
2008-03-29 00:05:53 +08:00
|
|
|
* the Guest routine to power off, and the reboot hook to our restart
|
2009-07-31 06:03:45 +08:00
|
|
|
* routine.
|
|
|
|
*/
|
2007-07-19 16:49:22 +08:00
|
|
|
pm_power_off = lguest_power_off;
|
2007-12-28 16:56:24 +08:00
|
|
|
machine_ops.restart = lguest_restart;
|
2008-03-29 00:05:53 +08:00
|
|
|
|
2009-07-31 06:03:45 +08:00
|
|
|
/*
|
|
|
|
* Now we're set up, call i386_start_kernel() in head32.c and we proceed
|
|
|
|
* to boot as normal. It never returns.
|
|
|
|
*/
|
2008-05-30 03:56:36 +08:00
|
|
|
i386_start_kernel();
|
2007-07-19 16:49:22 +08:00
|
|
|
}
|
2007-07-27 01:41:02 +08:00
|
|
|
/*
|
|
|
|
* This marks the end of stage II of our journey, The Guest.
|
|
|
|
*
|
2007-10-25 13:02:50 +08:00
|
|
|
* It is now time for us to explore the layer of virtual drivers and complete
|
|
|
|
* our understanding of the Guest in "make Drivers".
|
2007-07-27 01:41:02 +08:00
|
|
|
*/
|