mirror of https://gitee.com/openkylin/linux.git
commit f630fe2817: Merge branch 'master' into upstream
@ -0,0 +1,96 @@
#
# This list is used by git-shortlog to fix a few botched name translations
# in the git archive, either because the author's full name was messed up
# and/or not always written the same way, making contributions from the
# same person appearing not to be so or badly displayed.
#
# repo-abbrev: /pub/scm/linux/kernel/git/
#

Aaron Durbin <adurbin@google.com>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adrian Bunk <bunk@stusta.de>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
Andrew Morton <akpm@osdl.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
Axel Dyks <xl@xlsigned.net>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Christoph Hellwig <hch@lst.de>
Corey Minyard <minyard@acm.org>
David Brownell <david-b@pacbell.net>
David Woodhouse <dwmw2@shinybook.infradead.org>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
Franck Bui-Huu <vagabon.xyz@gmail.com>
Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
James E Wilson <wilson@specifix.com>
James Ketrenos <jketreno@io.(none)>
Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
John Stultz <johnstul@us.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Koushik <raghavendra.koushik@neterion.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
Matthieu CASTET <castet.matthieu@free.fr>
Michel Dänzer <michel@tungstengraphics.com>
Mitesh shah <mshah@teja.com>
Morten Welinder <terra@gnome.org>
Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Patrick Mochel <mochel@digitalimplant.org>
Peter A Jonsson <pj@ludd.ltu.se>
Praveen BP <praveenbp@ti.com>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
Rémi Denis-Courmont <rdenis@simphalempin.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
Sam Ravnborg <sam@mars.ravnborg.org>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
Tony Luck <tony.luck@intel.com>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
@ -78,7 +78,8 @@ Identifying GPIOs
-----------------
GPIOs are identified by unsigned integers in the range 0..MAX_INT. That
reserves "negative" numbers for other purposes like marking signals as
-"not available on this board", or indicating faults.
+"not available on this board", or indicating faults. Code that doesn't
+touch the underlying hardware treats these integers as opaque cookies.

Platforms define how they use those integers, and usually #define symbols
for the GPIO lines so that board-specific setup code directly corresponds
@ -139,10 +140,10 @@ issues including wire-OR and output latencies.
The get/set calls have no error returns because "invalid GPIO" should have
been reported earlier in gpio_set_direction(). However, note that not all
platforms can read the value of output pins; those that can't should always
-return zero. Also, these calls will be ignored for GPIOs that can't safely
-be accessed wihtout sleeping (see below).
+return zero. Also, using these calls for GPIOs that can't safely be accessed
+without sleeping (see below) is an error.

-Platform-specific implementations are encouraged to optimise the two
+Platform-specific implementations are encouraged to optimize the two
calls to access the GPIO value in cases where the GPIO number (and for
output, value) are constant. It's normal for them to need only a couple
of instructions in such cases (reading or writing a hardware register),
@ -239,7 +240,8 @@ options are part of the IRQ interface, e.g. IRQF_TRIGGER_FALLING, as are
system wakeup capabilities.

Non-error values returned from irq_to_gpio() would most commonly be used
-with gpio_get_value().
+with gpio_get_value(), for example to initialize or update driver state
+when the IRQ is edge-triggered.
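
A hedged sketch of that pattern (the handler and the state handling are
invented for illustration, and availability of irq_to_gpio() is platform
dependent):

  #include <linux/interrupt.h>
  #include <asm/gpio.h>

  /* Edge-triggered handler: map the IRQ back to its GPIO and sample
   * the line to update (hypothetical) driver state. */
  static irqreturn_t button_isr(int irq, void *dev_id)
  {
          int gpio  = irq_to_gpio(irq);
          int level = gpio_get_value(gpio);

          /* record 'level' in the driver's own state here */
          (void)level;
          return IRQ_HANDLED;
  }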
@ -260,9 +262,10 @@ pullups (or pulldowns) so that the on-chip ones should not be used.
There are other system-specific mechanisms that are not specified here,
like the aforementioned options for input de-glitching and wire-OR output.
Hardware may support reading or writing GPIOs in gangs, but that's usually
-configuration dependednt: for GPIOs sharing the same bank. (GPIOs are
+configuration dependent: for GPIOs sharing the same bank. (GPIOs are
commonly grouped in banks of 16 or 32, with a given SOC having several such
-banks.) Code relying on such mechanisms will necessarily be nonportable.
+banks.) Some systems can trigger IRQs from output GPIOs. Code relying on
+such mechanisms will necessarily be nonportable.

Dynamic definition of GPIOs is not currently supported; for example, as
a side effect of configuring an add-on board with some GPIO expanders.
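
To make the get/set calls documented earlier in this file concrete, a
minimal hedged sketch (the GPIO number is a made-up board-specific value,
and the platform defines the real prototypes):

  #include <asm/gpio.h>

  #define STATUS_LED_GPIO 42      /* hypothetical board-specific line */

  static void status_led_toggle(void)
  {
          /* read the current level, then drive the opposite one */
          int level = gpio_get_value(STATUS_LED_GPIO);

          gpio_set_value(STATUS_LED_GPIO, !level);
  }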
@ -0,0 +1,68 @@
timer_stats - timer usage statistics
------------------------------------

timer_stats is a debugging facility to make the timer (ab)usage in a Linux
system visible to kernel and userspace developers. It is not intended for
production usage as it adds significant overhead to the (hr)timer code and the
(hr)timer data structures.

timer_stats should be used by kernel and userspace developers to verify that
their code does not make undue use of timers. This helps to avoid unnecessary
wakeups, which should be avoided to optimize power consumption.

It can be enabled by CONFIG_TIMER_STATS in the "Kernel hacking" configuration
section.

timer_stats collects information about the timer events which are fired in a
Linux system over a sample period:

- the pid of the task (process) which initialized the timer
- the name of the process which initialized the timer
- the function where the timer was initialized
- the callback function which is associated to the timer
- the number of events (callbacks)

timer_stats adds an entry to /proc: /proc/timer_stats

This entry is used to control the statistics functionality and to read out the
sampled information.

The timer_stats functionality is inactive on bootup.

To activate a sample period issue:
# echo 1 >/proc/timer_stats

To stop a sample period issue:
# echo 0 >/proc/timer_stats

The statistics can be retrieved by:
# cat /proc/timer_stats

The readout of /proc/timer_stats automatically disables sampling. The sampled
information is kept until a new sample period is started. This allows multiple
readouts.
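
The same sequence can also be driven from a program; a hedged userspace
sketch of the steps above:

  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          FILE *f = fopen("/proc/timer_stats", "w");
          int c;

          if (!f)
                  return 1;
          fputs("1\n", f);        /* start a sample period */
          fclose(f);

          sleep(5);               /* let timer events accumulate */

          /* the readout itself disables sampling */
          f = fopen("/proc/timer_stats", "r");
          if (!f)
                  return 1;
          while ((c = fgetc(f)) != EOF)
                  putchar(c);
          fclose(f);
          return 0;
  }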

Sample output of /proc/timer_stats:

Timerstats sample period: 3.888770 s
  12,     0 swapper       hrtimer_stop_sched_tick (hrtimer_sched_tick)
  15,     1 swapper       hcd_submit_urb (rh_timer_func)
   4,   959 kedac         schedule_timeout (process_timeout)
   1,     0 swapper       page_writeback_init (wb_timer_fn)
  28,     0 swapper       hrtimer_stop_sched_tick (hrtimer_sched_tick)
  22,  2948 IRQ 4         tty_flip_buffer_push (delayed_work_timer_fn)
   3,  3100 bash          schedule_timeout (process_timeout)
   1,     1 swapper       queue_delayed_work_on (delayed_work_timer_fn)
   1,     1 swapper       queue_delayed_work_on (delayed_work_timer_fn)
   1,     1 swapper       neigh_table_init_no_netlink (neigh_periodic_timer)
   1,  2292 ip            __netdev_watchdog_up (dev_watchdog)
   1,    23 events/1      do_cache_clean (delayed_work_timer_fn)
90 total events, 30.0 events/sec

The first column is the number of events, the second column the pid, the third
column is the name of the process. The fourth column shows the function which
initialized the timer and in parentheses the callback function which was
executed on expiry.

Thomas, Ingo
@ -0,0 +1,249 @@
High resolution timers and dynamic ticks design notes
-----------------------------------------------------

Further information can be found in the paper of the OLS 2006 talk "hrtimers
and beyond". The paper is part of the OLS 2006 Proceedings Volume 1, which can
be found on the OLS website:
http://www.linuxsymposium.org/2006/linuxsymposium_procv1.pdf

The slides to this talk are available from:
http://tglx.de/projects/hrtimers/ols2006-hrtimers.pdf

The slides contain five figures (pages 2, 15, 18, 20, 22), which illustrate the
changes in the time(r) related Linux subsystems. Figure #1 (p. 2) shows the
design of the Linux time(r) system before hrtimers and other building blocks
got merged into mainline.

Note: the paper and the slides are talking about "clock event source", while we
switched to the name "clock event devices" in the meantime.

The design contains the following basic building blocks:

- hrtimer base infrastructure
- timeofday and clock source management
- clock event management
- high resolution timer functionality
- dynamic ticks


hrtimer base infrastructure
---------------------------

The hrtimer base infrastructure was merged into the 2.6.16 kernel. Details of
the base implementation are covered in Documentation/hrtimers/hrtimer.txt. See
also figure #2 (OLS slides p. 15).

The main differences to the timer wheel, which holds the armed timer_list type
timers, are:
- time ordered enqueueing into a rb-tree
- independent of ticks (the processing is based on nanoseconds)
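
Arming such a timer follows the pattern documented there; a hedged sketch
(my_* names are invented, and the exact prototypes have varied between
kernel versions):

  #include <linux/hrtimer.h>
  #include <linux/ktime.h>

  static struct hrtimer my_timer;

  static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
  {
          /* runs when the timer expires */
          return HRTIMER_NORESTART;
  }

  static void my_timer_setup(void)
  {
          hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
          my_timer.function = my_timer_fn;
          /* expire 100ms from now; enqueued time ordered, not per tick */
          hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
                        HRTIMER_MODE_REL);
  }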

timeofday and clock source management
-------------------------------------

John Stultz's Generic Time Of Day (GTOD) framework moves a large portion of
code out of the architecture-specific areas into a generic management
framework, as illustrated in figure #3 (OLS slides p. 18). The architecture
specific portion is reduced to the low level hardware details of the clock
sources, which are registered in the framework and selected on a quality based
decision. The low level code provides hardware setup and readout routines and
initializes data structures, which are used by the generic time keeping code to
convert the clock ticks to nanosecond based time values. All other time keeping
related functionality is moved into the generic code. The GTOD base patch got
merged into the 2.6.18 kernel.

Further information about the Generic Time Of Day framework is available in the
OLS 2005 Proceedings Volume 1:
http://www.linuxsymposium.org/2005/linuxsymposium_procv1.pdf

The paper "We Are Not Getting Any Younger: A New Approach to Time and
Timers" was written by J. Stultz, D.V. Hart, & N. Aravamudan.

Figure #3 (OLS slides p.18) illustrates the transformation.


clock event management
----------------------

While clock sources provide read access to the monotonically increasing time
value, clock event devices are used to schedule the next event
interrupt(s). The next event is currently defined to be periodic, with its
period defined at compile time. The setup and selection of the event device
for various event driven functionalities is hardwired into the architecture
dependent code. This results in duplicated code across all architectures and
makes it extremely difficult to change the configuration of the system to use
event interrupt devices other than those already built into the
architecture. Another implication of the current design is that it is necessary
to touch all the architecture-specific implementations in order to provide new
functionality like high resolution timers or dynamic ticks.

The clock events subsystem tries to address this problem by providing a generic
solution to manage clock event devices and their usage for the various clock
event driven kernel functionalities. The goal of the clock event subsystem is
to minimize the clock event related architecture dependent code to the pure
hardware related handling and to allow easy addition and utilization of new
clock event devices. It also minimizes the duplicated code across the
architectures as it provides generic functionality down to the interrupt
service handler, which is almost inherently hardware dependent.

Clock event devices are registered either by the architecture dependent boot
code or at module insertion time. Each clock event device fills a data
structure with clock-specific property parameters and callback functions. The
clock event management decides, by using the specified property parameters, the
set of system functions a clock event device will be used to support. This
includes the distinction of per-CPU and per-system global event devices.

System-level global event devices are used for the Linux periodic tick. Per-CPU
event devices are used to provide local CPU functionality such as process
accounting, profiling, and high resolution timers.

The management layer assigns one or more of the following functions to a clock
event device:
- system global periodic tick (jiffies update)
- cpu local update_process_times
- cpu local profiling
- cpu local next event interrupt (non periodic mode)

The clock event device delegates the selection of those timer interrupt related
functions completely to the management layer. The clock management layer stores
a function pointer in the device description structure, which has to be called
from the hardware level handler. This removes a lot of duplicated code from the
architecture specific timer interrupt handlers and hands the control over the
clock event devices and the assignment of timer interrupt related functionality
to the core code.
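
As a rough illustration of that flow, a hedged sketch of a driver-side
clock event device (all my_* names are hypothetical and the property
setup is abbreviated):

  #include <linux/clockchips.h>
  #include <linux/interrupt.h>
  #include <linux/init.h>

  static struct clock_event_device my_clockevent;

  static int my_set_next_event(unsigned long delta,
                               struct clock_event_device *evt)
  {
          /* program the hardware compare register 'delta' ticks ahead */
          return 0;
  }

  static void my_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
  {
          /* switch the hardware between periodic, one-shot and off */
  }

  /* hardware level handler: only calls the installed function pointer */
  static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
  {
          my_clockevent.event_handler(&my_clockevent);
          return IRQ_HANDLED;
  }

  static struct clock_event_device my_clockevent = {
          .name           = "my-timer",
          .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
          .rating         = 300,
          .set_next_event = my_set_next_event,
          .set_mode       = my_set_mode,
  };

  static void __init my_timer_init(void)
  {
          /* mult/shift/min_delta_ns/max_delta_ns setup omitted */
          clockevents_register_device(&my_clockevent);
  }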

The clock event layer API is rather small. Aside from the clock event device
registration interface it provides functions to schedule the next event
interrupt, clock event device notification service and support for suspend and
resume.

The framework adds about 700 lines of code which results in a 2KB increase of
the kernel binary size. The conversion of i386 removes about 100 lines of
code. The binary size decrease is in the range of 400 bytes. We believe that
the increase of flexibility and the avoidance of duplicated code across
architectures justifies the slight increase of the binary size.

The conversion of an architecture has no functional impact, but allows the
high resolution and dynamic tick functionalities to be utilized without any
change to the clock event device and timer interrupt code. After the conversion
the enabling of high resolution timers and dynamic ticks is simply provided by
adding the kernel/time/Kconfig file to the architecture specific Kconfig and
adding the dynamic tick specific calls to the idle routine (a total of 3 lines
added to the idle function and the Kconfig file).

Figure #4 (OLS slides p.20) illustrates the transformation.


high resolution timer functionality
-----------------------------------

During system boot it is not possible to use the high resolution timer
functionality, while making it possible would be difficult and would serve no
useful function. The initialization of the clock event device framework, the
clock source framework (GTOD) and hrtimers itself has to be done and
appropriate clock sources and clock event devices have to be registered before
the high resolution functionality can work. Up to the point where hrtimers are
initialized, the system works in the usual low resolution periodic mode. The
clock source and the clock event device layers provide notification functions
which inform hrtimers about availability of new hardware. hrtimers validates
the usability of the registered clock sources and clock event devices before
switching to high resolution mode. This also ensures that a kernel which is
configured for high resolution timers can run on a system which lacks the
necessary hardware support.

The high resolution timer code does not support SMP machines which have only
global clock event devices. The support of such hardware would involve IPI
calls when an interrupt happens. The overhead would be much larger than the
benefit. This is the reason why we currently disable high resolution and
dynamic ticks on i386 SMP systems which stop the local APIC in C3 power
state. A workaround is available as an idea, but the problem has not been
tackled yet.

The time ordered insertion of timers provides all the infrastructure to decide
whether the event device has to be reprogrammed when a timer is added. The
decision is made per timer base and synchronized across per-cpu timer bases in
a support function. The design allows the system to utilize separate per-CPU
clock event devices for the per-CPU timer bases, but currently only one
reprogrammable clock event device per-CPU is utilized.

When the timer interrupt happens, the next event interrupt handler is called
from the clock event distribution code and moves expired timers from the
red-black tree to a separate double linked list and invokes the softirq
handler. An additional mode field in the hrtimer structure allows the system to
execute callback functions directly from the next event interrupt handler. This
is restricted to code which can safely be executed in the hard interrupt
context. This applies, for example, to the common case of a wakeup function as
used by nanosleep. The advantage of executing the handler in the interrupt
context is the avoidance of up to two context switches - from the interrupted
context to the softirq and to the task which is woken up by the expired
timer.

Once a system has switched to high resolution mode, the periodic tick is
switched off. This disables the per system global periodic clock event device -
e.g. the PIT on i386 SMP systems.

The periodic tick functionality is provided by a per-cpu hrtimer. The callback
function is executed in the next event interrupt context and updates jiffies
and calls update_process_times and profiling. The implementation of the hrtimer
based periodic tick is designed to be extended with dynamic tick functionality.
This allows a single clock event device to be used to schedule high resolution
timer and periodic events (jiffies tick, profiling, process accounting) on UP
systems. This has been proven to work with the PIT on i386 and the Incrementer
on PPC.

The softirq for running the hrtimer queues and executing the callbacks has been
separated from the tick bound timer softirq to allow accurate delivery of high
resolution timer signals which are used by itimer and POSIX interval
timers. The execution of this softirq can still be delayed by other softirqs,
but the overall latencies have been significantly improved by this separation.

Figure #5 (OLS slides p.22) illustrates the transformation.


dynamic ticks
-------------

Dynamic ticks are the logical consequence of the hrtimer based periodic tick
replacement (sched_tick). The functionality of the sched_tick hrtimer is
extended by three functions:

- hrtimer_stop_sched_tick
- hrtimer_restart_sched_tick
- hrtimer_update_jiffies

hrtimer_stop_sched_tick() is called when a CPU goes into idle state. The code
evaluates the next scheduled timer event (from both hrtimers and the timer
wheel) and in case that the next event is further away than the next tick it
reprograms the sched_tick to this future event, to allow longer idle sleeps
without worthless interruption by the periodic tick. The function is also
called when an interrupt happens during the idle period, which does not cause a
reschedule. The call is necessary as the interrupt handler might have armed a
new timer whose expiry time is before the time which was identified as the
nearest event in the previous call to hrtimer_stop_sched_tick.

hrtimer_restart_sched_tick() is called when the CPU leaves the idle state before
it calls schedule(). hrtimer_restart_sched_tick() resumes the periodic tick,
which is kept active until the next call to hrtimer_stop_sched_tick().

hrtimer_update_jiffies() is called from irq_enter() when an interrupt happens
in the idle period, to make sure that jiffies are up to date and the interrupt
handler does not have to deal with a possibly stale jiffy value.
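
A hedged sketch of those call sites (the idle loop itself is architecture
code; arch_idle() here is a stand-in for the real low power wait):

  /* ordering only; prototypes as described above */
  void cpu_idle_loop(void)
  {
          while (1) {
                  hrtimer_stop_sched_tick();     /* may program a long sleep */
                  while (!need_resched())
                          arch_idle();           /* hypothetical low power wait */
                  hrtimer_restart_sched_tick();  /* periodic tick resumes */
                  schedule();
          }
  }

  /* hrtimer_update_jiffies() is not called here: it runs from
   * irq_enter() when an interrupt ends an idle sleep */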

The dynamic tick feature provides statistical values which are exported to
userspace via /proc/stats and can be made available for enhanced power
management control.

The implementation leaves room for further development like full tickless
systems, where the time slice is controlled by the scheduler, variable
frequency profiling, and a complete removal of jiffies in the future.


Aside from the current initial submission of i386 support, the patchset has
been extended to x86_64 and ARM already. Initial (work in progress) support
is also available for MIPS and PowerPC.

Thomas, Ingo
@ -48,14 +48,9 @@ following:
The SMBus controller is function 3 in device 1f. Class 0c05 is SMBus Serial
Controller.

-If you do NOT see the 24x3 device at function 3, and you can't figure out
-any way in the BIOS to enable it,

The ICH chips are quite similar to Intel's PIIX4 chip, at least in the
SMBus controller.

See the file i2c-piix4 for some additional information.


Process Call Support
--------------------
@ -74,6 +69,61 @@ SMBus 2.0 Support

The 82801DB (ICH4) and later chips support several SMBus 2.0 features.


Hidden ICH SMBus
----------------

If your system has an Intel ICH south bridge, but you do NOT see the
SMBus device at 00:1f.3 in lspci, and you can't figure out any way in the
BIOS to enable it, it means it has been hidden by the BIOS code. Asus is
well known for first doing this on their P4B motherboard, and many other
boards after that. Some vendor machines are affected as well.

The first thing to try is the "i2c_ec" ACPI driver. It could be that the
SMBus was hidden on purpose because it'll be driven by ACPI. If the
i2c_ec driver works for you, just forget about the i2c-i801 driver and
don't try to unhide the ICH SMBus. Even if i2c_ec doesn't work, you had
better make sure that the SMBus isn't used by the ACPI code. Try loading
the "fan" and "thermal" drivers, and check in /proc/acpi/fan and
/proc/acpi/thermal_zone. If you find anything there, it's likely that
ACPI is accessing the SMBus and it's safer not to unhide it. Only once
you are certain that ACPI isn't using the SMBus should you attempt to
unhide it.

In order to unhide the SMBus, we need to change the value of a PCI
register before the kernel enumerates the PCI devices. This is done in
drivers/pci/quirks.c, where all affected boards must be listed (see
function asus_hides_smbus_hostbridge.) If the SMBus device is missing,
and you think there's something interesting on the SMBus (e.g. a
hardware monitoring chip), you need to add your board to the list.

The motherboard is identified using the subvendor and subdevice IDs of the
host bridge PCI device. Get yours with "lspci -n -v -s 00:00.0":

00:00.0 Class 0600: 8086:2570 (rev 02)
        Subsystem: 1043:80f2
        Flags: bus master, fast devsel, latency 0
        Memory at fc000000 (32-bit, prefetchable) [size=32M]
        Capabilities: [e4] #09 [2106]
        Capabilities: [a0] AGP version 3.0

Here the host bridge ID is 2570 (82865G/PE/P), the subvendor ID is 1043
(Asus) and the subdevice ID is 80f2 (P4P800-X). You can find the symbolic
names for the bridge ID and the subvendor ID in include/linux/pci_ids.h,
and then add a case for your subdevice ID at the right place in
drivers/pci/quirks.c. Then please give it very good testing, to make sure
that the unhidden SMBus doesn't conflict with e.g. ACPI.
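
A hedged sketch of the kind of check that ends up there (the IDs are the
P4P800-X values from the lspci output above; asus_hides_smbus is the flag
drivers/pci/quirks.c already maintains):

  /* inside asus_hides_smbus_hostbridge(struct pci_dev *dev) */
  if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB &&
      dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
          switch (dev->subsystem_device) {
          case 0x80f2:            /* P4P800-X */
                  asus_hides_smbus = 1;
          }
  }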

If it works, proves useful (i.e. there are usable chips on the SMBus)
and seems safe, please submit a patch for inclusion into the kernel.

Note: There's a useful script in lm_sensors 2.10.2 and later, named
unhide_ICH_SMBus (in prog/hotplug), which uses the fakephp driver to
temporarily unhide the SMBus without having to patch and recompile your
kernel. It's very convenient if you just want to check if there's
anything interesting on your hidden ICH SMBus.


**********************
The lm_sensors project gratefully acknowledges the support of Texas
Instruments in the initial development of this driver.
@ -19,6 +19,7 @@ It currently supports the following devices:
 * (type=4) Analog Devices ADM1032 evaluation board
 * (type=5) Analog Devices evaluation boards: ADM1025, ADM1030, ADM1031
 * (type=6) Barco LPT->DVI (K5800236) adapter
 * (type=7) One For All JP1 parallel port adapter

These devices use different pinout configurations, so you have to tell
the driver what you have, using the type module parameter. There is no
@ -157,3 +158,17 @@ many more, using /dev/velleman.
  http://home.wanadoo.nl/hihihi/libk8005.htm
  http://struyve.mine.nu:8080/index.php?block=k8000
  http://sourceforge.net/projects/libk8005/


One For All JP1 parallel port adapter
-------------------------------------

The JP1 project revolves around a set of remote controls which expose,
via a 6 pin jumper in the battery compartment, the I2C bus on which their
internal configuration EEPROM lives. More details can be found at:

http://www.hifi-remote.com/jp1/

Details of the simple parallel port hardware can be found at:

http://www.hifi-remote.com/jp1/hardware.shtml
@ -6,7 +6,7 @@ Supported adapters:
    Datasheet: Publicly available at the Intel website
  * ServerWorks OSB4, CSB5, CSB6 and HT-1000 southbridges
    Datasheet: Only available via NDA from ServerWorks
-  * ATI IXP southbridges IXP200, IXP300, IXP400
+  * ATI IXP200, IXP300, IXP400 and SB600 southbridges
    Datasheet: Not publicly available
  * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
    Datasheet: Publicly available at the SMSC website http://www.smsc.com
@ -13,6 +13,9 @@ Supported adapters:
  * VIA Technologies, Inc. VT8235, VT8237R, VT8237A, VT8251
    Datasheet: available on request and under NDA from VIA

  * VIA Technologies, Inc. CX700
    Datasheet: available on request and under NDA from VIA

Authors:
    Kyösti Mälkki <kmalkki@cc.hut.fi>,
    Mark D. Studebaker <mdsxyz123@yahoo.com>,
@ -44,6 +47,7 @@ Your lspci -n listing must show one of these :
    device 1106:3227 (VT8237R)
    device 1106:3337 (VT8237A)
    device 1106:3287 (VT8251)
    device 1106:8324 (CX700)

If none of these show up, you should look in the BIOS for settings like
enable ACPI / SMBus or even USB.
@ -51,3 +55,6 @@ enable ACPI / SMBus or even USB.
Except for the oldest chips (VT82C596A/B, VT82C686A and most probably
VT8231), this driver supports I2C block transactions. Such transactions
are mainly useful to read from and write to EEPROMs.

The CX700 additionally appears to support SMBus PEC, although this driver
doesn't implement it yet.
@ -129,6 +129,12 @@ Technical changes:
  structure, whose name member should be initialized to a driver name
  string. i2c_driver itself has no name member anymore.

* [Driver model] Instead of shutdown or reboot notifiers, provide a
  shutdown() method in your driver.

* [Power management] Use the driver model suspend() and resume()
  callbacks instead of the obsolete pm_register() calls.

Coding policy:

* [Copyright] Use (C), not (c), for copyright.
@ -97,7 +97,7 @@ SMBus Write Word Data
=====================

This is the opposite operation of the Read Word Data command. 16 bits
-of data is read from a device, from a designated register that is
+of data is written to a device, to the designated register that is
specified through the Comm byte.

S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P
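
From a kernel driver, this operation is typically issued through the
SMBus helper; a hedged sketch (the register number 0x10 and the function
name are arbitrary examples):

  #include <linux/i2c.h>

  static int foo_write_threshold(struct i2c_client *client, u16 value)
  {
          /* Comm byte = 0x10, DataLow/DataHigh taken from 'value' */
          return i2c_smbus_write_word_data(client, 0x10, value);
  }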
@ -21,20 +21,26 @@ The driver structure

Usually, you will implement a single driver structure, and instantiate
all clients from it. Remember, a driver structure contains general access
-routines, a client structure specific information like the actual I2C
-address.
+routines, and should be zero-initialized except for fields with data you
+provide. A client structure holds device-specific information like the
+driver model device node, and its I2C address.

static struct i2c_driver foo_driver = {
        .driver = {
                .name   = "foo",
        },
-       .attach_adapter = &foo_attach_adapter,
-       .detach_client  = &foo_detach_client,
-       .command        = &foo_command /* may be NULL */
+       .attach_adapter = foo_attach_adapter,
+       .detach_client  = foo_detach_client,
+       .shutdown       = foo_shutdown, /* optional */
+       .suspend        = foo_suspend,  /* optional */
+       .resume         = foo_resume,   /* optional */
+       .command        = foo_command,  /* optional */
};

-The name field must match the driver name, including the case. It must not
-contain spaces, and may be up to 31 characters long.
+The name field is the driver name, and must not contain spaces. It
+should match the module name (if the driver can be compiled as a module),
+although you can use MODULE_ALIAS (passing "foo" in this example) to add
+another name for the module.

All other fields are for call-back functions which will be explained
below.
@ -43,11 +49,18 @@ below.
Extra client data
=================

-The client structure has a special `data' field that can point to any
-structure at all. You can use this to keep client-specific data. You
+Each client structure has a special `data' field that can point to any
+structure at all. You should use this to keep device-specific data,
+especially in drivers that handle multiple I2C or SMBUS devices. You
do not always need this, but especially for `sensors' drivers, it can
be very useful.

+       /* store the value */
+       void i2c_set_clientdata(struct i2c_client *client, void *data);
+
+       /* retrieve the value */
+       void *i2c_get_clientdata(struct i2c_client *client);
+
An example structure is below.

struct foo_data {
@ -493,6 +506,33 @@ by `__init_data'. Those functions and structures can be removed after
kernel booting (or module loading) is completed.


Power Management
================

If your I2C device needs special handling when entering a system low
power state -- like putting a transceiver into a low power mode, or
activating a system wakeup mechanism -- do that in the suspend() method.
The resume() method should reverse what the suspend() method does.

These are standard driver model calls, and they work just like they
would for any other driver stack. The calls can sleep, and can use
I2C messaging to the device being suspended or resumed (since their
parent I2C adapter is active when these calls are issued, and IRQs
are still enabled).

System Shutdown
===============

If your I2C device needs special handling when the system shuts down
or reboots (including kexec) -- like turning something off -- use a
shutdown() method.

Again, this is a standard driver model call, working just like it
would for any other driver stack: the calls can sleep, and can use
I2C messaging.


Command function
================
@ -104,6 +104,9 @@ loader, and have no meaning to the kernel directly.
Do not modify the syntax of boot loader parameters without extreme
need or coordination with <Documentation/i386/boot.txt>.

There are also arch-specific kernel-parameters not documented here.
See for example <Documentation/x86_64/boot-options.txt>.

Note that ALL kernel parameters listed below are CASE SENSITIVE, and that
a trailing = on the name of any parameter states that that parameter will
be entered as an environment variable, whereas its absence indicates that
@ -361,6 +364,11 @@ and is between 256 and 4096 characters. It is defined in the file
                clocksource is not available, it defaults to PIT.
                Format: { pit | tsc | cyclone | pmtmr }

        code_bytes      [IA32] How many bytes of object code to print in an
                        oops report.
                        Range: 0 - 8192
                        Default: 64

        disable_8254_timer
        enable_8254_timer
                        [IA32/X86_64] Disable/Enable interrupt 0 timer routing
@ -601,6 +609,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        highmem otherwise. This also works to reduce highmem
                        size on bigger boxes.

        highres=        [KNL] Enable/disable high resolution timer mode.
                        Valid parameters: "on", "off"
                        Default: "on"

        hisax=          [HW,ISDN]
                        See Documentation/isdn/README.HiSax.
@ -1070,6 +1082,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        in certain environments such as networked servers or
                        real-time systems.

        nohz=           [KNL] Boottime enable/disable dynamic ticks
                        Valid arguments: on, off
                        Default: on

        noirqbalance    [IA-32,SMP,KNL] Disable kernel irq balancing

        noirqdebug      [IA-32] Disables the code which attempts to detect and
@ -1334,6 +1334,9 @@ platforms are moved over to use the flattened-device-tree model.
   fsl-usb2-mph compatible controllers. Either this property or
   "port0" (or both) must be defined for "fsl-usb2-mph" compatible
   controllers.
 - dr_mode : indicates the working mode for "fsl-usb2-dr" compatible
   controllers. Can be "host", "peripheral", or "otg". Defaults to
   "host" if not defined for backward compatibility.

Recommended properties :
 - interrupts : <a b> where a is the interrupt number and b is a
@ -1367,6 +1370,7 @@ platforms are moved over to use the flattened-device-tree model.
                #size-cells = <0>;
                interrupt-parent = <700>;
                interrupts = <26 1>;
                dr_mode = "otg";
                phy = "ulpi";
        };
@ -1,7 +1,7 @@
-MPC52xx Device Tree Bindings
+MPC5200 Device Tree Bindings
----------------------------

-(c) 2006 Secret Lab Technologies Ltd
+(c) 2006-2007 Secret Lab Technologies Ltd
Grant Likely <grant.likely at secretlab.ca>

********** DRAFT ***********
@ -20,11 +20,11 @@ described in Documentation/powerpc/booting-without-of.txt), or passed
by Open Firmware (IEEE 1275) compatible firmware using an OF compatible
client interface API.

-This document specifies the requirements on the device-tree for mpc52xx
+This document specifies the requirements on the device-tree for mpc5200
based boards. These requirements are above and beyond the details
specified in either the OpenFirmware spec or booting-without-of.txt

-All new mpc52xx-based boards are expected to match this document. In
+All new mpc5200-based boards are expected to match this document. In
cases where this document is not sufficient to support a new board port,
this document should be updated as part of adding the new board support.
@ -32,26 +32,26 @@ II - Philosophy
===============
The core of this document is naming convention. The whole point of
defining this convention is to reduce or eliminate the number of
-special cases required to support a 52xx board. If all 52xx boards
-follow the same convention, then generic 52xx support code will work
+special cases required to support a 5200 board. If all 5200 boards
+follow the same convention, then generic 5200 support code will work
rather than coding special cases for each new board.

This section tries to capture the thought process behind why the naming
convention is what it is.

-1. Node names
--------------
+1. names
+---------
There is strong convention/requirements already established for children
of the root node. 'cpus' describes the processor cores, 'memory'
describes memory, and 'chosen' provides boot configuration. Other nodes
are added to describe devices attached to the processor local bus.
-Following convention already established with other system-on-chip
-processors, MPC52xx boards must have an 'soc5200' node as a child of the
-root node.
-
-The soc5200 node holds child nodes for all on chip devices. Child nodes
-are typically named after the configured function. ie. the FEC node is
-named 'ethernet', and a PSC in uart mode is named 'serial'.
+Following convention already established with other system-on-chip
+processors, 5200 device trees should use the name 'soc5200' for the
+parent node of on chip devices, and the root node should be its parent.
+
+Child nodes are typically named after the configured function. ie.
+the FEC node is named 'ethernet', and a PSC in uart mode is named 'serial'.

2. device_type property
-----------------------
@ -66,28 +66,47 @@ exactly.
Since device_type isn't enough to match devices to drivers, there also
needs to be a naming convention for the compatible property. Compatible
is a list of device descriptions sorted from specific to generic. For
-the mpc52xx, the required format for each compatible value is
-<chip>-<device>[-<mode>]. At the minimum, the list shall contain two
-items; the first specifying the exact chip, and the second specifying
-mpc52xx for the chip.
+the mpc5200, the required format for each compatible value is
+<chip>-<device>[-<mode>]. The OS should be able to match a device driver
+to the device based solely on the compatible value. If two drivers
+match on the compatible list, the 'most compatible' driver should be
+selected.

-ie. ethernet on mpc5200b: compatible = "mpc5200b-ethernet\0mpc52xx-ethernet"
-
-The idea here is that most drivers will match to the most generic field
-in the compatible list (mpc52xx-*), but can also test the more specific
-field for enabling bug fixes or extra features.
+The split between the MPC5200 and the MPC5200B leaves a bit of a
+conundrum. How should the compatible property be set up to provide
+maximum compatibility information, but still accurately describe the
+chip? For the MPC5200, the answer is easy. Most of the SoC devices
+originally appeared on the MPC5200. Since they didn't exist anywhere
+else, the 5200 compatible properties will contain only one item:
+"mpc5200-<device>".
+
+The 5200B is almost the same as the 5200, but not quite. It fixes
+silicon bugs and it adds a small number of enhancements. Most of the
+devices provide exactly the same interface as on the 5200. A few
+devices have extra functions but still have a backwards compatible mode.
+To express this information as completely as possible, 5200B device trees
+should have two items in the compatible list:
+"mpc5200b-<device>\0mpc5200-<device>". It is *strongly* recommended
+that 5200B device trees follow this convention (instead of only listing
+the base mpc5200 item).
+
+If another chip appears on the market with one of the mpc5200 SoC
+devices, then the compatible list should include mpc5200-<device>.
+
+ie. ethernet on mpc5200: compatible = "mpc5200-ethernet"
+    ethernet on mpc5200b: compatible = "mpc5200b-ethernet\0mpc5200-ethernet"

Modal devices, like PSCs, also append the configured function to the
end of the compatible field. ie. A PSC in i2s mode would specify
-"mpc52xx-psc-i2s", not "mpc52xx-i2s". This convention is chosen to
+"mpc5200-psc-i2s", not "mpc5200-i2s". This convention is chosen to
avoid naming conflicts with non-psc devices providing the same
-function. For example, "mpc52xx-spi" and "mpc52xx-psc-spi" describe
+function. For example, "mpc5200-spi" and "mpc5200-psc-spi" describe
the mpc5200 simple spi device and a PSC spi mode respectively.

If the soc device is more generic and present on other SOCs, the
compatible property can specify the more generic device type also.

-ie. mscan: compatible = "mpc5200-mscan\0mpc52xx-mscan\0fsl,mscan";
+ie. mscan: compatible = "mpc5200-mscan\0fsl,mscan";

At the time of writing, exact chip may be either 'mpc5200' or
'mpc5200b'.
@ -96,7 +115,7 @@ Device drivers should always try to match as generically as possible.

III - Structure
===============
-The device tree for an mpc52xx board follows the structure defined in
+The device tree for an mpc5200 board follows the structure defined in
booting-without-of.txt with the following additional notes:

0) the root node
@ -115,7 +134,7 @@ Typical memory description node; see booting-without-of.

3) The soc5200 node
-------------------
-This node describes the on chip SOC peripherals. Every mpc52xx based
+This node describes the on chip SOC peripherals. Every mpc5200 based
board will have this node, and as such there is a common naming
convention for SOC devices.
@ -125,71 +144,111 @@ name type description
device_type       string   must be "soc"
ranges            int      should be <0 baseaddr baseaddr+10000>
reg               int      must be <baseaddr 10000>
+compatible        string   mpc5200: "mpc5200-soc"
+                           mpc5200b: "mpc5200b-soc\0mpc5200-soc"
+system-frequency  int      Fsystem frequency; source of all
+                           other clocks.
+bus-frequency     int      IPB bus frequency in HZ. Clock rate
+                           used by most of the soc devices.
+#interrupt-cells  int      must be <3>.

Recommended properties:
name              type     description
----              ----     -----------
-compatible        string   should be "<chip>-soc\0mpc52xx-soc"
-                           ie. "mpc5200b-soc\0mpc52xx-soc"
-#interrupt-cells  int      must be <3>. If it is not defined
-                           here then it must be defined in every
-                           soc device node.
-bus-frequency     int      IPB bus frequency in HZ. Clock rate
-                           used by most of the soc devices.
-                           Defining it here avoids needing it
-                           added to every device node.
+model             string   Exact model of the chip;
+                           ie: model="fsl,mpc5200"
+revision          string   Silicon revision of chip
+                           ie: revision="M08A"

The 'model' and 'revision' properties are *strongly* recommended. Having
them present acts as a bit of a safety net for working around as yet
undiscovered bugs on one version of silicon. For example, device drivers
can use the model and revision properties to decide if a bug fix should
be turned on.

4) soc5200 child nodes
----------------------
Any on chip SOC devices available to Linux must appear as soc5200 child nodes.

-Note: in the tables below, '*' matches all <chip> values. ie.
-*-pic would translate to "mpc5200-pic\0mpc52xx-pic"
+Note: The tables below show the value for the mpc5200. A mpc5200b device
+tree should use the "mpc5200b-<device>\0mpc5200-<device>" form.

Required soc5200 child nodes:
name              device_type           compatible         Description
----              -----------           ----------         -----------
-cdm@<addr>        cdm                   *-cmd              Clock Distribution
-pic@<addr>        interrupt-controller  *-pic              need an interrupt
-                                                           controller to boot
-bestcomm@<addr>   dma-controller        *-bestcomm         52xx pic also requires
-                                                           the bestcomm device
+cdm@<addr>        cdm                   mpc5200-cmd        Clock Distribution
+pic@<addr>        interrupt-controller  mpc5200-pic        need an interrupt
+                                                           controller to boot
+bestcomm@<addr>   dma-controller        mpc5200-bestcomm   5200 pic also requires
+                                                           the bestcomm device

Recommended soc5200 child nodes; populate as needed for your board:
name              device_type   compatible            Description
----              -----------   ----------            -----------
-gpt@<addr>        gpt           *-gpt                 General purpose timers
-rtc@<addr>        rtc           *-rtc                 Real time clock
-mscan@<addr>      mscan         *-mscan               CAN bus controller
-pci@<addr>        pci           *-pci                 PCI bridge
-serial@<addr>     serial        *-psc-uart            PSC in serial mode
-i2s@<addr>        sound         *-psc-i2s             PSC in i2s mode
-ac97@<addr>       sound         *-psc-ac97            PSC in ac97 mode
-spi@<addr>        spi           *-psc-spi             PSC in spi mode
-irda@<addr>       irda          *-psc-irda            PSC in IrDA mode
-spi@<addr>        spi           *-spi                 MPC52xx spi device
-ethernet@<addr>   network       *-fec                 MPC52xx ethernet device
-ata@<addr>        ata           *-ata                 IDE ATA interface
-i2c@<addr>        i2c           *-i2c                 I2C controller
-usb@<addr>        usb-ohci-be   *-ohci,ohci-be        USB controller
-xlb@<addr>        xlb           *-xlb                 XLB arbritrator
+gpt@<addr>        gpt           mpc5200-gpt           General purpose timers
+rtc@<addr>        rtc           mpc5200-rtc           Real time clock
+mscan@<addr>      mscan         mpc5200-mscan         CAN bus controller
+pci@<addr>        pci           mpc5200-pci           PCI bridge
+serial@<addr>     serial        mpc5200-psc-uart      PSC in serial mode
+i2s@<addr>        sound         mpc5200-psc-i2s       PSC in i2s mode
+ac97@<addr>       sound         mpc5200-psc-ac97      PSC in ac97 mode
+spi@<addr>        spi           mpc5200-psc-spi       PSC in spi mode
+irda@<addr>       irda          mpc5200-psc-irda      PSC in IrDA mode
+spi@<addr>        spi           mpc5200-spi           MPC5200 spi device
+ethernet@<addr>   network       mpc5200-fec           MPC5200 ethernet device
+ata@<addr>        ata           mpc5200-ata           IDE ATA interface
+i2c@<addr>        i2c           mpc5200-i2c           I2C controller
+usb@<addr>        usb-ohci-be   mpc5200-ohci,ohci-be  USB controller
+xlb@<addr>        xlb           mpc5200-xlb           XLB arbitrator

Important child node properties:
name              type     description
----              ----     -----------
cell-index        int      When multiple devices are present, is the
                           index of the device in the hardware (ie. There
                           are 6 PSC on the 5200 numbered PSC1 to PSC6)
                           PSC1 has 'cell-index = <0>'
                           PSC4 has 'cell-index = <3>'

5) General Purpose Timer nodes (child of soc5200 node)
On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board
design supports the internal wdt, then the device node for GPT0 should
include the empty property 'has-wdt'.

6) PSC nodes (child of soc5200 node)
PSC nodes can define the optional 'port-number' property to force assignment
order of serial ports. For example, PSC5 might be physically connected to
the port labeled 'COM1' and PSC1 wired to 'COM2'. In this case, PSC5 would
have a "port-number = <0>" property, and PSC1 would have "port-number = <1>".

PSC in i2s mode: The mpc5200 and mpc5200b PSCs are not compatible when in
i2s mode. An 'mpc5200b-psc-i2s' node cannot include 'mpc5200-psc-i2s' in the
compatible field.

IV - Extra Notes
================

1. Interrupt mapping
--------------------
-The mpc52xx pic driver splits hardware IRQ numbers into two levels. The
+The mpc5200 pic driver splits hardware IRQ numbers into two levels. The
split reflects the layout of the PIC hardware itself, which groups
interrupts into one of three groups; CRIT, MAIN or PERP. Also, the
Bestcomm dma engine has its own set of interrupt sources which are
cascaded off of peripheral interrupt 0, which the driver interprets as a
fourth group, SDMA.

-The interrupts property for device nodes using the mpc52xx pic consists
+The interrupts property for device nodes using the mpc5200 pic consists
of three cells; <L1 L2 level>

    L1 := [CRIT=0, MAIN=1, PERP=2, SDMA=3]
    L2 := interrupt number; directly mapped from the value in the
          "ICTL PerStat, MainStat, CritStat Encoded Register"
    level := [LEVEL_HIGH=0, EDGE_RISING=1, EDGE_FALLING=2, LEVEL_LOW=3]

2. Shared registers
-------------------
Some SoC devices share registers between them. ie. the i2c devices use
a single clock control register, and almost all devices are affected by
the port_config register. Devices which need to manipulate shared regs
should look to the parent SoC node. The soc node is responsible
for arbitrating all shared register access.
@ -180,40 +180,81 @@ PCI
|
|||
pci=lastbus=NUMBER Scan upto NUMBER busses, no matter what the mptable says.
|
||||
pci=noacpi Don't use ACPI to set up PCI interrupt routing.
|
||||
|
||||
IOMMU
|
||||
IOMMU (input/output memory management unit)
|
||||
|
||||
iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
|
||||
[,forcesac][,fullflush][,nomerge][,noaperture][,calgary]
|
||||
size set size of iommu (in bytes)
|
||||
noagp don't initialize the AGP driver and use full aperture.
|
||||
off don't use the IOMMU
|
||||
leak turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
|
||||
memaper[=order] allocate an own aperture over RAM with size 32MB^order.
|
||||
noforce don't force IOMMU usage. Default.
|
||||
force Force IOMMU.
|
||||
merge Do SG merging. Implies force (experimental)
|
||||
nomerge Don't do SG merging.
|
||||
forcesac For SAC mode for masks <40bits (experimental)
|
||||
fullflush Flush IOMMU on each allocation (default)
|
||||
nofullflush Don't use IOMMU fullflush
|
||||
allowed overwrite iommu off workarounds for specific chipsets.
|
||||
soft Use software bounce buffering (default for Intel machines)
|
||||
noaperture Don't touch the aperture for AGP.
|
||||
allowdac Allow DMA >4GB
|
||||
When off all DMA over >4GB is forced through an IOMMU or bounce
|
||||
buffering.
|
||||
nodac Forbid DMA >4GB
|
||||
panic Always panic when IOMMU overflows
|
||||
calgary Use the Calgary IOMMU if it is available
|
||||
Currently four x86-64 PCI-DMA mapping implementations exist:
|
||||
|
||||
swiotlb=pages[,force]
|
||||
1. <arch/x86_64/kernel/pci-nommu.c>: use no hardware/software IOMMU at all
|
||||
(e.g. because you have < 3 GB memory).
|
||||
Kernel boot message: "PCI-DMA: Disabling IOMMU"
|
||||
|
||||
pages Prereserve that many 128K pages for the software IO bounce buffering.
|
||||
force Force all IO through the software TLB.
|
||||
2. <arch/x86_64/kernel/pci-gart.c>: AMD GART based hardware IOMMU.
|
||||
Kernel boot message: "PCI-DMA: using GART IOMMU"
|
||||
|
||||
calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
|
||||
calgary=[translate_empty_slots]
|
||||
calgary=[disable=<PCI bus number>]
|
||||
3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used
|
||||
e.g. if there is no hardware IOMMU in the system and it is need because
|
||||
you have >3GB memory or told the kernel to us it (iommu=soft))
|
||||
Kernel boot message: "PCI-DMA: Using software bounce buffering
|
||||
for IO (SWIOTLB)"
|
||||
|
||||
4. <arch/x86_64/pci-calgary.c> : IBM Calgary hardware IOMMU. Used in IBM
|
||||
pSeries and xSeries servers. This hardware IOMMU supports DMA address
|
||||
mapping with memory protection, etc.
|
||||
Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
|
||||
|
||||
 iommu=[<size>][,noagp][,off][,force][,noforce][,leak[=<nr_of_leak_pages>]
	[,memaper[=<order>]][,merge][,forcesac][,fullflush][,nomerge]
	[,noaperture][,calgary]

 General iommu options:
   off		Don't initialize and use any kind of IOMMU.
   noforce	Don't force hardware IOMMU usage when it is not needed.
		(default).
   force	Force the use of the hardware IOMMU even when it is
		not actually needed (e.g. because < 3 GB memory).
   soft		Use software bounce buffering (SWIOTLB) (default for
		Intel machines). This can be used to prevent the usage
		of an available hardware IOMMU.

 iommu options only relevant to the AMD GART hardware IOMMU:
   <size>	Set the size of the remapping area in bytes.
   allowed	Overwrite iommu off workarounds for specific chipsets.
   fullflush	Flush IOMMU on each allocation (default).
   nofullflush	Don't use IOMMU fullflush.
   leak		Turn on simple iommu leak tracing (only when
		CONFIG_IOMMU_LEAK is on). Default number of leak pages
		is 20.
   memaper[=<order>] Allocate an own aperture over RAM with size 32MB<<order.
		(default: order=1, i.e. 64MB)
   merge	Do scatter-gather (SG) merging. Implies "force"
		(experimental).
   nomerge	Don't do scatter-gather (SG) merging.
   noaperture	Ask the IOMMU not to touch the aperture for AGP.
   forcesac	Force single-address cycle (SAC) mode for masks <40bits
		(experimental).
   noagp	Don't initialize the AGP driver and use full aperture.
   allowdac	Allow double-address cycle (DAC) mode, i.e. DMA >4GB.
		DAC is used with 32-bit PCI to push a 64-bit address in
		two cycles. When off all DMA over >4GB is forced through
		an IOMMU or software bounce buffering.
   nodac	Forbid DAC mode, i.e. DMA >4GB.
   panic	Always panic when IOMMU overflows.
   calgary	Use the Calgary IOMMU if it is available.

 iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
 implementation:
 swiotlb=<pages>[,force]
   <pages>	Prereserve that many 128K pages for the software IO
		bounce buffering.
   force	Force all IO through the software TLB.

 Settings for the IBM Calgary hardware IOMMU currently found in IBM
 pSeries and xSeries machines:

 calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
 calgary=[translate_empty_slots]
 calgary=[disable=<PCI bus number>]
 panic		Always panic when IOMMU overflows

 64k,...,8M - Set the size of each PCI slot's translation table
 when using the Calgary IOMMU. This is the size of the translation
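 For example, a hypothetical boot line (not taken from the tree) that turns
 the hardware IOMMU off and prereserves 64MB (512 x 128K pages) of bounce
 buffers would look like:

	iommu=soft swiotlb=512,force
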
@ -234,14 +275,14 @@ IOMMU

Debugging

 oops=panic Always panic on oopses. Default is to just kill the process,
	    but there is a small probability of deadlocking the machine.
	    This will also cause panics on machine check exceptions.
	    Useful together with panic=30 to trigger a reboot.

 kstack=N Print that many words from the kernel stack in oops dumps.
 kstack=N Print N words from the kernel stack in oops dumps.

 pagefaulttrace Dump all page faults. Only useful for extreme debugging
		and will create a lot of output.

 call_trace=[old|both|newfallback|new]
@ -251,15 +292,8 @@ Debugging

	newfallback: use new unwinder but fall back to old if it gets
		     stuck (default)

 call_trace=[old|both|newfallback|new]
	old: use old inexact backtracer
	new: use new exact dwarf2 unwinder
	both: print entries from both
	newfallback: use new unwinder but fall back to old if it gets
		     stuck (default)

Misc
Miscellaneous

 noreplacement Don't replace instructions with more appropriate ones
	       for the CPU. This may be useful on asymmetric MP systems
	       where some CPU have less capabilities than the others.
	       where some CPUs have fewer capabilities than others.

@ -2,7 +2,7 @@ Firmware support for CPU hotplug under Linux/x86-64

---------------------------------------------------

Linux/x86-64 supports CPU hotplug now. For various reasons Linux wants to
know in advance boot time the maximum number of CPUs that could be plugged
know in advance of boot time the maximum number of CPUs that could be plugged
into the system. ACPI 3.0 currently has no official way to supply
this information from the firmware to the operating system.

@ -9,9 +9,9 @@ zombie. While the thread is in user space the kernel stack is empty

except for the thread_info structure at the bottom.

In addition to the per thread stacks, there are specialized stacks
associated with each cpu. These stacks are only used while the kernel
is in control on that cpu, when a cpu returns to user space the
specialized stacks contain no useful data. The main cpu stacks is
associated with each CPU. These stacks are only used while the kernel
is in control on that CPU; when a CPU returns to user space the
specialized stacks contain no useful data. The main CPU stacks are:

* Interrupt stack. IRQSTACKSIZE

@ -32,17 +32,17 @@ x86_64 also has a feature which is not available on i386, the ability

to automatically switch to a new stack for designated events such as
double fault or NMI, which makes it easier to handle these unusual
events on x86_64. This feature is called the Interrupt Stack Table
(IST). There can be up to 7 IST entries per cpu. The IST code is an
index into the Task State Segment (TSS), the IST entries in the TSS
point to dedicated stacks, each stack can be a different size.
(IST). There can be up to 7 IST entries per CPU. The IST code is an
index into the Task State Segment (TSS). The IST entries in the TSS
point to dedicated stacks; each stack can be a different size.

An IST is selected by an non-zero value in the IST field of an
An IST is selected by a non-zero value in the IST field of an
interrupt-gate descriptor. When an interrupt occurs and the hardware
loads such a descriptor, the hardware automatically sets the new stack
pointer based on the IST value, then invokes the interrupt handler. If
software wants to allow nested IST interrupts then the handler must
adjust the IST values on entry to and exit from the interrupt handler.
(this is occasionally done, e.g. for debug exceptions)
(This is occasionally done, e.g. for debug exceptions.)

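As an illustration, kernels of this era wire the double-fault vector to its
IST slot roughly like this (a sketch based on arch/x86_64/kernel/traps.c; the
helper name may differ between releases):

	/* Have the CPU switch to the DOUBLEFAULT_STACK IST stack
	 * before invoking the #DF handler. */
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
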
Events with different IST codes (i.e. with different stacks) can be
nested. For example, a debug interrupt can safely be interrupted by an
@ -58,17 +58,17 @@ The currently assigned IST stacks are :-

  Used for interrupt 12 - Stack Fault Exception (#SS).

  This allows to recover from invalid stack segments. Rarely
  This allows the CPU to recover from invalid stack segments. Rarely
  happens.

* DOUBLEFAULT_STACK. EXCEPTION_STKSZ (PAGE_SIZE).

  Used for interrupt 8 - Double Fault Exception (#DF).

  Invoked when handling a exception causes another exception. Happens
  when the kernel is very confused (e.g. kernel stack pointer corrupt)
  Using a separate stack allows to recover from it well enough in many
  cases to still output an oops.
  Invoked when handling one exception causes another exception. Happens
  when the kernel is very confused (e.g. kernel stack pointer corrupt).
  Using a separate stack allows the kernel to recover from it well enough
  in many cases to still output an oops.

* NMI_STACK. EXCEPTION_STKSZ (PAGE_SIZE).

@ -0,0 +1,70 @@

Configurable sysfs parameters for the x86-64 machine check code.

Machine checks report internal hardware error conditions detected
by the CPU. Uncorrected errors typically cause a machine check
(often with panic), corrected ones cause a machine check log entry.

Machine checks are organized in banks (normally associated with
a hardware subsystem) and subevents in a bank. The exact meaning
of the banks and subevents is CPU specific.

mcelog knows how to decode them.

When you see the "Machine check errors logged" message in the system
log then mcelog should run to collect and decode machine check entries
from /dev/mcelog. Normally mcelog should be run regularly from a cronjob.

Each CPU has a directory in /sys/devices/system/machinecheck/machinecheckN
(N = CPU number)

The directory contains some configurable entries:

Entries:

bankNctl
	(N bank number)
	64-bit hex bitmask enabling/disabling specific subevents for bank N.
	When a bit in the bitmask is zero then the respective
	subevent will not be reported.
	By default all events are enabled.
	Note that the BIOS maintains another mask to disable specific events
	per bank. This is not visible here.
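	For example (a hypothetical mask, not from the source): clearing
	bit 1 of bank 4 on CPU 0 would suppress that one subevent while
	leaving the rest enabled:

	echo 0xfffffffffffffffd > /sys/devices/system/machinecheck/machinecheck0/bank4ctl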

The following entries appear for each CPU, but they are truly shared
between all CPUs.

check_interval
	How often to poll for corrected machine check errors, in seconds
	(Note: output is hexadecimal). Default 5 minutes.

tolerant
	Tolerance level. When a machine check exception occurs for a
	non-corrected machine check the kernel can take different actions.
	Since machine check exceptions can happen any time it is sometimes
	risky for the kernel to kill a process because it defies
	normal kernel locking rules. The tolerance level configures
	how hard the kernel tries to recover even at some risk of deadlock.

	0: always panic,
	1: panic if deadlock possible,
	2: try to avoid panic,
	3: never panic or exit (for testing only)

	Default: 1

	Note this only makes a difference if the CPU allows recovery
	from a machine check exception. Current x86 CPUs generally do not.

trigger
	Program to run when a machine check event is detected.
	This is an alternative to running mcelog regularly from cron
	and allows events to be detected faster.
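	For example, pointing the trigger at the mcelog binary (path
	hypothetical) makes events get decoded as they arrive:

	echo /usr/sbin/mcelog > /sys/devices/system/machinecheck/machinecheck0/trigger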
TBD: document entries for AMD threshold interrupt configuration.

For more details about the x86 machine check architecture
see the Intel and AMD architecture manuals from their developer websites.

For more details about the architecture see
http://one.firstfloor.org/~andi/mce.pdf

@ -3,26 +3,26 @@

Virtual memory map with 4 level page tables:

0000000000000000 - 00007fffffffffff (=47bits) user space, different per mm
0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
hole caused by [48:63] sign extension
ffff800000000000 - ffff80ffffffffff (=40bits) guard hole
ffff810000000000 - ffffc0ffffffffff (=46bits) direct mapping of all phys. memory
ffffc10000000000 - ffffc1ffffffffff (=40bits) hole
ffffc20000000000 - ffffe1ffffffffff (=45bits) vmalloc/ioremap space
ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole
ffff810000000000 - ffffc0ffffffffff (=46 bits) direct mapping of all phys. memory
ffffc10000000000 - ffffc1ffffffffff (=40 bits) hole
ffffc20000000000 - ffffe1ffffffffff (=45 bits) vmalloc/ioremap space
... unused hole ...
ffffffff80000000 - ffffffff82800000 (=40MB) kernel text mapping, from phys 0
ffffffff80000000 - ffffffff82800000 (=40 MB) kernel text mapping, from phys 0
... unused hole ...
ffffffff88000000 - fffffffffff00000 (=1919MB) module mapping space
ffffffff88000000 - fffffffffff00000 (=1919 MB) module mapping space

The direct mapping covers all memory in the system upto the highest
The direct mapping covers all memory in the system up to the highest
memory address (this means in some cases it can also include PCI memory
holes)
holes).

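For illustration, the direct mapping means a physical address and its kernel
virtual address differ by a constant offset from the base of the region
above; a simplified sketch (constant as in 2.6.21-era x86-64, helper name
hypothetical):

	#define PAGE_OFFSET 0xffff810000000000UL

	static inline void *phys_to_virt_sketch(unsigned long paddr)
	{
		return (void *)(paddr + PAGE_OFFSET);	/* simplified __va() */
	}
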
vmalloc space is lazily synchronized into the different PML4 pages of
the processes using the page fault handler, with init_level4_pgt as
reference.

Current X86-64 implementations only support 40 bit of address space,
but we support upto 46bits. This expands into MBZ space in the page tables.
Current X86-64 implementations only support 40 bits of address space,
but we support up to 46 bits. This expands into MBZ space in the page tables.

-Andi Kleen, Jul 2004

MAINTAINERS
@ -620,6 +620,11 @@ P: Haavard Skinnemoen

M:	hskinnemoen@atmel.com
S:	Supported

ATMEL SPI DRIVER
P:	Haavard Skinnemoen
M:	hskinnemoen@atmel.com
S:	Supported

ATMEL WIRELESS DRIVER
P:	Simon Kelley
M:	simon@thekelleys.org.uk
@ -2523,6 +2528,12 @@ M: olof@lixom.net

L:	netdev@vger.kernel.org
S:	Maintained

PA SEMI SMBUS DRIVER
P:	Olof Johansson
M:	olof@lixom.net
L:	i2c@lm-sensors.org
S:	Maintained

PARALLEL PORT SUPPORT
P:	Phil Blundell
M:	philb@gnu.org
@ -3768,6 +3779,7 @@ P: Andi Kleen

M:	ak@suse.de
L:	discuss@x86-64.org
W:	http://www.x86-64.org
T:	quilt ftp://ftp.firstfloor.org/pub/ak/x86_64/quilt-current
S:	Maintained

YAM DRIVER FOR AX.25

Makefile
@ -825,9 +825,6 @@ include/config/kernel.release: include/config/auto.conf FORCE

# Listed in dependency order
PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3

# prepare-all is deprecated, use prepare as valid replacement
PHONY += prepare-all

# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
@ -860,7 +857,7 @@ prepare0: archprepare FORCE

	$(Q)$(MAKE) $(build)=.

# All the preparing..
prepare prepare-all: prepare0
prepare: prepare0

# Leave this as default for preprocessing vmlinux.lds.S, which is now
# done in arch/$(ARCH)/kernel/Makefile
@ -931,6 +928,12 @@ headers_install: include/linux/version.h scripts_basic FORCE

	$(Q)$(MAKE) $(build)=scripts scripts/unifdef
	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.headersinst obj=include

PHONY += headers_check_all
headers_check_all: headers_install_all
	$(Q)for arch in $(HDRARCHES); do \
	     $(MAKE) ARCH=$$arch -f $(srctree)/scripts/Makefile.headersinst obj=include BIASMDIR=-bi-$$arch HDRCHECK=1 ;\
	done

PHONY += headers_check
headers_check: headers_install
	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.headersinst obj=include HDRCHECK=1

@ -159,8 +159,7 @@ void __init init_IRQ(void)

	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE |
			IRQ_NOPROBE;
		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;

#ifdef CONFIG_SMP
	bad_irq_desc.affinity = CPU_MASK_ALL;

@ -70,5 +70,5 @@ register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int por

	isa_membase = membase;
	isa_portbase = portbase;
	isa_portshift = portshift;
	isa_sysctl_header = register_sysctl_table(ctl_bus, 0);
	isa_sysctl_header = register_sysctl_table(ctl_bus);
}

@ -87,7 +87,7 @@ static struct clocksource clocksource_imx = {

	.read		= imx_get_cycles,
	.mask		= 0xFFFFFFFF,
	.shift		= 20,
	.is_continuous	= 1,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init imx_clocksource_init(void)

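The same .is_continuous -> .flags conversion repeats in the hunks below. A
minimal sketch of the new-style definition (field names per 2.6.21-era
<linux/clocksource.h>; the "my_timer" names are hypothetical):

	#include <linux/clocksource.h>

	static cycle_t my_timer_read(void)
	{
		/* Read the free-running counter; register name hypothetical. */
		return (cycle_t)__raw_readl(MY_TIMER_COUNT);
	}

	static struct clocksource clocksource_my_timer = {
		.name	= "my_timer",
		.rating	= 200,
		.read	= my_timer_read,
		.mask	= CLOCKSOURCE_MASK(32),
		.shift	= 20,
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,	/* was .is_continuous = 1 */
	};
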
@ -395,7 +395,7 @@ static struct clocksource clocksource_ixp4xx = {

	.read		= ixp4xx_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 20,
	.is_continuous	= 1,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

unsigned long ixp4xx_timer_freq = FREQ;

@ -62,7 +62,7 @@ static struct clocksource clocksource_netx = {

	.read		= netx_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 20,
	.is_continuous	= 1,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*

@ -112,7 +112,7 @@ static struct clocksource clocksource_pxa = {

	.read		= pxa_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 20,
	.is_continuous	= 1,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init pxa_timer_init(void)

@ -8,7 +8,6 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/clk.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -36,12 +35,11 @@ static struct eth_addr __initdata hw_addr[2];
|
|||
static struct eth_platform_data __initdata eth_data[2];
|
||||
extern struct lcdc_platform_data atstk1000_fb0_data;
|
||||
|
||||
static struct spi_board_info spi_board_info[] __initdata = {
|
||||
static struct spi_board_info spi0_board_info[] __initdata = {
|
||||
{
|
||||
/* QVGA display */
|
||||
.modalias = "ltv350qv",
|
||||
.controller_data = (void *)GPIO_PIN_PA(4),
|
||||
.max_speed_hz = 16000000,
|
||||
.bus_num = 0,
|
||||
.chip_select = 1,
|
||||
},
|
||||
};
|
||||
|
@ -149,8 +147,7 @@ static int __init atstk1002_init(void)
|
|||
|
||||
set_hw_addr(at32_add_device_eth(0, ð_data[0]));
|
||||
|
||||
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
|
||||
at32_add_device_spi(0);
|
||||
at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
|
||||
at32_add_device_lcdc(0, &atstk1000_fb0_data);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -8,14 +8,6 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
|
||||
#define sys_nfsservctl sys_ni_syscall
|
||||
#endif
|
||||
|
||||
#if !defined(CONFIG_SYSV_IPC)
|
||||
# define sys_ipc sys_ni_syscall
|
||||
#endif
|
||||
|
||||
.section .rodata,"a",@progbits
|
||||
.type sys_call_table,@object
|
||||
.global sys_call_table
|
||||
|
@ -129,7 +121,7 @@ sys_call_table:
|
|||
.long sys_getitimer /* 105 */
|
||||
.long sys_swapoff
|
||||
.long sys_sysinfo
|
||||
.long sys_ipc
|
||||
.long sys_ni_syscall /* was sys_ipc briefly */
|
||||
.long sys_sendfile
|
||||
.long sys_setdomainname /* 110 */
|
||||
.long sys_newuname
|
||||
|
@ -287,4 +279,16 @@ sys_call_table:
|
|||
.long sys_tee
|
||||
.long sys_vmsplice
|
||||
.long __sys_epoll_pwait /* 265 */
|
||||
.long sys_msgget
|
||||
.long sys_msgsnd
|
||||
.long sys_msgrcv
|
||||
.long sys_msgctl
|
||||
.long sys_semget /* 270 */
|
||||
.long sys_semop
|
||||
.long sys_semctl
|
||||
.long sys_semtimedop
|
||||
.long sys_shmat
|
||||
.long sys_shmget /* 275 */
|
||||
.long sys_shmdt
|
||||
.long sys_shmctl
|
||||
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
|
||||
|
|
|
@ -37,7 +37,7 @@ static struct clocksource clocksource_avr32 = {
|
|||
.read = read_cycle_count,
|
||||
.mask = CLOCKSOURCE_MASK(32),
|
||||
.shift = 16,
|
||||
.is_continuous = 1,
|
||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spi/spi.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
|
||||
|
@ -310,8 +311,6 @@ static void genclk_mode(struct clk *clk, int enabled)
|
|||
{
|
||||
u32 control;
|
||||
|
||||
BUG_ON(clk->index > 7);
|
||||
|
||||
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
|
||||
if (enabled)
|
||||
control |= SM_BIT(CEN);
|
||||
|
@ -325,11 +324,6 @@ static unsigned long genclk_get_rate(struct clk *clk)
|
|||
u32 control;
|
||||
unsigned long div = 1;
|
||||
|
||||
BUG_ON(clk->index > 7);
|
||||
|
||||
if (!clk->parent)
|
||||
return 0;
|
||||
|
||||
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
|
||||
if (control & SM_BIT(DIVEN))
|
||||
div = 2 * (SM_BFEXT(DIV, control) + 1);
|
||||
|
@ -342,11 +336,6 @@ static long genclk_set_rate(struct clk *clk, unsigned long rate, int apply)
|
|||
u32 control;
|
||||
unsigned long parent_rate, actual_rate, div;
|
||||
|
||||
BUG_ON(clk->index > 7);
|
||||
|
||||
if (!clk->parent)
|
||||
return 0;
|
||||
|
||||
parent_rate = clk->parent->get_rate(clk->parent);
|
||||
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
|
||||
|
||||
|
@ -373,11 +362,8 @@ int genclk_set_parent(struct clk *clk, struct clk *parent)
|
|||
{
|
||||
u32 control;
|
||||
|
||||
BUG_ON(clk->index > 7);
|
||||
|
||||
printk("clk %s: new parent %s (was %s)\n",
|
||||
clk->name, parent->name,
|
||||
clk->parent ? clk->parent->name : "(null)");
|
||||
clk->name, parent->name, clk->parent->name);
|
||||
|
||||
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
|
||||
|
||||
|
@ -399,6 +385,22 @@ int genclk_set_parent(struct clk *clk, struct clk *parent)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void __init genclk_init_parent(struct clk *clk)
|
||||
{
|
||||
u32 control;
|
||||
struct clk *parent;
|
||||
|
||||
BUG_ON(clk->index > 7);
|
||||
|
||||
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
|
||||
if (control & SM_BIT(OSCSEL))
|
||||
parent = (control & SM_BIT(PLLSEL)) ? &pll1 : &osc1;
|
||||
else
|
||||
parent = (control & SM_BIT(PLLSEL)) ? &pll0 : &osc0;
|
||||
|
||||
clk->parent = parent;
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------
|
||||
* System peripherals
|
||||
* -------------------------------------------------------------------- */
|
||||
|
@ -750,8 +752,41 @@ static struct resource atmel_spi1_resource[] = {
|
|||
DEFINE_DEV(atmel_spi, 1);
|
||||
DEV_CLK(spi_clk, atmel_spi1, pba, 1);
|
||||
|
||||
struct platform_device *__init at32_add_device_spi(unsigned int id)
|
||||
static void
|
||||
at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
|
||||
unsigned int n, const u8 *pins)
|
||||
{
|
||||
unsigned int pin, mode;
|
||||
|
||||
for (; n; n--, b++) {
|
||||
b->bus_num = bus_num;
|
||||
if (b->chip_select >= 4)
|
||||
continue;
|
||||
pin = (unsigned)b->controller_data;
|
||||
if (!pin) {
|
||||
pin = pins[b->chip_select];
|
||||
b->controller_data = (void *)pin;
|
||||
}
|
||||
mode = AT32_GPIOF_OUTPUT;
|
||||
if (!(b->mode & SPI_CS_HIGH))
|
||||
mode |= AT32_GPIOF_HIGH;
|
||||
at32_select_gpio(pin, mode);
|
||||
}
|
||||
}
|
||||
|
||||
struct platform_device *__init
|
||||
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
|
||||
{
|
||||
/*
|
||||
* Manage the chipselects as GPIOs, normally using the same pins
|
||||
* the SPI controller expects; but boards can use other pins.
|
||||
*/
|
||||
static u8 __initdata spi0_pins[] =
|
||||
{ GPIO_PIN_PA(3), GPIO_PIN_PA(4),
|
||||
GPIO_PIN_PA(5), GPIO_PIN_PA(20), };
|
||||
static u8 __initdata spi1_pins[] =
|
||||
{ GPIO_PIN_PB(2), GPIO_PIN_PB(3),
|
||||
GPIO_PIN_PB(4), GPIO_PIN_PA(27), };
|
||||
struct platform_device *pdev;
|
||||
|
||||
switch (id) {
|
||||
|
@ -760,14 +795,7 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
|
|||
select_peripheral(PA(0), PERIPH_A, 0); /* MISO */
|
||||
select_peripheral(PA(1), PERIPH_A, 0); /* MOSI */
|
||||
select_peripheral(PA(2), PERIPH_A, 0); /* SCK */
|
||||
|
||||
/* NPCS[2:0] */
|
||||
at32_select_gpio(GPIO_PIN_PA(3),
|
||||
AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
|
||||
at32_select_gpio(GPIO_PIN_PA(4),
|
||||
AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
|
||||
at32_select_gpio(GPIO_PIN_PA(5),
|
||||
AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
|
||||
at32_spi_setup_slaves(0, b, n, spi0_pins);
|
||||
break;
|
||||
|
||||
case 1:
|
||||
|
@ -775,20 +803,14 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
|
|||
select_peripheral(PB(0), PERIPH_B, 0); /* MISO */
|
||||
select_peripheral(PB(1), PERIPH_B, 0); /* MOSI */
|
||||
select_peripheral(PB(5), PERIPH_B, 0); /* SCK */
|
||||
|
||||
/* NPCS[2:0] */
|
||||
at32_select_gpio(GPIO_PIN_PB(2),
|
||||
AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
|
||||
at32_select_gpio(GPIO_PIN_PB(3),
|
||||
AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
|
||||
at32_select_gpio(GPIO_PIN_PB(4),
|
||||
AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
|
||||
at32_spi_setup_slaves(1, b, n, spi1_pins);
|
||||
break;
|
||||
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
spi_register_board_info(b, n);
|
||||
platform_device_register(pdev);
|
||||
return pdev;
|
||||
}
|
||||
|
@ -872,6 +894,50 @@ at32_add_device_lcdc(unsigned int id, struct lcdc_platform_data *data)
|
|||
return pdev;
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------
|
||||
* GCLK
|
||||
* -------------------------------------------------------------------- */
|
||||
static struct clk gclk0 = {
|
||||
.name = "gclk0",
|
||||
.mode = genclk_mode,
|
||||
.get_rate = genclk_get_rate,
|
||||
.set_rate = genclk_set_rate,
|
||||
.set_parent = genclk_set_parent,
|
||||
.index = 0,
|
||||
};
|
||||
static struct clk gclk1 = {
|
||||
.name = "gclk1",
|
||||
.mode = genclk_mode,
|
||||
.get_rate = genclk_get_rate,
|
||||
.set_rate = genclk_set_rate,
|
||||
.set_parent = genclk_set_parent,
|
||||
.index = 1,
|
||||
};
|
||||
static struct clk gclk2 = {
|
||||
.name = "gclk2",
|
||||
.mode = genclk_mode,
|
||||
.get_rate = genclk_get_rate,
|
||||
.set_rate = genclk_set_rate,
|
||||
.set_parent = genclk_set_parent,
|
||||
.index = 2,
|
||||
};
|
||||
static struct clk gclk3 = {
|
||||
.name = "gclk3",
|
||||
.mode = genclk_mode,
|
||||
.get_rate = genclk_get_rate,
|
||||
.set_rate = genclk_set_rate,
|
||||
.set_parent = genclk_set_parent,
|
||||
.index = 3,
|
||||
};
|
||||
static struct clk gclk4 = {
|
||||
.name = "gclk4",
|
||||
.mode = genclk_mode,
|
||||
.get_rate = genclk_get_rate,
|
||||
.set_rate = genclk_set_rate,
|
||||
.set_parent = genclk_set_parent,
|
||||
.index = 4,
|
||||
};
|
||||
|
||||
struct clk *at32_clock_list[] = {
|
||||
&osc32k,
|
||||
&osc0,
|
||||
|
@ -908,6 +974,11 @@ struct clk *at32_clock_list[] = {
|
|||
&atmel_spi1_spi_clk,
|
||||
&lcdc0_hclk,
|
||||
&lcdc0_pixclk,
|
||||
&gclk0,
|
||||
&gclk1,
|
||||
&gclk2,
|
||||
&gclk3,
|
||||
&gclk4,
|
||||
};
|
||||
unsigned int at32_nr_clocks = ARRAY_SIZE(at32_clock_list);
|
||||
|
||||
|
@ -936,6 +1007,13 @@ void __init at32_clock_init(void)
|
|||
if (sm_readl(sm, PM_PLL1) & SM_BIT(PLLOSC))
|
||||
pll1.parent = &osc1;
|
||||
|
||||
genclk_init_parent(&gclk0);
|
||||
genclk_init_parent(&gclk1);
|
||||
genclk_init_parent(&gclk2);
|
||||
genclk_init_parent(&gclk3);
|
||||
genclk_init_parent(&gclk4);
|
||||
genclk_init_parent(&lcdc0_pixclk);
|
||||
|
||||
/*
|
||||
* Turn on all clocks that have at least one user already, and
|
||||
* turn off everything else. We only do this for module
|
||||
|
|
|
@ -63,7 +63,11 @@ EXPORT_SYMBOL(clk_enable);
|
|||
|
||||
static void __clk_disable(struct clk *clk)
|
||||
{
|
||||
BUG_ON(clk->users == 0);
|
||||
if (clk->users == 0) {
|
||||
printk(KERN_ERR "%s: mismatched disable\n", clk->name);
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
if (--clk->users == 0 && clk->mode)
|
||||
clk->mode(clk, 0);
|
||||
|
|
|
@ -125,7 +125,6 @@ unsigned long sleep_phys_sp(void *sp)
|
|||
* Use a temporary sysctl number. Horrid, but will be cleaned up in 2.6
|
||||
* when all the PM interfaces exist nicely.
|
||||
*/
|
||||
#define CTL_PM 9899
|
||||
#define CTL_PM_SUSPEND 1
|
||||
#define CTL_PM_CMODE 2
|
||||
#define CTL_PM_P0 4
|
||||
|
@ -402,17 +401,53 @@ static int cm_sysctl(ctl_table *table, int __user *name, int nlen,
|
|||
|
||||
static struct ctl_table pm_table[] =
|
||||
{
|
||||
{CTL_PM_SUSPEND, "suspend", NULL, 0, 0200, NULL, &sysctl_pm_do_suspend},
|
||||
{CTL_PM_CMODE, "cmode", &clock_cmode_current, sizeof(int), 0644, NULL, &cmode_procctl, &cmode_sysctl, NULL},
|
||||
{CTL_PM_P0, "p0", &clock_p0_current, sizeof(int), 0644, NULL, &p0_procctl, &p0_sysctl, NULL},
|
||||
{CTL_PM_CM, "cm", &clock_cm_current, sizeof(int), 0644, NULL, &cm_procctl, &cm_sysctl, NULL},
|
||||
{0}
|
||||
{
|
||||
.ctl_name = CTL_PM_SUSPEND,
|
||||
.procname = "suspend",
|
||||
.data = NULL,
|
||||
.maxlen = 0,
|
||||
.mode = 0200,
|
||||
.proc_handler = &sysctl_pm_do_suspend,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_PM_CMODE,
|
||||
.procname = "cmode",
|
||||
.data = &clock_cmode_current,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &cmode_procctl,
|
||||
.strategy = &cmode_sysctl,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_PM_P0,
|
||||
.procname = "p0",
|
||||
.data = &clock_p0_current,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &p0_procctl,
|
||||
.strategy = &p0_sysctl,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_PM_CM,
|
||||
.procname = "cm",
|
||||
.data = &clock_cm_current,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &cm_procctl,
|
||||
.strategy = &cm_sysctl,
|
||||
},
|
||||
{ .ctl_name = 0}
|
||||
};
|
||||
|
||||
static struct ctl_table pm_dir_table[] =
|
||||
{
|
||||
{CTL_PM, "pm", NULL, 0, 0555, pm_table},
|
||||
{0}
|
||||
{
|
||||
.ctl_name = CTL_PM,
|
||||
.procname = "pm",
|
||||
.mode = 0555,
|
||||
.child = pm_table,
|
||||
},
|
||||
{ .ctl_name = 0}
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -420,7 +455,7 @@ static struct ctl_table pm_dir_table[] =

 */
static int __init pm_init(void)
{
	register_sysctl_table(pm_dir_table, 1);
	register_sysctl_table(pm_dir_table);
	return 0;
}

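The second argument (the old insert-at-head flag) is dropped throughout this
series; registration now takes just the table pointer. A minimal sketch of
the new calling convention (the "my_" names are hypothetical):

	static struct ctl_table_header *my_sysctl_header;

	static int __init my_sysctl_init(void)
	{
		/* One-argument form; returns NULL on failure. */
		my_sysctl_header = register_sysctl_table(my_dir_table);
		return my_sysctl_header ? 0 : -ENOMEM;
	}
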
|
@ -175,22 +175,40 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
|
|||
*/
|
||||
static struct ctl_table frv_table[] =
|
||||
{
|
||||
{ 1, "cache-mode", NULL, 0, 0644, NULL, &procctl_frv_cachemode },
|
||||
{
|
||||
.ctl_name = 1,
|
||||
.procname = "cache-mode",
|
||||
.data = NULL,
|
||||
.maxlen = 0,
|
||||
.mode = 0644,
|
||||
.proc_handler = &procctl_frv_cachemode,
|
||||
},
|
||||
#ifdef CONFIG_MMU
|
||||
{ 2, "pin-cxnr", NULL, 0, 0644, NULL, &procctl_frv_pin_cxnr },
|
||||
{
|
||||
.ctl_name = 2,
|
||||
.procname = "pin-cxnr",
|
||||
.data = NULL,
|
||||
.maxlen = 0,
|
||||
.mode = 0644,
|
||||
.proc_handler = &procctl_frv_pin_cxnr
|
||||
},
|
||||
#endif
|
||||
{ 0 }
|
||||
{}
|
||||
};
|
||||
|
||||
/*
|
||||
* Use a temporary sysctl number. Horrid, but will be cleaned up in 2.6
|
||||
* when all the PM interfaces exist nicely.
|
||||
*/
|
||||
#define CTL_FRV 9898
|
||||
static struct ctl_table frv_dir_table[] =
|
||||
{
|
||||
{CTL_FRV, "frv", NULL, 0, 0555, frv_table},
|
||||
{0}
|
||||
{
|
||||
.ctl_name = CTL_FRV,
|
||||
.procname = "frv",
|
||||
.mode = 0555,
|
||||
.child = frv_table
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -198,7 +216,7 @@ static struct ctl_table frv_dir_table[] =
|
|||
*/
|
||||
static int __init frv_sysctl_init(void)
|
||||
{
|
||||
register_sysctl_table(frv_dir_table, 1);
|
||||
register_sysctl_table(frv_dir_table);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,18 @@ config GENERIC_TIME
|
|||
bool
|
||||
default y
|
||||
|
||||
config CLOCKSOURCE_WATCHDOG
|
||||
bool
|
||||
default y
|
||||
|
||||
config GENERIC_CLOCKEVENTS
|
||||
bool
|
||||
default y
|
||||
|
||||
config GENERIC_CLOCKEVENTS_BROADCAST
|
||||
bool
|
||||
default y
|
||||
|
||||
config LOCKDEP_SUPPORT
|
||||
bool
|
||||
default y
|
||||
|
@ -74,6 +86,8 @@ source "init/Kconfig"
|
|||
|
||||
menu "Processor type and features"
|
||||
|
||||
source "kernel/time/Kconfig"
|
||||
|
||||
config SMP
|
||||
bool "Symmetric multi-processing support"
|
||||
---help---
|
||||
|
@ -203,6 +217,15 @@ config PARAVIRT
|
|||
However, when run without a hypervisor the kernel is
|
||||
theoretically slower. If in doubt, say N.
|
||||
|
||||
config VMI
	bool "VMI Paravirt-ops support"
	depends on PARAVIRT && !NO_HZ
	default y
	help
	  VMI provides a paravirtualized interface to multiple hypervisors,
	  including VMware ESX Server and Xen, by connecting to a ROM module
	  provided by the hypervisor.
|
||||
|
||||
config ACPI_SRAT
|
||||
bool
|
||||
default y
|
||||
|
@ -1263,3 +1286,12 @@ config X86_TRAMPOLINE
|
|||
config KTIME_SCALAR
|
||||
bool
|
||||
default y
|
||||
|
||||
config NO_IDLE_HZ
|
||||
bool
|
||||
depends on PARAVIRT
|
||||
default y
|
||||
help
|
||||
Switches the regular HZ timer off when the system is going idle.
|
||||
This helps a hypervisor detect that the Linux system is idle,
|
||||
reducing the overhead of idle systems.
|
||||
|
|
|
@ -226,11 +226,6 @@ config X86_CMPXCHG
|
|||
depends on !M386
|
||||
default y
|
||||
|
||||
config X86_XADD
|
||||
bool
|
||||
depends on !M386
|
||||
default y
|
||||
|
||||
config X86_L1_CACHE_SHIFT
|
||||
int
|
||||
default "7" if MPENTIUM4 || X86_GENERIC
|
||||
|
|
|
@ -87,7 +87,7 @@ config DOUBLEFAULT
|
|||
|
||||
config DEBUG_PARAVIRT
|
||||
bool "Enable some paravirtualization debugging"
|
||||
default y
|
||||
default n
|
||||
depends on PARAVIRT && DEBUG_KERNEL
|
||||
help
|
||||
Currently deliberately clobbers regs which are allowed to be
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.20-rc3
|
||||
# Fri Jan 5 11:54:46 2007
|
||||
# Linux kernel version: 2.6.20-git8
|
||||
# Tue Feb 13 11:25:18 2007
|
||||
#
|
||||
CONFIG_X86_32=y
|
||||
CONFIG_GENERIC_TIME=y
|
||||
|
@ -10,6 +10,7 @@ CONFIG_STACKTRACE_SUPPORT=y
|
|||
CONFIG_SEMAPHORE_SLEEPERS=y
|
||||
CONFIG_X86=y
|
||||
CONFIG_MMU=y
|
||||
CONFIG_ZONE_DMA=y
|
||||
CONFIG_GENERIC_ISA_DMA=y
|
||||
CONFIG_GENERIC_IOMAP=y
|
||||
CONFIG_GENERIC_BUG=y
|
||||
|
@ -139,7 +140,6 @@ CONFIG_MPENTIUMIII=y
|
|||
# CONFIG_MVIAC3_2 is not set
|
||||
CONFIG_X86_GENERIC=y
|
||||
CONFIG_X86_CMPXCHG=y
|
||||
CONFIG_X86_XADD=y
|
||||
CONFIG_X86_L1_CACHE_SHIFT=7
|
||||
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
|
||||
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
|
||||
|
@ -198,6 +198,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
|
|||
# CONFIG_SPARSEMEM_STATIC is not set
|
||||
CONFIG_SPLIT_PTLOCK_CPUS=4
|
||||
CONFIG_RESOURCES_64BIT=y
|
||||
CONFIG_ZONE_DMA_FLAG=1
|
||||
# CONFIG_HIGHPTE is not set
|
||||
# CONFIG_MATH_EMULATION is not set
|
||||
CONFIG_MTRR=y
|
||||
|
@ -211,6 +212,7 @@ CONFIG_HZ_250=y
|
|||
CONFIG_HZ=250
|
||||
# CONFIG_KEXEC is not set
|
||||
# CONFIG_CRASH_DUMP is not set
|
||||
CONFIG_PHYSICAL_START=0x100000
|
||||
# CONFIG_RELOCATABLE is not set
|
||||
CONFIG_PHYSICAL_ALIGN=0x100000
|
||||
# CONFIG_HOTPLUG_CPU is not set
|
||||
|
@ -229,13 +231,14 @@ CONFIG_PM_SYSFS_DEPRECATED=y
|
|||
# ACPI (Advanced Configuration and Power Interface) Support
|
||||
#
|
||||
CONFIG_ACPI=y
|
||||
CONFIG_ACPI_PROCFS=y
|
||||
CONFIG_ACPI_AC=y
|
||||
CONFIG_ACPI_BATTERY=y
|
||||
CONFIG_ACPI_BUTTON=y
|
||||
# CONFIG_ACPI_VIDEO is not set
|
||||
# CONFIG_ACPI_HOTKEY is not set
|
||||
CONFIG_ACPI_FAN=y
|
||||
# CONFIG_ACPI_DOCK is not set
|
||||
# CONFIG_ACPI_BAY is not set
|
||||
CONFIG_ACPI_PROCESSOR=y
|
||||
CONFIG_ACPI_THERMAL=y
|
||||
# CONFIG_ACPI_ASUS is not set
|
||||
|
@ -306,7 +309,6 @@ CONFIG_PCI_DIRECT=y
|
|||
CONFIG_PCI_MMCONFIG=y
|
||||
# CONFIG_PCIEPORTBUS is not set
|
||||
CONFIG_PCI_MSI=y
|
||||
# CONFIG_PCI_MULTITHREAD_PROBE is not set
|
||||
# CONFIG_PCI_DEBUG is not set
|
||||
# CONFIG_HT_IRQ is not set
|
||||
CONFIG_ISA_DMA_API=y
|
||||
|
@ -347,6 +349,7 @@ CONFIG_UNIX=y
|
|||
CONFIG_XFRM=y
|
||||
# CONFIG_XFRM_USER is not set
|
||||
# CONFIG_XFRM_SUB_POLICY is not set
|
||||
# CONFIG_XFRM_MIGRATE is not set
|
||||
# CONFIG_NET_KEY is not set
|
||||
CONFIG_INET=y
|
||||
CONFIG_IP_MULTICAST=y
|
||||
|
@ -446,6 +449,7 @@ CONFIG_STANDALONE=y
|
|||
CONFIG_PREVENT_FIRMWARE_BUILD=y
|
||||
CONFIG_FW_LOADER=y
|
||||
# CONFIG_DEBUG_DRIVER is not set
|
||||
# CONFIG_DEBUG_DEVRES is not set
|
||||
# CONFIG_SYS_HYPERVISOR is not set
|
||||
|
||||
#
|
||||
|
@ -466,8 +470,7 @@ CONFIG_FW_LOADER=y
|
|||
#
|
||||
# Plug and Play support
|
||||
#
|
||||
CONFIG_PNP=y
|
||||
CONFIG_PNPACPI=y
|
||||
# CONFIG_PNP is not set
|
||||
|
||||
#
|
||||
# Block devices
|
||||
|
@ -515,6 +518,7 @@ CONFIG_BLK_DEV_IDECD=y
|
|||
# CONFIG_BLK_DEV_IDETAPE is not set
|
||||
# CONFIG_BLK_DEV_IDEFLOPPY is not set
|
||||
# CONFIG_BLK_DEV_IDESCSI is not set
|
||||
CONFIG_BLK_DEV_IDEACPI=y
|
||||
# CONFIG_IDE_TASK_IOCTL is not set
|
||||
|
||||
#
|
||||
|
@ -547,6 +551,7 @@ CONFIG_BLK_DEV_AMD74XX=y
|
|||
# CONFIG_BLK_DEV_JMICRON is not set
|
||||
# CONFIG_BLK_DEV_SC1200 is not set
|
||||
CONFIG_BLK_DEV_PIIX=y
|
||||
# CONFIG_BLK_DEV_IT8213 is not set
|
||||
# CONFIG_BLK_DEV_IT821X is not set
|
||||
# CONFIG_BLK_DEV_NS87415 is not set
|
||||
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
|
||||
|
@ -557,6 +562,7 @@ CONFIG_BLK_DEV_PIIX=y
|
|||
# CONFIG_BLK_DEV_SLC90E66 is not set
|
||||
# CONFIG_BLK_DEV_TRM290 is not set
|
||||
# CONFIG_BLK_DEV_VIA82CXXX is not set
|
||||
# CONFIG_BLK_DEV_TC86C001 is not set
|
||||
# CONFIG_IDE_ARM is not set
|
||||
CONFIG_BLK_DEV_IDEDMA=y
|
||||
# CONFIG_IDEDMA_IVB is not set
|
||||
|
@ -655,6 +661,7 @@ CONFIG_AIC79XX_DEBUG_MASK=0
|
|||
# Serial ATA (prod) and Parallel ATA (experimental) drivers
|
||||
#
|
||||
CONFIG_ATA=y
|
||||
# CONFIG_ATA_NONSTANDARD is not set
|
||||
CONFIG_SATA_AHCI=y
|
||||
CONFIG_SATA_SVW=y
|
||||
CONFIG_ATA_PIIX=y
|
||||
|
@ -670,6 +677,7 @@ CONFIG_SATA_SIL=y
|
|||
# CONFIG_SATA_ULI is not set
|
||||
CONFIG_SATA_VIA=y
|
||||
# CONFIG_SATA_VITESSE is not set
|
||||
# CONFIG_SATA_INIC162X is not set
|
||||
CONFIG_SATA_INTEL_COMBINED=y
|
||||
# CONFIG_PATA_ALI is not set
|
||||
# CONFIG_PATA_AMD is not set
|
||||
|
@ -687,6 +695,7 @@ CONFIG_SATA_INTEL_COMBINED=y
|
|||
# CONFIG_PATA_HPT3X2N is not set
|
||||
# CONFIG_PATA_HPT3X3 is not set
|
||||
# CONFIG_PATA_IT821X is not set
|
||||
# CONFIG_PATA_IT8213 is not set
|
||||
# CONFIG_PATA_JMICRON is not set
|
||||
# CONFIG_PATA_TRIFLEX is not set
|
||||
# CONFIG_PATA_MARVELL is not set
|
||||
|
@ -739,9 +748,7 @@ CONFIG_IEEE1394=y
|
|||
# Subsystem Options
|
||||
#
|
||||
# CONFIG_IEEE1394_VERBOSEDEBUG is not set
|
||||
# CONFIG_IEEE1394_OUI_DB is not set
|
||||
# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set
|
||||
# CONFIG_IEEE1394_EXPORT_FULL_API is not set
|
||||
|
||||
#
|
||||
# Device Drivers
|
||||
|
@ -766,6 +773,11 @@ CONFIG_IEEE1394_RAWIO=y
|
|||
#
|
||||
# CONFIG_I2O is not set
|
||||
|
||||
#
|
||||
# Macintosh device drivers
|
||||
#
|
||||
# CONFIG_MAC_EMUMOUSEBTN is not set
|
||||
|
||||
#
|
||||
# Network device support
|
||||
#
|
||||
|
@ -833,6 +845,7 @@ CONFIG_8139TOO=y
|
|||
# CONFIG_SUNDANCE is not set
|
||||
# CONFIG_TLAN is not set
|
||||
# CONFIG_VIA_RHINE is not set
|
||||
# CONFIG_SC92031 is not set
|
||||
|
||||
#
|
||||
# Ethernet (1000 Mbit)
|
||||
|
@ -855,11 +868,13 @@ CONFIG_SKY2=y
|
|||
CONFIG_TIGON3=y
|
||||
CONFIG_BNX2=y
|
||||
# CONFIG_QLA3XXX is not set
|
||||
# CONFIG_ATL1 is not set
|
||||
|
||||
#
|
||||
# Ethernet (10000 Mbit)
|
||||
#
|
||||
# CONFIG_CHELSIO_T1 is not set
|
||||
# CONFIG_CHELSIO_T3 is not set
|
||||
# CONFIG_IXGB is not set
|
||||
# CONFIG_S2IO is not set
|
||||
# CONFIG_MYRI10GE is not set
|
||||
|
@ -1090,6 +1105,7 @@ CONFIG_SOUND=y
|
|||
# Open Sound System
|
||||
#
|
||||
CONFIG_SOUND_PRIME=y
|
||||
CONFIG_OBSOLETE_OSS=y
|
||||
# CONFIG_SOUND_BT878 is not set
|
||||
# CONFIG_SOUND_ES1371 is not set
|
||||
CONFIG_SOUND_ICH=y
|
||||
|
@ -1103,6 +1119,7 @@ CONFIG_SOUND_ICH=y
|
|||
# HID Devices
|
||||
#
|
||||
CONFIG_HID=y
|
||||
# CONFIG_HID_DEBUG is not set
|
||||
|
||||
#
|
||||
# USB support
|
||||
|
@ -1117,10 +1134,8 @@ CONFIG_USB=y
|
|||
# Miscellaneous USB options
|
||||
#
|
||||
CONFIG_USB_DEVICEFS=y
|
||||
# CONFIG_USB_BANDWIDTH is not set
|
||||
# CONFIG_USB_DYNAMIC_MINORS is not set
|
||||
# CONFIG_USB_SUSPEND is not set
|
||||
# CONFIG_USB_MULTITHREAD_PROBE is not set
|
||||
# CONFIG_USB_OTG is not set
|
||||
|
||||
#
|
||||
|
@ -1130,9 +1145,11 @@ CONFIG_USB_EHCI_HCD=y
|
|||
# CONFIG_USB_EHCI_SPLIT_ISO is not set
|
||||
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
|
||||
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
|
||||
# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
|
||||
# CONFIG_USB_ISP116X_HCD is not set
|
||||
CONFIG_USB_OHCI_HCD=y
|
||||
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
|
||||
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
|
||||
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
|
||||
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
|
||||
CONFIG_USB_UHCI_HCD=y
|
||||
# CONFIG_USB_SL811_HCD is not set
|
||||
|
@ -1183,6 +1200,7 @@ CONFIG_USB_HID=y
|
|||
# CONFIG_USB_ATI_REMOTE2 is not set
|
||||
# CONFIG_USB_KEYSPAN_REMOTE is not set
|
||||
# CONFIG_USB_APPLETOUCH is not set
|
||||
# CONFIG_USB_GTCO is not set
|
||||
|
||||
#
|
||||
# USB Imaging devices
|
||||
|
@ -1287,6 +1305,10 @@ CONFIG_USB_MON=y
|
|||
# DMA Devices
|
||||
#
|
||||
|
||||
#
|
||||
# Auxiliary Display support
|
||||
#
|
||||
|
||||
#
|
||||
# Virtualization
|
||||
#
|
||||
|
@ -1480,6 +1502,7 @@ CONFIG_UNUSED_SYMBOLS=y
|
|||
# CONFIG_DEBUG_FS is not set
|
||||
# CONFIG_HEADERS_CHECK is not set
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
# CONFIG_DEBUG_SHIRQ is not set
|
||||
CONFIG_LOG_BUF_SHIFT=18
|
||||
CONFIG_DETECT_SOFTLOCKUP=y
|
||||
# CONFIG_SCHEDSTATS is not set
|
||||
|
@ -1488,7 +1511,6 @@ CONFIG_DETECT_SOFTLOCKUP=y
|
|||
# CONFIG_RT_MUTEX_TESTER is not set
|
||||
# CONFIG_DEBUG_SPINLOCK is not set
|
||||
# CONFIG_DEBUG_MUTEXES is not set
|
||||
# CONFIG_DEBUG_RWSEMS is not set
|
||||
# CONFIG_DEBUG_LOCK_ALLOC is not set
|
||||
# CONFIG_PROVE_LOCKING is not set
|
||||
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
|
||||
|
@ -1533,7 +1555,8 @@ CONFIG_CRC32=y
|
|||
# CONFIG_LIBCRC32C is not set
|
||||
CONFIG_ZLIB_INFLATE=y
|
||||
CONFIG_PLIST=y
|
||||
CONFIG_IOMAP_COPY=y
|
||||
CONFIG_HAS_IOMEM=y
|
||||
CONFIG_HAS_IOPORT=y
|
||||
CONFIG_GENERIC_HARDIRQS=y
|
||||
CONFIG_GENERIC_IRQ_PROBE=y
|
||||
CONFIG_GENERIC_PENDING_IRQ=y
|
||||
|
|
|
@ -18,7 +18,7 @@ obj-$(CONFIG_X86_MSR) += msr.o
|
|||
obj-$(CONFIG_X86_CPUID) += cpuid.o
|
||||
obj-$(CONFIG_MICROCODE) += microcode.o
|
||||
obj-$(CONFIG_APM) += apm.o
|
||||
obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
|
||||
obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
|
||||
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
|
||||
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
|
||||
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
|
||||
|
@ -32,7 +32,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
|
|||
obj-$(CONFIG_MODULES) += module.o
|
||||
obj-y += sysenter.o vsyscall.o
|
||||
obj-$(CONFIG_ACPI_SRAT) += srat.o
|
||||
obj-$(CONFIG_HPET_TIMER) += time_hpet.o
|
||||
obj-$(CONFIG_EFI) += efi.o efi_stub.o
|
||||
obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
|
||||
obj-$(CONFIG_VM86) += vm86.o
|
||||
|
@ -40,8 +39,9 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
|
|||
obj-$(CONFIG_HPET_TIMER) += hpet.o
|
||||
obj-$(CONFIG_K8_NB) += k8.o
|
||||
|
||||
# Make sure this is linked after any other paravirt_ops structs: see head.S
|
||||
obj-$(CONFIG_VMI) += vmi.o vmitime.o
|
||||
obj-$(CONFIG_PARAVIRT) += paravirt.o
|
||||
obj-y += pcspeaker.o
|
||||
|
||||
EXTRA_AFLAGS := -traditional
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
|
||||
#include <linux/init.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/acpi_pmtmr.h>
|
||||
#include <linux/efi.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -615,6 +616,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_HPET_TIMER
|
||||
#include <asm/hpet.h>
|
||||
|
||||
static int __init acpi_parse_hpet(struct acpi_table_header *table)
|
||||
{
|
||||
|
@ -645,24 +647,11 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
|
|||
hpet_res->end = (1 * 1024) - 1;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
vxtime.hpet_address = hpet_tbl->address.address;
|
||||
|
||||
hpet_address = hpet_tbl->address.address;
|
||||
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
|
||||
hpet_tbl->id, vxtime.hpet_address);
|
||||
hpet_tbl->id, hpet_address);
|
||||
|
||||
res_start = vxtime.hpet_address;
|
||||
#else /* X86 */
|
||||
{
|
||||
extern unsigned long hpet_address;
|
||||
|
||||
hpet_address = hpet_tbl->address.address;
|
||||
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
|
||||
hpet_tbl->id, hpet_address);
|
||||
|
||||
res_start = hpet_address;
|
||||
}
|
||||
#endif /* X86 */
|
||||
res_start = hpet_address;
|
||||
|
||||
if (hpet_res) {
|
||||
hpet_res->start = res_start;
|
||||
|
@ -676,10 +665,6 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
|
|||
#define acpi_parse_hpet NULL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_PM_TIMER
|
||||
extern u32 pmtmr_ioport;
|
||||
#endif
|
||||
|
||||
static int __init acpi_parse_fadt(struct acpi_table_header *table)
|
||||
{
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -211,6 +211,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/apm_bios.h>
|
||||
#include <linux/init.h>
|
||||
|
@ -235,7 +236,6 @@
|
|||
|
||||
#include "io_ports.h"
|
||||
|
||||
extern unsigned long get_cmos_time(void);
|
||||
extern void machine_real_restart(unsigned char *, int);
|
||||
|
||||
#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
|
||||
|
@ -1175,28 +1175,6 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
|
|||
spin_unlock(&user_list_lock);
|
||||
}
|
||||
|
||||
static void set_time(void)
|
||||
{
|
||||
struct timespec ts;
|
||||
if (got_clock_diff) { /* Must know time zone in order to set clock */
|
||||
ts.tv_sec = get_cmos_time() + clock_cmos_diff;
|
||||
ts.tv_nsec = 0;
|
||||
do_settimeofday(&ts);
|
||||
}
|
||||
}
|
||||
|
||||
static void get_time_diff(void)
|
||||
{
|
||||
#ifndef CONFIG_APM_RTC_IS_GMT
|
||||
/*
|
||||
* Estimate time zone so that set_time can update the clock
|
||||
*/
|
||||
clock_cmos_diff = -get_cmos_time();
|
||||
clock_cmos_diff += get_seconds();
|
||||
got_clock_diff = 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void reinit_timer(void)
|
||||
{
|
||||
#ifdef INIT_TIMER_AFTER_SUSPEND
|
||||
|
@ -1236,19 +1214,6 @@ static int suspend(int vetoable)
|
|||
local_irq_disable();
|
||||
device_power_down(PMSG_SUSPEND);
|
||||
|
||||
/* serialize with the timer interrupt */
|
||||
write_seqlock(&xtime_lock);
|
||||
|
||||
/* protect against access to timer chip registers */
|
||||
spin_lock(&i8253_lock);
|
||||
|
||||
get_time_diff();
|
||||
/*
|
||||
* Irq spinlock must be dropped around set_system_power_state.
|
||||
* We'll undo any timer changes due to interrupts below.
|
||||
*/
|
||||
spin_unlock(&i8253_lock);
|
||||
write_sequnlock(&xtime_lock);
|
||||
local_irq_enable();
|
||||
|
||||
save_processor_state();
|
||||
|
@ -1257,7 +1222,6 @@ static int suspend(int vetoable)
|
|||
restore_processor_state();
|
||||
|
||||
local_irq_disable();
|
||||
set_time();
|
||||
reinit_timer();
|
||||
|
||||
if (err == APM_NO_ERROR)
|
||||
|
@ -1287,11 +1251,6 @@ static void standby(void)
|
|||
|
||||
local_irq_disable();
|
||||
device_power_down(PMSG_SUSPEND);
|
||||
/* serialize with the timer interrupt */
|
||||
write_seqlock(&xtime_lock);
|
||||
/* If needed, notify drivers here */
|
||||
get_time_diff();
|
||||
write_sequnlock(&xtime_lock);
|
||||
local_irq_enable();
|
||||
|
||||
err = set_system_power_state(APM_STATE_STANDBY);
|
||||
|
@ -1385,7 +1344,6 @@ static void check_events(void)
|
|||
ignore_bounce = 1;
|
||||
if ((event != APM_NORMAL_RESUME)
|
||||
|| (ignore_normal_resume == 0)) {
|
||||
set_time();
|
||||
device_resume();
|
||||
pm_send_all(PM_RESUME, (void *)0);
|
||||
queue_event(event, NULL);
|
||||
|
@ -1401,7 +1359,6 @@ static void check_events(void)
|
|||
break;
|
||||
|
||||
case APM_UPDATE_TIME:
|
||||
set_time();
|
||||
break;
|
||||
|
||||
case APM_CRITICAL_SUSPEND:
|
||||
|
@ -1636,9 +1593,8 @@ static int do_open(struct inode * inode, struct file * filp)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int apm_get_info(char *buf, char **start, off_t fpos, int length)
|
||||
static int proc_apm_show(struct seq_file *m, void *v)
|
||||
{
|
||||
char * p;
|
||||
unsigned short bx;
|
||||
unsigned short cx;
|
||||
unsigned short dx;
|
||||
|
@ -1650,8 +1606,6 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
|
|||
int time_units = -1;
|
||||
char *units = "?";
|
||||
|
||||
p = buf;
|
||||
|
||||
if ((num_online_cpus() == 1) &&
|
||||
!(error = apm_get_power_status(&bx, &cx, &dx))) {
|
||||
ac_line_status = (bx >> 8) & 0xff;
|
||||
|
@ -1705,7 +1659,7 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
|
|||
-1: Unknown
|
||||
8) min = minutes; sec = seconds */
|
||||
|
||||
p += sprintf(p, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
|
||||
seq_printf(m, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
|
||||
driver_version,
|
||||
(apm_info.bios.version >> 8) & 0xff,
|
||||
apm_info.bios.version & 0xff,
|
||||
|
@ -1716,10 +1670,22 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
|
|||
percentage,
|
||||
time_units,
|
||||
units);
|
||||
|
||||
return p - buf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int proc_apm_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, proc_apm_show, NULL);
|
||||
}
|
||||
|
||||
static const struct file_operations apm_file_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = proc_apm_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int apm(void *unused)
|
||||
{
|
||||
unsigned short bx;
|
||||
|
@ -2341,9 +2307,9 @@ static int __init apm_init(void)
|
|||
set_base(gdt[APM_DS >> 3],
|
||||
__va((unsigned long)apm_info.bios.dseg << 4));
|
||||
|
||||
apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info);
|
||||
apm_proc = create_proc_entry("apm", 0, NULL);
|
||||
if (apm_proc)
|
||||
apm_proc->owner = THIS_MODULE;
|
||||
apm_proc->proc_fops = &apm_file_ops;
|
||||
|
||||
kapmd_task = kthread_create(apm, NULL, "kapmd");
|
||||
if (IS_ERR(kapmd_task)) {
|
||||
|
|
|
@ -72,7 +72,7 @@ void foo(void)
|
|||
OFFSET(PT_EAX, pt_regs, eax);
|
||||
OFFSET(PT_DS, pt_regs, xds);
|
||||
OFFSET(PT_ES, pt_regs, xes);
|
||||
OFFSET(PT_GS, pt_regs, xgs);
|
||||
OFFSET(PT_FS, pt_regs, xfs);
|
||||
OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
|
||||
OFFSET(PT_EIP, pt_regs, eip);
|
||||
OFFSET(PT_CS, pt_regs, xcs);
|
||||
|
|
|
@ -605,7 +605,7 @@ void __init early_cpu_init(void)
|
|||
struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
|
||||
{
|
||||
memset(regs, 0, sizeof(struct pt_regs));
|
||||
regs->xgs = __KERNEL_PDA;
|
||||
regs->xfs = __KERNEL_PDA;
|
||||
return regs;
|
||||
}
|
||||
|
||||
|
@ -662,12 +662,12 @@ struct i386_pda boot_pda = {
|
|||
.pcurrent = &init_task,
|
||||
};
|
||||
|
||||
static inline void set_kernel_gs(void)
|
||||
static inline void set_kernel_fs(void)
|
||||
{
|
||||
/* Set %gs for this CPU's PDA. Memory clobber is to create a
|
||||
/* Set %fs for this CPU's PDA. Memory clobber is to create a
|
||||
barrier with respect to any PDA operations, so the compiler
|
||||
doesn't move any before here. */
|
||||
asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
|
||||
asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
|
||||
}
|
||||
|
||||
/* Initialize the CPU's GDT and PDA. The boot CPU does this for
|
||||
|
@ -718,7 +718,7 @@ void __cpuinit cpu_set_gdt(int cpu)
|
|||
the boot CPU, this will transition from the boot gdt+pda to
|
||||
the real ones). */
|
||||
load_gdt(cpu_gdt_descr);
|
||||
set_kernel_gs();
|
||||
set_kernel_fs();
|
||||
}
|
||||
|
||||
/* Common CPU init for both boot and secondary CPUs */
|
||||
|
@ -764,8 +764,8 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
|
|||
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
|
||||
#endif
|
||||
|
||||
/* Clear %fs. */
|
||||
asm volatile ("mov %0, %%fs" : : "r" (0));
|
||||
/* Clear %gs. */
|
||||
asm volatile ("mov %0, %%gs" : : "r" (0));
|
||||
|
||||
/* Clear all 6 debug registers: */
|
||||
set_debugreg(0, 0);
|
||||
|
|
|
@ -217,6 +217,15 @@ config X86_LONGHAUL
|
|||
|
||||
If in doubt, say N.
|
||||
|
||||
config X86_E_POWERSAVER
|
||||
tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)"
|
||||
select CPU_FREQ_TABLE
|
||||
depends on EXPERIMENTAL
|
||||
help
|
||||
This adds the CPUFreq driver for VIA C7 processors.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
comment "shared options"
|
||||
|
||||
config X86_ACPI_CPUFREQ_PROC_INTF
|
||||
|
|
|
@ -2,6 +2,7 @@ obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
|
|||
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
|
||||
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
|
||||
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
|
||||
obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
|
||||
obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
|
||||
obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
|
||||
obj-$(CONFIG_X86_LONGRUN) += longrun.o
|
||||
|
|
|
@ -0,0 +1,334 @@
|
|||
/*
|
||||
* Based on documentation provided by Dave Jones. Thanks!
|
||||
*
|
||||
* Licensed under the terms of the GNU GPL License version 2.
|
||||
*
|
||||
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/msr.h>
|
||||
#include <asm/tsc.h>
|
||||
#include <asm/timex.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/delay.h>
|
||||
|
||||
#define EPS_BRAND_C7M 0
|
||||
#define EPS_BRAND_C7 1
|
||||
#define EPS_BRAND_EDEN 2
|
||||
#define EPS_BRAND_C3 3
|
||||
|
||||
struct eps_cpu_data {
|
||||
u32 fsb;
|
||||
struct cpufreq_frequency_table freq_table[];
|
||||
};
|
||||
|
||||
static struct eps_cpu_data *eps_cpu[NR_CPUS];
|
||||
|
||||
|
||||
static unsigned int eps_get(unsigned int cpu)
|
||||
{
|
||||
struct eps_cpu_data *centaur;
|
||||
u32 lo, hi;
|
||||
|
||||
if (cpu)
|
||||
return 0;
|
||||
centaur = eps_cpu[cpu];
|
||||
if (centaur == NULL)
|
||||
return 0;
|
||||
|
||||
/* Return current frequency */
|
||||
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
|
||||
return centaur->fsb * ((lo >> 8) & 0xff);
|
||||
}
|
||||
|
||||
static int eps_set_state(struct eps_cpu_data *centaur,
|
||||
unsigned int cpu,
|
||||
u32 dest_state)
|
||||
{
|
||||
struct cpufreq_freqs freqs;
|
||||
u32 lo, hi;
|
||||
int err = 0;
|
||||
int i;
|
||||
|
||||
freqs.old = eps_get(cpu);
|
||||
freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
|
||||
freqs.cpu = cpu;
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
/* Wait while CPU is busy */
|
||||
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
|
||||
i = 0;
|
||||
while (lo & ((1 << 16) | (1 << 17))) {
|
||||
udelay(16);
|
||||
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
|
||||
i++;
|
||||
if (unlikely(i > 64)) {
|
||||
err = -ENODEV;
|
||||
goto postchange;
|
||||
}
|
||||
}
|
||||
/* Set new multiplier and voltage */
|
||||
wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
|
||||
/* Wait until transition end */
|
||||
i = 0;
|
||||
do {
|
||||
udelay(16);
|
||||
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
|
||||
i++;
|
||||
if (unlikely(i > 64)) {
|
||||
err = -ENODEV;
|
||||
goto postchange;
|
||||
}
|
||||
} while (lo & ((1 << 16) | (1 << 17)));
|
||||
|
||||
/* Return current frequency */
|
||||
postchange:
|
||||
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
|
||||
freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
|
||||
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
|
||||
return err;
|
||||
}
|
||||

static int eps_target(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation)
{
	struct eps_cpu_data *centaur;
	unsigned int newstate = 0;
	unsigned int cpu = policy->cpu;
	unsigned int dest_state;
	int ret;

	if (unlikely(eps_cpu[cpu] == NULL))
		return -ENODEV;
	centaur = eps_cpu[cpu];

	if (unlikely(cpufreq_frequency_table_target(policy,
			&eps_cpu[cpu]->freq_table[0],
			target_freq,
			relation,
			&newstate))) {
		return -EINVAL;
	}

	/* Make frequency transition */
	dest_state = centaur->freq_table[newstate].index & 0xffff;
	ret = eps_set_state(centaur, cpu, dest_state);
	if (ret)
		printk(KERN_ERR "eps: Timeout!\n");
	return ret;
}

static int eps_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy,
			&eps_cpu[policy->cpu]->freq_table[0]);
}

static int eps_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	u32 lo, hi;
	u64 val;
	u8 current_multiplier, current_voltage;
	u8 max_multiplier, max_voltage;
	u8 min_multiplier, min_voltage;
	u8 brand;
	u32 fsb;
	struct eps_cpu_data *centaur;
	struct cpufreq_frequency_table *f_table;
	int k, step, voltage;
	int ret;
	int states;

	if (policy->cpu != 0)
		return -ENODEV;

	/* Check brand */
	printk("eps: Detected VIA ");
	rdmsr(0x1153, lo, hi);
	brand = (((lo >> 2) ^ lo) >> 18) & 3;
	switch (brand) {
	case EPS_BRAND_C7M:
		printk("C7-M\n");
		break;
	case EPS_BRAND_C7:
		printk("C7\n");
		break;
	case EPS_BRAND_EDEN:
		printk("Eden\n");
		break;
	case EPS_BRAND_C3:
		printk("C3\n");
		return -ENODEV;
		break;
	}
	/* Enable Enhanced PowerSaver */
	rdmsrl(MSR_IA32_MISC_ENABLE, val);
	if (!(val & 1 << 16)) {
		val |= 1 << 16;
		wrmsrl(MSR_IA32_MISC_ENABLE, val);
		/* Can be locked at 0 */
		rdmsrl(MSR_IA32_MISC_ENABLE, val);
		if (!(val & 1 << 16)) {
			printk("eps: Can't enable Enhanced PowerSaver\n");
			return -ENODEV;
		}
	}

	/* Print voltage and multiplier */
	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
	current_voltage = lo & 0xff;
	printk("eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
	current_multiplier = (lo >> 8) & 0xff;
	printk("eps: Current multiplier = %d\n", current_multiplier);

	/* Print limits */
	max_voltage = hi & 0xff;
	printk("eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
	max_multiplier = (hi >> 8) & 0xff;
	printk("eps: Highest multiplier = %d\n", max_multiplier);
	min_voltage = (hi >> 16) & 0xff;
	printk("eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
	min_multiplier = (hi >> 24) & 0xff;
	printk("eps: Lowest multiplier = %d\n", min_multiplier);

	/* Sanity checks */
	if (current_multiplier == 0 || max_multiplier == 0
	    || min_multiplier == 0)
		return -EINVAL;
	if (current_multiplier > max_multiplier
	    || max_multiplier <= min_multiplier)
		return -EINVAL;
	if (current_voltage > 0x1c || max_voltage > 0x1c)
		return -EINVAL;
	if (max_voltage < min_voltage)
		return -EINVAL;

	/* Calc FSB speed */
	fsb = cpu_khz / current_multiplier;
	/* Calc number of p-states supported */
	if (brand == EPS_BRAND_C7M)
		states = max_multiplier - min_multiplier + 1;
	else
		states = 2;

	/* Allocate private data and frequency table for current cpu */
	centaur = kzalloc(sizeof(struct eps_cpu_data)
		    + (states + 1) * sizeof(struct cpufreq_frequency_table),
		    GFP_KERNEL);
	if (!centaur)
		return -ENOMEM;
	eps_cpu[0] = centaur;

	/* Copy basic values */
	centaur->fsb = fsb;

	/* Fill frequency and MSR value table */
	f_table = &centaur->freq_table[0];
	if (brand != EPS_BRAND_C7M) {
		f_table[0].frequency = fsb * min_multiplier;
		f_table[0].index = (min_multiplier << 8) | min_voltage;
		f_table[1].frequency = fsb * max_multiplier;
		f_table[1].index = (max_multiplier << 8) | max_voltage;
		f_table[2].frequency = CPUFREQ_TABLE_END;
	} else {
		k = 0;
		step = ((max_voltage - min_voltage) * 256)
			/ (max_multiplier - min_multiplier);
		for (i = min_multiplier; i <= max_multiplier; i++) {
			voltage = (k * step) / 256 + min_voltage;
			f_table[k].frequency = fsb * i;
			f_table[k].index = (i << 8) | voltage;
			k++;
		}
		f_table[k].frequency = CPUFREQ_TABLE_END;
	}

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
	policy->cur = fsb * current_multiplier;

	ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
	if (ret) {
		kfree(centaur);
		return ret;
	}

	cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
	return 0;
}
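
A standalone sketch of the C7-M table fill above: voltage is linearly interpolated between the min and max VID in fixed point (scale 256), so no floating point is needed in the kernel. The sample limits below are invented:

	#include <stdio.h>

	int main(void)
	{
		int min_mult = 4, max_mult = 16;
		int min_v = 4, max_v = 28;	/* VID codes, not millivolts */
		int step = ((max_v - min_v) * 256) / (max_mult - min_mult);
		int k = 0, i;

		for (i = min_mult; i <= max_mult; i++, k++)
			printf("mult %2d -> vid %d\n", i, (k * step) / 256 + min_v);
		return 0;
	}
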

static int eps_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct eps_cpu_data *centaur;
	u32 lo, hi;

	if (eps_cpu[cpu] == NULL)
		return -ENODEV;
	centaur = eps_cpu[cpu];

	/* Get max frequency */
	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
	/* Set max frequency */
	eps_set_state(centaur, cpu, hi & 0xffff);
	/* Bye */
	cpufreq_frequency_table_put_attr(policy->cpu);
	kfree(eps_cpu[cpu]);
	eps_cpu[cpu] = NULL;
	return 0;
}

static struct freq_attr *eps_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver eps_driver = {
	.verify		= eps_verify,
	.target		= eps_target,
	.init		= eps_cpu_init,
	.exit		= eps_cpu_exit,
	.get		= eps_get,
	.name		= "e_powersaver",
	.owner		= THIS_MODULE,
	.attr		= eps_attr,
};

static int __init eps_init(void)
{
	struct cpuinfo_x86 *c = cpu_data;

	/* This driver will work only on Centaur C7 processors with
	 * Enhanced SpeedStep/PowerSaver registers */
	if (c->x86_vendor != X86_VENDOR_CENTAUR
	    || c->x86 != 6 || c->x86_model != 10)
		return -ENODEV;
	if (!cpu_has(c, X86_FEATURE_EST))
		return -ENODEV;

	if (cpufreq_register_driver(&eps_driver))
		return -EINVAL;
	return 0;
}

static void __exit eps_exit(void)
{
	cpufreq_unregister_driver(&eps_driver);
}

MODULE_AUTHOR("Rafał Bilski <rafalbilski@interia.pl>");
MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPUs.");
MODULE_LICENSE("GPL");

module_init(eps_init);
module_exit(eps_exit);

@@ -8,12 +8,11 @@
 * VIA have currently 3 different versions of Longhaul.
 * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
 * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
-* Version 2 of longhaul is the same as v1, but adds voltage scaling.
-* Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C)
-* voltage scaling support has currently been disabled in this driver
-* until we have code that gets it right.
+* Version 2 of longhaul is backward compatible with v1, but adds
+* LONGHAUL MSR for purpose of both frequency and voltage scaling.
+* Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
 * Version 3 of longhaul got renamed to Powersaver and redesigned
-* to use the POWERSAVER MSR at 0x110a.
+* to use only the POWERSAVER MSR at 0x110a.
 * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
 * It's pretty much the same feature wise to longhaul v2, though
 * there is provision for scaling FSB too, but this doesn't work

@@ -51,10 +50,12 @@
 #define CPU_EZRA	3
 #define CPU_EZRA_T	4
 #define CPU_NEHEMIAH	5
+#define CPU_NEHEMIAH_C	6

 /* Flags */
 #define USE_ACPI_C3		(1 << 1)
 #define USE_NORTHBRIDGE		(1 << 2)
+#define USE_VT8235		(1 << 3)

 static int cpu_model;
 static unsigned int numscales=16;

@@ -63,7 +64,8 @@ static unsigned int fsb;
 static struct mV_pos *vrm_mV_table;
 static unsigned char *mV_vrm_table;
 struct f_msr {
-	unsigned char vrm;
+	u8 vrm;
+	u8 pos;
 };
 static struct f_msr f_msr_table[32];

@@ -73,10 +75,10 @@ static int can_scale_voltage;
 static struct acpi_processor *pr = NULL;
 static struct acpi_processor_cx *cx = NULL;
 static u8 longhaul_flags;
+static u8 longhaul_pos;

 /* Module parameters */
 static int scale_voltage;
-static int ignore_latency;

 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)

@@ -164,26 +166,47 @@ static void do_longhaul1(unsigned int clock_ratio_index)
 static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 {
 	union msr_longhaul longhaul;
+	u8 dest_pos;
 	u32 t;

+	dest_pos = f_msr_table[clock_ratio_index].pos;
+
 	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+	/* Setup new frequency */
 	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
 	longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
 	longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-	longhaul.bits.EnableSoftBusRatio = 1;
-
-	if (can_scale_voltage) {
+	/* Setup new voltage */
+	if (can_scale_voltage)
 		longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm;
-		longhaul.bits.EnableSoftVID = 1;
-	}
-
 	/* Sync to timer tick */
 	safe_halt();
+	/* Raise voltage if necessary */
+	if (can_scale_voltage && longhaul_pos < dest_pos) {
+		longhaul.bits.EnableSoftVID = 1;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		/* Change voltage */
+		if (!cx_address) {
+			ACPI_FLUSH_CPU_CACHE();
+			halt();
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+			/* Invoke C3 */
+			inb(cx_address);
+			/* Dummy op - must do something useless after P_LVL3
+			 * read */
+			t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		}
+		longhaul.bits.EnableSoftVID = 0;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		longhaul_pos = dest_pos;
+	}
+
+	/* Change frequency on next halt or sleep */
+	longhaul.bits.EnableSoftBusRatio = 1;
 	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 	if (!cx_address) {
 		ACPI_FLUSH_CPU_CACHE();
+		/* Invoke C1 */
 		halt();
 	} else {
 		ACPI_FLUSH_CPU_CACHE();
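
The rework above (and the next hunk) imposes an ordering worth spelling out: voltage is raised before a frequency increase, and only lowered after a frequency decrease, so the part stays inside its safe operating envelope at every intermediate point. A standalone sketch of that discipline; set_voltage()/set_frequency() are invented stand-ins for the MSR writes:

	#include <stdio.h>

	static void set_voltage(int vid)  { printf("vid  -> %d\n", vid); }
	static void set_frequency(int f)  { printf("freq -> %d\n", f); }

	static void transition(int new_f, int cur_v, int new_v)
	{
		if (new_v > cur_v)		/* speeding up: raise volts first */
			set_voltage(new_v);
		set_frequency(new_f);
		if (new_v < cur_v)		/* slowing down: drop volts last */
			set_voltage(new_v);
	}

	int main(void)
	{
		transition(2000, 950, 1100);	/* upward transition */
		transition(1000, 1100, 950);	/* downward transition */
		return 0;
	}
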
@@ -193,12 +216,29 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 		t = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	/* Disable bus ratio bit */
 	local_irq_disable();
 	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
 	longhaul.bits.EnableSoftBusRatio = 0;
 	longhaul.bits.EnableSoftBSEL = 0;
-	longhaul.bits.EnableSoftVID = 0;
 	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+
+	/* Reduce voltage if necessary */
+	if (can_scale_voltage && longhaul_pos > dest_pos) {
+		longhaul.bits.EnableSoftVID = 1;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		/* Change voltage */
+		if (!cx_address) {
+			ACPI_FLUSH_CPU_CACHE();
+			halt();
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+			/* Invoke C3 */
+			inb(cx_address);
+			/* Dummy op - must do something useless after P_LVL3
+			 * read */
+			t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		}
+		longhaul.bits.EnableSoftVID = 0;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		longhaul_pos = dest_pos;
+	}
 }

 /**

@@ -257,26 +297,19 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	/*
 	 * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
 	 * Software controlled multipliers only.
-	 *
-	 * *NB* Until we get voltage scaling working v1 & v2 are the same code.
-	 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
 	 */
 	case TYPE_LONGHAUL_V1:
-	case TYPE_LONGHAUL_V2:
 		do_longhaul1(clock_ratio_index);
 		break;

 	/*
+	 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
+	 *
 	 * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
-	 * We can scale voltage with this too, but that's currently
-	 * disabled until we come up with a decent 'match freq to voltage'
-	 * algorithm.
-	 * When we add voltage scaling, we will also need to do the
-	 * voltage/freq setting in order depending on the direction
-	 * of scaling (like we do in powernow-k7.c)
+	 * Nehemiah can do FSB scaling too, but this has never been proven
+	 * to work in practice.
 	 */
+	case TYPE_LONGHAUL_V2:
 	case TYPE_POWERSAVER:
 		if (longhaul_flags & USE_ACPI_C3) {
 			/* Don't allow wakeup */

@@ -301,6 +334,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	local_irq_restore(flags);
 	preempt_enable();

+	freqs.new = calc_speed(longhaul_get_cpu_mult());
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 }

@@ -315,31 +349,19 @@ static void longhaul_setstate(unsigned int clock_ratio_index)

 #define ROUNDING	0xf

-static int _guess(int guess, int mult)
-{
-	int target;
-
-	target = ((mult/10)*guess);
-	if (mult%10 != 0)
-		target += (guess/2);
-	target += ROUNDING/2;
-	target &= ~ROUNDING;
-	return target;
-}
-
-
 static int guess_fsb(int mult)
 {
-	int speed = (cpu_khz/1000);
+	int speed = cpu_khz / 1000;
 	int i;
-	int speeds[] = { 66, 100, 133, 200 };
+	int speeds[] = { 666, 1000, 1333, 2000 };
+	int f_max, f_min;

 	speed += ROUNDING/2;
 	speed &= ~ROUNDING;

-	for (i=0; i<4; i++) {
-		if (_guess(speeds[i], mult) == speed)
-			return speeds[i];
+	for (i = 0; i < 4; i++) {
+		f_max = ((speeds[i] * mult) + 50) / 100;
+		f_max += (ROUNDING / 2);
+		f_min = f_max - ROUNDING;
+		if ((speed <= f_max) && (speed >= f_min))
+			return speeds[i] / 10;
 	}
 	return 0;
 }
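
A standalone check of the new guess_fsb() arithmetic above: FSB candidates are now kept in tenths of MHz (1333 = 133.3 MHz), the multiplier is in tenths too (100 = 10.0x, matching the clock_ratio tables), and the product divided by 100 lands in plain MHz, where a rounding window of ROUNDING absorbs measurement jitter. Sample numbers are invented:

	#include <stdio.h>

	#define ROUNDING 0xf

	int main(void)
	{
		int speeds[] = { 666, 1000, 1333, 2000 };
		int mult = 100;			/* 10.0x */
		int speed = 1333;		/* cpu_khz / 1000 on a 1.33 GHz part */
		int i;

		speed += ROUNDING / 2;
		speed &= ~ROUNDING;
		for (i = 0; i < 4; i++) {
			int f_max = ((speeds[i] * mult) + 50) / 100 + ROUNDING / 2;
			int f_min = f_max - ROUNDING;
			if (speed <= f_max && speed >= f_min)
				printf("FSB = %d MHz\n", speeds[i] / 10);
		}
		return 0;
	}

Only the 1333 candidate survives the window test here, so the sketch prints "FSB = 133 MHz".
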
@@ -347,67 +369,40 @@ static int guess_fsb(int mult)

 static int __init longhaul_get_ranges(void)
 {
-	unsigned long invalue;
-	unsigned int ezra_t_multipliers[32]= {
-		90,  30,  40, 100,  55,  35,  45,  95,
-		50,  70,  80,  60, 120,  75,  85,  65,
-		-1, 110, 120,  -1, 135, 115, 125, 105,
-		130, 150, 160, 140,  -1, 155,  -1, 145 };
 	unsigned int j, k = 0;
 	union msr_longhaul longhaul;
-	int mult = 0;
+	int mult;

-	switch (longhaul_version) {
-	case TYPE_LONGHAUL_V1:
-	case TYPE_LONGHAUL_V2:
-		/* Ugh, Longhaul v1 didn't have the min/max MSRs.
-		   Assume min=3.0x & max = whatever we booted at. */
-		minmult = 30;
-		maxmult = mult = longhaul_get_cpu_mult();
-		break;
-
-	case TYPE_POWERSAVER:
-		/* Ezra-T */
-		if (cpu_model==CPU_EZRA_T) {
-			minmult = 30;
-			rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-			invalue = longhaul.bits.MaxMHzBR;
-			if (longhaul.bits.MaxMHzBR4)
-				invalue += 16;
-			maxmult = mult = ezra_t_multipliers[invalue];
-			break;
-		}
-
-		/* Nehemiah */
-		if (cpu_model==CPU_NEHEMIAH) {
-			rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-
-			/*
-			 * TODO: This code works, but raises a lot of questions.
-			 * - Some Nehemiah's seem to have broken Min/MaxMHzBR's.
-			 *   We get around this by using a hardcoded multiplier of 4.0x
-			 *   for the minimimum speed, and the speed we booted up at for the max.
-			 *   This is done in longhaul_get_cpu_mult() by reading the EBLCR register.
-			 * - According to some VIA documentation EBLCR is only
-			 *   in pre-Nehemiah C3s. How this still works is a mystery.
-			 *   We're possibly using something undocumented and unsupported,
-			 *   But it works, so we don't grumble.
-			 */
-			minmult=40;
-			maxmult = mult = longhaul_get_cpu_mult();
-			break;
-		}
+	/* Get current frequency */
+	mult = longhaul_get_cpu_mult();
+	if (mult == -1) {
+		printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+		return -EINVAL;
+	}
+	fsb = guess_fsb(mult);
+	if (fsb == 0) {
+		printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+		return -EINVAL;
+	}
+	/* Get max multiplier - as we always did.
+	 * Longhaul MSR is usefull only when voltage scaling is enabled.
+	 * C3 is booting at max anyway. */
+	maxmult = mult;
+	/* Get min multiplier */
+	switch (cpu_model) {
+	case CPU_NEHEMIAH:
+		minmult = 50;
+		break;
+	case CPU_NEHEMIAH_C:
+		minmult = 40;
+		break;
+	default:
+		minmult = 30;
+		break;
 	}

 	dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n",
 		 minmult/10, minmult%10, maxmult/10, maxmult%10);

-	if (fsb == 0) {
-		printk (KERN_INFO PFX "Invalid (reserved) FSB!\n");
-		return -EINVAL;
-	}
-
 	highest_speed = calc_speed(maxmult);
 	lowest_speed = calc_speed(minmult);
 	dprintk ("FSB:%dMHz  Lowest speed: %s   Highest speed:%s\n", fsb,

@@ -455,6 +450,7 @@ static void __init longhaul_setup_voltagescaling(void)
 	union msr_longhaul longhaul;
 	struct mV_pos minvid, maxvid;
 	unsigned int j, speed, pos, kHz_step, numvscales;
+	int min_vid_speed;

 	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 	if (!(longhaul.bits.RevisionID & 1)) {

@@ -468,14 +464,14 @@ static void __init longhaul_setup_voltagescaling(void)
 		mV_vrm_table = &mV_vrm85[0];
 	} else {
 		printk (KERN_INFO PFX "Mobile VRM\n");
+		if (cpu_model < CPU_NEHEMIAH)
+			return;
 		vrm_mV_table = &mobilevrm_mV[0];
 		mV_vrm_table = &mV_mobilevrm[0];
 	}

 	minvid = vrm_mV_table[longhaul.bits.MinimumVID];
 	maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
-	numvscales = maxvid.pos - minvid.pos + 1;
-	kHz_step = (highest_speed - lowest_speed) / numvscales;

 	if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
 		printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "

@@ -491,20 +487,59 @@ static void __init longhaul_setup_voltagescaling(void)
 		return;
 	}

-	printk(KERN_INFO PFX "Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
+	/* How many voltage steps */
+	numvscales = maxvid.pos - minvid.pos + 1;
+	printk(KERN_INFO PFX
+		"Max VID=%d.%03d "
+		"Min VID=%d.%03d, "
+		"%d possible voltage scales\n",
 		maxvid.mV/1000, maxvid.mV%1000,
 		minvid.mV/1000, minvid.mV%1000,
 		numvscales);

+	/* Calculate max frequency at min voltage */
+	j = longhaul.bits.MinMHzBR;
+	if (longhaul.bits.MinMHzBR4)
+		j += 16;
+	min_vid_speed = eblcr_table[j];
+	if (min_vid_speed == -1)
+		return;
+	switch (longhaul.bits.MinMHzFSB) {
+	case 0:
+		min_vid_speed *= 13333;
+		break;
+	case 1:
+		min_vid_speed *= 10000;
+		break;
+	case 3:
+		min_vid_speed *= 6666;
+		break;
+	default:
+		return;
+		break;
+	}
+	if (min_vid_speed >= highest_speed)
+		return;
+	/* Calculate kHz for one voltage step */
+	kHz_step = (highest_speed - min_vid_speed) / numvscales;

 	j = 0;
 	while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
 		speed = longhaul_table[j].frequency;
-		pos = (speed - lowest_speed) / kHz_step + minvid.pos;
+		if (speed > min_vid_speed)
+			pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
+		else
+			pos = minvid.pos;
 		f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos];
+		f_msr_table[longhaul_table[j].index].pos = pos;
 		j++;
 	}

+	longhaul_pos = maxvid.pos;
 	can_scale_voltage = 1;
 	printk(KERN_INFO PFX "Voltage scaling enabled. "
 		"Use of \"conservative\" governor is highly recommended.\n");
 }
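
A standalone sketch of the pos calculation in the loop above: frequencies at or below min_vid_speed sit at the minimum VID position, and every kHz_step of extra speed costs one more VID step. The numbers below are invented:

	#include <stdio.h>

	int main(void)
	{
		int highest_speed = 2000000, min_vid_speed = 1000000;	/* kHz */
		int minvid_pos = 4, numvscales = 8;
		int kHz_step = (highest_speed - min_vid_speed) / numvscales;
		int speeds[] = { 800000, 1000000, 1500000, 2000000 };
		int i;

		for (i = 0; i < 4; i++) {
			int speed = speeds[i], pos;

			if (speed > min_vid_speed)
				pos = (speed - min_vid_speed) / kHz_step + minvid_pos;
			else
				pos = minvid_pos;
			printf("%7d kHz -> pos %d\n", speed, pos);
		}
		return 0;
	}
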
@@ -573,20 +608,51 @@ static int enable_arbiter_disable(void)
 	if (dev != NULL) {
 		/* Enable access to port 0x22 */
 		pci_read_config_byte(dev, reg, &pci_cmd);
-		if ( !(pci_cmd & 1<<7) ) {
+		if (!(pci_cmd & 1<<7)) {
 			pci_cmd |= 1<<7;
 			pci_write_config_byte(dev, reg, pci_cmd);
+			pci_read_config_byte(dev, reg, &pci_cmd);
+			if (!(pci_cmd & 1<<7)) {
+				printk(KERN_ERR PFX
+					"Can't enable access to port 0x22.\n");
+				return 0;
+			}
 		}
 		return 1;
 	}
 	return 0;
 }

+static int longhaul_setup_vt8235(void)
+{
+	struct pci_dev *dev;
+	u8 pci_cmd;
+
+	/* Find VT8235 southbridge */
+	dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+	if (dev != NULL) {
+		/* Set transition time to max */
+		pci_read_config_byte(dev, 0xec, &pci_cmd);
+		pci_cmd &= ~(1 << 2);
+		pci_write_config_byte(dev, 0xec, pci_cmd);
+		pci_read_config_byte(dev, 0xe4, &pci_cmd);
+		pci_cmd &= ~(1 << 7);
+		pci_write_config_byte(dev, 0xe4, pci_cmd);
+		pci_read_config_byte(dev, 0xe5, &pci_cmd);
+		pci_cmd |= 1 << 7;
+		pci_write_config_byte(dev, 0xe5, pci_cmd);
+		return 1;
+	}
+	return 0;
+}
+
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *c = cpu_data;
 	char *cpuname=NULL;
 	int ret;
+	u32 lo, hi;
+	int vt8235_present;

 	/* Check what we have on this motherboard */
 	switch (c->x86_model) {
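
The enable_arbiter_disable() change above adds a read-back after setting bit 7, because a locked-down chipset can silently ignore the write. A standalone sketch of that write-then-verify pattern; read_cfg()/write_cfg() are invented stand-ins for the pci_read_config_byte()/pci_write_config_byte() pair:

	#include <stdio.h>

	static unsigned char reg_locked;	/* pretend hardware register */

	static unsigned char read_cfg(void)	{ return reg_locked; }
	static void write_cfg(unsigned char v)	{ (void)v; /* write ignored */ }

	int main(void)
	{
		unsigned char v = read_cfg();

		if (!(v & (1 << 7))) {
			write_cfg(v | (1 << 7));
			if (!(read_cfg() & (1 << 7)))	/* verify it stuck */
				printf("can't enable access\n");
		}
		return 0;
	}
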
@@ -599,16 +665,20 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		break;

 	case 7:
-		longhaul_version = TYPE_LONGHAUL_V1;
 		switch (c->x86_mask) {
 		case 0:
+			longhaul_version = TYPE_LONGHAUL_V1;
 			cpu_model = CPU_SAMUEL2;
 			cpuname = "C3 'Samuel 2' [C5B]";
-			/* Note, this is not a typo, early Samuel2's had Samuel1 ratios. */
-			memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
-			memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr));
+			/* Note, this is not a typo, early Samuel2's had
+			 * Samuel1 ratios. */
+			memcpy(clock_ratio, samuel1_clock_ratio,
+			       sizeof(samuel1_clock_ratio));
+			memcpy(eblcr_table, samuel2_eblcr,
+			       sizeof(samuel2_eblcr));
 			break;
 		case 1 ... 15:
+			longhaul_version = TYPE_LONGHAUL_V2;
 			if (c->x86_mask < 8) {
 				cpu_model = CPU_SAMUEL2;
 				cpuname = "C3 'Samuel 2' [C5B]";

@@ -616,8 +686,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 				cpu_model = CPU_EZRA;
 				cpuname = "C3 'Ezra' [C5C]";
 			}
-			memcpy (clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio));
-			memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr));
+			memcpy(clock_ratio, ezra_clock_ratio,
+			       sizeof(ezra_clock_ratio));
+			memcpy(eblcr_table, ezra_eblcr,
+			       sizeof(ezra_eblcr));
 			break;
 		}
 		break;

@@ -632,24 +704,24 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		break;

 	case 9:
-		cpu_model = CPU_NEHEMIAH;
 		longhaul_version = TYPE_POWERSAVER;
-		numscales=32;
+		numscales = 32;
+		memcpy(clock_ratio,
+		       nehemiah_clock_ratio,
+		       sizeof(nehemiah_clock_ratio));
+		memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr));
 		switch (c->x86_mask) {
 		case 0 ... 1:
-			cpuname = "C3 'Nehemiah A' [C5N]";
-			memcpy (clock_ratio, nehemiah_a_clock_ratio, sizeof(nehemiah_a_clock_ratio));
-			memcpy (eblcr_table, nehemiah_a_eblcr, sizeof(nehemiah_a_eblcr));
+			cpu_model = CPU_NEHEMIAH;
+			cpuname = "C3 'Nehemiah A' [C5XLOE]";
 			break;
 		case 2 ... 4:
-			cpuname = "C3 'Nehemiah B' [C5N]";
-			memcpy (clock_ratio, nehemiah_b_clock_ratio, sizeof(nehemiah_b_clock_ratio));
-			memcpy (eblcr_table, nehemiah_b_eblcr, sizeof(nehemiah_b_eblcr));
+			cpu_model = CPU_NEHEMIAH;
+			cpuname = "C3 'Nehemiah B' [C5XLOH]";
 			break;
 		case 5 ... 15:
-			cpuname = "C3 'Nehemiah C' [C5N]";
-			memcpy (clock_ratio, nehemiah_c_clock_ratio, sizeof(nehemiah_c_clock_ratio));
-			memcpy (eblcr_table, nehemiah_c_eblcr, sizeof(nehemiah_c_eblcr));
+			cpu_model = CPU_NEHEMIAH_C;
+			cpuname = "C3 'Nehemiah C' [C5P]";
 			break;
 		}
 		break;

@@ -658,6 +730,13 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		cpuname = "Unknown";
 		break;
 	}
+	/* Check Longhaul ver. 2 */
+	if (longhaul_version == TYPE_LONGHAUL_V2) {
+		rdmsr(MSR_VIA_LONGHAUL, lo, hi);
+		if (lo == 0 && hi == 0)
+			/* Looks like MSR isn't present */
+			longhaul_version = TYPE_LONGHAUL_V1;
+	}

 	printk (KERN_INFO PFX "VIA %s CPU detected.  ", cpuname);
 	switch (longhaul_version) {

@@ -670,15 +749,18 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		break;
 	};

+	/* Doesn't hurt */
+	vt8235_present = longhaul_setup_vt8235();
+
 	/* Find ACPI data for processor */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
-			    &longhaul_walk_callback, NULL, (void *)&pr);
+	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+			    ACPI_UINT32_MAX, &longhaul_walk_callback,
+			    NULL, (void *)&pr);

 	/* Check ACPI support for C3 state */
-	if ((pr != NULL) && (longhaul_version == TYPE_POWERSAVER)) {
+	if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) {
 		cx = &pr->power.states[ACPI_STATE_C3];
-		if (cx->address > 0 &&
-		    (cx->latency <= 1000 || ignore_latency != 0) ) {
+		if (cx->address > 0 && cx->latency <= 1000) {
 			longhaul_flags |= USE_ACPI_C3;
 			goto print_support_type;
 		}

@@ -688,8 +770,11 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		longhaul_flags |= USE_NORTHBRIDGE;
 		goto print_support_type;
 	}
-
-	/* No ACPI C3 or we can't use it */
+	/* Use VT8235 southbridge if present */
+	if (longhaul_version == TYPE_POWERSAVER && vt8235_present) {
+		longhaul_flags |= USE_VT8235;
+		goto print_support_type;
+	}
 	/* Check ACPI support for bus master arbiter disable */
 	if ((pr == NULL) || !(pr->flags.bm_control)) {
 		printk(KERN_ERR PFX

@@ -698,18 +783,18 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	}

print_support_type:
-	if (!(longhaul_flags & USE_NORTHBRIDGE)) {
-		printk (KERN_INFO PFX "Using ACPI support.\n");
-	} else {
+	if (longhaul_flags & USE_NORTHBRIDGE)
 		printk (KERN_INFO PFX "Using northbridge support.\n");
-	}
+	else if (longhaul_flags & USE_VT8235)
+		printk (KERN_INFO PFX "Using VT8235 support.\n");
+	else
+		printk (KERN_INFO PFX "Using ACPI support.\n");

 	ret = longhaul_get_ranges();
 	if (ret != 0)
 		return ret;

-	if ((longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) &&
-	    (scale_voltage != 0))
+	if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
 		longhaul_setup_voltagescaling();

 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;

@@ -797,8 +882,6 @@ static void __exit longhaul_exit(void)

 module_param (scale_voltage, int, 0644);
 MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
-module_param(ignore_latency, int, 0644);
-MODULE_PARM_DESC(ignore_latency, "Skip ACPI C3 latency test");

 MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");

@@ -235,49 +235,14 @@ static int __initdata ezrat_eblcr[32] = {
/*
 * VIA C3 Nehemiah */

static int __initdata nehemiah_a_clock_ratio[32] = {
static int __initdata nehemiah_clock_ratio[32] = {
	100, /* 0000 -> 10.0x */
	160, /* 0001 -> 16.0x */
	-1,  /* 0010 -> RESERVED */
	40,  /* 0010 -> 4.0x */
	90,  /* 0011 -> 9.0x */
	95,  /* 0100 -> 9.5x */
	-1,  /* 0101 -> RESERVED */
	-1,  /* 0110 -> RESERVED */
	55,  /* 0111 -> 5.5x */
	60,  /* 1000 -> 6.0x */
	70,  /* 1001 -> 7.0x */
	80,  /* 1010 -> 8.0x */
	50,  /* 1011 -> 5.0x */
	65,  /* 1100 -> 6.5x */
	75,  /* 1101 -> 7.5x */
	85,  /* 1110 -> 8.5x */
	120, /* 1111 -> 12.0x */
	100, /* 0000 -> 10.0x */
	-1,  /* 0001 -> RESERVED */
	120, /* 0010 -> 12.0x */
	90,  /* 0011 -> 9.0x */
	105, /* 0100 -> 10.5x */
	115, /* 0101 -> 11.5x */
	125, /* 0110 -> 12.5x */
	135, /* 0111 -> 13.5x */
	140, /* 1000 -> 14.0x */
	150, /* 1001 -> 15.0x */
	160, /* 1010 -> 16.0x */
	130, /* 1011 -> 13.0x */
	145, /* 1100 -> 14.5x */
	155, /* 1101 -> 15.5x */
	-1,  /* 1110 -> RESERVED (13.0x) */
	120, /* 1111 -> 12.0x */
};

static int __initdata nehemiah_b_clock_ratio[32] = {
	100, /* 0000 -> 10.0x */
	160, /* 0001 -> 16.0x */
	-1,  /* 0010 -> RESERVED */
	90,  /* 0011 -> 9.0x */
	95,  /* 0100 -> 9.5x */
	-1,  /* 0101 -> RESERVED */
	-1,  /* 0110 -> RESERVED */
	45,  /* 0110 -> 4.5x */
	55,  /* 0111 -> 5.5x */
	60,  /* 1000 -> 6.0x */
	70,  /* 1001 -> 7.0x */

@@ -305,84 +270,14 @@ static int __initdata nehemiah_b_clock_ratio[32] = {
	120, /* 1111 -> 12.0x */
};

static int __initdata nehemiah_c_clock_ratio[32] = {
	100, /* 0000 -> 10.0x */
	160, /* 0001 -> 16.0x */
	40,  /* 0010 -> RESERVED */
	90,  /* 0011 -> 9.0x */
	95,  /* 0100 -> 9.5x */
	-1,  /* 0101 -> RESERVED */
	45,  /* 0110 -> RESERVED */
	55,  /* 0111 -> 5.5x */
	60,  /* 1000 -> 6.0x */
	70,  /* 1001 -> 7.0x */
	80,  /* 1010 -> 8.0x */
	50,  /* 1011 -> 5.0x */
	65,  /* 1100 -> 6.5x */
	75,  /* 1101 -> 7.5x */
	85,  /* 1110 -> 8.5x */
	120, /* 1111 -> 12.0x */
	100, /* 0000 -> 10.0x */
	110, /* 0001 -> 11.0x */
	120, /* 0010 -> 12.0x */
	90,  /* 0011 -> 9.0x */
	105, /* 0100 -> 10.5x */
	115, /* 0101 -> 11.5x */
	125, /* 0110 -> 12.5x */
	135, /* 0111 -> 13.5x */
	140, /* 1000 -> 14.0x */
	150, /* 1001 -> 15.0x */
	160, /* 1010 -> 16.0x */
	130, /* 1011 -> 13.0x */
	145, /* 1100 -> 14.5x */
	155, /* 1101 -> 15.5x */
	-1,  /* 1110 -> RESERVED (13.0x) */
	120, /* 1111 -> 12.0x */
};

static int __initdata nehemiah_a_eblcr[32] = {
static int __initdata nehemiah_eblcr[32] = {
	50,  /* 0000 -> 5.0x */
	160, /* 0001 -> 16.0x */
	-1,  /* 0010 -> RESERVED */
	40,  /* 0010 -> 4.0x */
	100, /* 0011 -> 10.0x */
	55,  /* 0100 -> 5.5x */
	-1,  /* 0101 -> RESERVED */
	-1,  /* 0110 -> RESERVED */
	95,  /* 0111 -> 9.5x */
	90,  /* 1000 -> 9.0x */
	70,  /* 1001 -> 7.0x */
	80,  /* 1010 -> 8.0x */
	60,  /* 1011 -> 6.0x */
	120, /* 1100 -> 12.0x */
	75,  /* 1101 -> 7.5x */
	85,  /* 1110 -> 8.5x */
	65,  /* 1111 -> 6.5x */
	90,  /* 0000 -> 9.0x */
	-1,  /* 0001 -> RESERVED */
	120, /* 0010 -> 12.0x */
	100, /* 0011 -> 10.0x */
	135, /* 0100 -> 13.5x */
	115, /* 0101 -> 11.5x */
	125, /* 0110 -> 12.5x */
	105, /* 0111 -> 10.5x */
	130, /* 1000 -> 13.0x */
	150, /* 1001 -> 15.0x */
	160, /* 1010 -> 16.0x */
	140, /* 1011 -> 14.0x */
	120, /* 1100 -> 12.0x */
	155, /* 1101 -> 15.5x */
	-1,  /* 1110 -> RESERVED (13.0x) */
	145  /* 1111 -> 14.5x */
	/* end of table */
};
static int __initdata nehemiah_b_eblcr[32] = {
	50,  /* 0000 -> 5.0x */
	160, /* 0001 -> 16.0x */
	-1,  /* 0010 -> RESERVED */
	100, /* 0011 -> 10.0x */
	55,  /* 0100 -> 5.5x */
	-1,  /* 0101 -> RESERVED */
	-1,  /* 0110 -> RESERVED */
	45,  /* 0110 -> 4.5x */
	95,  /* 0111 -> 9.5x */
	90,  /* 1000 -> 9.0x */
	70,  /* 1001 -> 7.0x */

@@ -408,42 +303,6 @@ static int __initdata nehemiah_b_eblcr[32] = {
	155, /* 1101 -> 15.5x */
	-1,  /* 1110 -> RESERVED (13.0x) */
	145  /* 1111 -> 14.5x */
	/* end of table */
};
static int __initdata nehemiah_c_eblcr[32] = {
	50,  /* 0000 -> 5.0x */
	160, /* 0001 -> 16.0x */
	40,  /* 0010 -> RESERVED */
	100, /* 0011 -> 10.0x */
	55,  /* 0100 -> 5.5x */
	-1,  /* 0101 -> RESERVED */
	45,  /* 0110 -> RESERVED */
	95,  /* 0111 -> 9.5x */
	90,  /* 1000 -> 9.0x */
	70,  /* 1001 -> 7.0x */
	80,  /* 1010 -> 8.0x */
	60,  /* 1011 -> 6.0x */
	120, /* 1100 -> 12.0x */
	75,  /* 1101 -> 7.5x */
	85,  /* 1110 -> 8.5x */
	65,  /* 1111 -> 6.5x */
	90,  /* 0000 -> 9.0x */
	110, /* 0001 -> 11.0x */
	120, /* 0010 -> 12.0x */
	100, /* 0011 -> 10.0x */
	135, /* 0100 -> 13.5x */
	115, /* 0101 -> 11.5x */
	125, /* 0110 -> 12.5x */
	105, /* 0111 -> 10.5x */
	130, /* 1000 -> 13.0x */
	150, /* 1001 -> 15.0x */
	160, /* 1010 -> 16.0x */
	140, /* 1011 -> 14.0x */
	120, /* 1100 -> 12.0x */
	155, /* 1101 -> 15.5x */
	-1,  /* 1110 -> RESERVED (13.0x) */
	145  /* 1111 -> 14.5x */
	/* end of table */
};

/*

@@ -1289,7 +1289,11 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (query_current_values_with_pending_wait(data))
 		goto out;

-	khz = find_khz_freq_from_fid(data->currfid);
+	if (cpu_family == CPU_HW_PSTATE)
+		khz = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+	else
+		khz = find_khz_freq_from_fid(data->currfid);
+

out:
	set_cpus_allowed(current, oldmask);
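
Why the hardware-pstate branch above needs both values: on those parts the frequency is not a function of the fid alone, a divisor (did) also applies. The conversion below is a hypothetical stand-in sketched only to show the shape of the relationship; the real encoding lives in find_khz_freq_from_fiddid() in powernow-k8:

	#include <stdio.h>

	static unsigned int khz_from_fid(unsigned int fid)
	{
		return 800000 + fid * 100000;	/* assumed fid-only scale */
	}

	static unsigned int khz_from_fiddid(unsigned int fid, unsigned int did)
	{
		return khz_from_fid(fid) >> did;	/* did as a divisor */
	}

	int main(void)
	{
		printf("%u kHz\n", khz_from_fid(12));		/* 2000000 */
		printf("%u kHz\n", khz_from_fiddid(12, 1));	/* halved  */
		return 0;
	}
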
@@ -6,6 +6,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
+#include <asm/pci-direct.h>

 #include "cpu.h"

@@ -161,19 +162,19 @@ static void __cpuinit set_cx86_inc(void)
 static void __cpuinit geode_configure(void)
 {
 	unsigned long flags;
-	u8 ccr3, ccr4;
+	u8 ccr3;
 	local_irq_save(flags);

 	/* Suspend on halt power saving and enable #SUSP pin */
 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);

 	ccr3 = getCx86(CX86_CCR3);
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* Enable */
+	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */

-	ccr4 = getCx86(CX86_CCR4);
-	ccr4 |= 0x38;		/* FPU fast, DTE cache, Mem bypass */
-
-	setCx86(CX86_CCR3, ccr3);
+	/* FPU fast, DTE cache, Mem bypass */
+	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */

 	set_cx86_memwb();
 	set_cx86_reorder();
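
Note the bug the rewrite above fixes: the old code computed ccr4 | 0x38 but never wrote it back, so the CCR4 bits were never actually set. The fix also makes the MAPEN bracket explicit: CCR4 and the other extended Cyrix registers are only reachable while the MAPEN bits in CCR3 are set. A standalone sketch of that bracket; get_reg()/set_reg() are invented stand-ins for getCx86()/setCx86(), and the register indices follow the usual Cyrix numbering:

	#include <stdio.h>

	static unsigned char regs[256];

	static unsigned char get_reg(int r)		{ return regs[r]; }
	static void set_reg(int r, unsigned char v)	{ regs[r] = v; }

	#define CCR3 0xc3
	#define CCR4 0xe8

	int main(void)
	{
		unsigned char ccr3 = get_reg(CCR3);

		set_reg(CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
		set_reg(CCR4, get_reg(CCR4) | 0x38);	/* CCR4 now writable */
		set_reg(CCR3, ccr3);			/* disable MAPEN again */
		printf("CCR4 = %#x\n", get_reg(CCR4));
		return 0;
	}
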
@@ -183,14 +184,6 @@ static void __cpuinit geode_configure(void)
 }


-#ifdef CONFIG_PCI
-static struct pci_device_id __cpuinitdata cyrix_55x0[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520) },
-	{ },
-};
-#endif
-
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;

@@ -258,6 +251,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)

 	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
 #ifdef CONFIG_PCI
+	{
+		u32 vendor, device;
 		/* It isn't really a PCI quirk directly, but the cure is the
 		   same. The MediaGX has deep magic SMM stuff that handles the
 		   SB emulation. It thows away the fifo on disable_dma() which

@@ -273,22 +268,34 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
 		isa_dma_bridge_buggy = 2;

+		/* We do this before the PCI layer is running. However we
+		   are safe here as we know the bridge must be a Cyrix
+		   companion and must be present */
+		vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
+		device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);

 		/*
 		 *  The 5510/5520 companion chips have a funky PIT.
 		 */
-		if (pci_dev_present(cyrix_55x0))
+		if (vendor == PCI_VENDOR_ID_CYRIX &&
+		    (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
 			pit_latch_buggy = 1;
+	}
 #endif
 		c->x86_cache_size=16;	/* Yep 16K integrated cache thats it */

 		/* GXm supports extended cpuid levels 'ala' AMD */
 		if (c->cpuid_level == 2) {
 			/* Enable cxMMX extensions (GX1 Datasheet 54) */
-			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);

-			/* GXlv/GXm/GX1 */
-			if((dir1 >= 0x50 && dir1 <= 0x54) || dir1 >= 0x63)
+			/*
+			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
+			 * GXlv: 0x6x          GXlv datasheet 54
+			 *  ?  : 0x7x
+			 * GX1 : 0x8x          GX1  datasheet 56
+			 */
+			if((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <=dir1 && dir1 <= 0x8f))
 				geode_configure();
 			get_model_name(c);  /* get CPU marketing name */
 			return;

@@ -415,15 +422,14 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c)

 	if (dir0 == 5 || dir0 == 3)
 	{
-		unsigned char ccr3, ccr4;
+		unsigned char ccr3;
 		unsigned long flags;
 		printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
 		local_irq_save(flags);
 		ccr3 = getCx86(CX86_CCR3);
-		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-		ccr4 = getCx86(CX86_CCR4);
-		setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid  */
-		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
+		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+		setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid  */
+		setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
 		local_irq_restore(flags);
 	}
 }

@@ -12,6 +12,7 @@

 #include <asm/processor.h>
 #include <asm/system.h>
+#include <asm/mce.h>

 #include "mce.h"

@@ -1,4 +1,5 @@
 #include <linux/init.h>
+#include <asm/mce.h>

 void amd_mcheck_init(struct cpuinfo_x86 *c);
 void intel_p4_mcheck_init(struct cpuinfo_x86 *c);

@@ -9,6 +10,5 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c);
 /* Call the installed machine check handler for this CPU setup. */
 extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);

-extern int mce_disabled;
 extern int nr_mce_banks;

@@ -12,6 +12,7 @@
 #include <asm/system.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
+#include <asm/idle.h>

 #include <asm/therm_throt.h>

@@ -59,6 +60,7 @@ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_therm

 fastcall void smp_thermal_interrupt(struct pt_regs *regs)
 {
+	exit_idle();
 	irq_enter();
 	vendor_thermal_interrupt(regs);
 	irq_exit();

@@ -211,6 +211,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 	default:
 		return -ENOTTY;
 	case MTRRIOC_ADD_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_ADD_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err =

@@ -218,21 +221,33 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 			    file, 0);
 		break;
 	case MTRRIOC_SET_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_SET_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
 		break;
 	case MTRRIOC_DEL_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_DEL_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err = mtrr_file_del(sentry.base, sentry.size, file, 0);
 		break;
 	case MTRRIOC_KILL_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_KILL_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err = mtrr_del(-1, sentry.base, sentry.size);
 		break;
 	case MTRRIOC_GET_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_GET_ENTRY:
+#endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
 		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);

@@ -249,6 +264,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)

 		break;
 	case MTRRIOC_ADD_PAGE_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_ADD_PAGE_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err =

@@ -256,21 +274,33 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 			    file, 1);
 		break;
 	case MTRRIOC_SET_PAGE_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_SET_PAGE_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
 		break;
 	case MTRRIOC_DEL_PAGE_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_DEL_PAGE_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err = mtrr_file_del(sentry.base, sentry.size, file, 1);
 		break;
 	case MTRRIOC_KILL_PAGE_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_KILL_PAGE_ENTRY:
+#endif
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		err = mtrr_del_page(-1, sentry.base, sentry.size);
 		break;
 	case MTRRIOC_GET_PAGE_ENTRY:
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_GET_PAGE_ENTRY:
+#endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
 		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
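
The dual-label pattern added above exists because an ioctl number encodes the size of its argument struct, and a struct containing longs or pointers is smaller when built by 32-bit userland, yielding a different number. Listing both labels lets one handler serve both ABIs. A standalone sketch of the shape (the numeric values here are invented):

	#include <stdio.h>

	#define MTRRIOC_ADD_ENTRY	0x4010	/* hypothetical 64-bit number */
	#define MTRRIOC32_ADD_ENTRY	0x4008	/* hypothetical 32-bit number */

	static int handle(unsigned int cmd)
	{
		switch (cmd) {
		case MTRRIOC_ADD_ENTRY:
	#ifdef CONFIG_COMPAT
		case MTRRIOC32_ADD_ENTRY:
	#endif
			return 0;		/* same handler for both */
		default:
			return -1;
		}
	}

	int main(void)
	{
		printf("%d\n", handle(MTRRIOC_ADD_ENTRY));
		return 0;
	}
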
@@ -50,7 +50,7 @@ u32 num_var_ranges = 0;
 unsigned int *usage_table;
 static DEFINE_MUTEX(mtrr_mutex);

-u32 size_or_mask, size_and_mask;
+u64 size_or_mask, size_and_mask;

 static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

@@ -662,8 +662,8 @@ void __init mtrr_bp_init(void)
 			     boot_cpu_data.x86_mask == 0x4))
 			phys_addr = 36;

-		size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
-		size_and_mask = ~size_or_mask & 0xfff00000;
+		size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+		size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 		   boot_cpu_data.x86 == 6) {
 		/* VIA C* family have Intel style MTRRs, but
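
A standalone illustration of why the masks above had to become u64: once the complement is taken, the significant bits no longer fit in 32 bits for a 36-bit physical address space, and the old 0xfff00000 constant could not reach above bit 31 at all. Compiling and running this shows the truncation directly:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int phys_addr = 36, page_shift = 12;
		uint32_t or32 = ~((1u << (phys_addr - page_shift)) - 1);
		uint64_t or64 = ~((1ULL << (phys_addr - page_shift)) - 1);

		printf("u32: %#x\n", or32);	/* 0xff000000, high bits lost */
		printf("u64: %#llx\n", (unsigned long long)or64);
		printf("and: %#llx\n",
		       (unsigned long long)(~or64 & 0xfffff00000ULL));
		return 0;
	}
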
@@ -84,7 +84,7 @@ void get_mtrr_state(void);

 extern void set_mtrr_ops(struct mtrr_ops * ops);

-extern u32 size_or_mask, size_and_mask;
+extern u64 size_or_mask, size_and_mask;
 extern struct mtrr_ops * mtrr_if;

 #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)

@@ -29,7 +29,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
-		NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
+		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", "3dnowext", "3dnow",

 		/* Transmeta-defined */
 		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,

@@ -47,7 +47,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* Intel-defined (#2) */
 		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
 		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
-		NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

 		/* VIA/Cyrix/Centaur-defined */

@@ -57,8 +57,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

 		/* AMD-defined (#2) */
-		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8legacy", NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8legacy", "abm",
+		"sse4a", "misalignsse",
+		"3dnowprefetch", "osvw", "ibs", NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	};

@@ -69,8 +70,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		"ttp",  /* thermal trip */
 		"tm",
 		"stc",
+		"100mhzsteps",
+		"hwpstate",
 		NULL,
-		/* nothing */	/* constant_tsc - moved to flags */
+		NULL,	/* constant_tsc - moved to flags */
+		/* nothing */
 	};
 	struct cpuinfo_x86 *c = v;
 	int i, n = c - cpu_data;

|
|||
{
|
||||
unsigned int cap_mask, uk, max, dummy;
|
||||
unsigned int cms_rev1, cms_rev2;
|
||||
unsigned int cpu_rev, cpu_freq, cpu_flags, new_cpu_rev;
|
||||
unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
|
||||
char cpu_info[65];
|
||||
|
||||
get_model_name(c); /* Same as AMD/Cyrix */
|
||||
|
@ -72,6 +72,9 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
|
|||
wrmsr(0x80860004, ~0, uk);
|
||||
c->x86_capability[0] = cpuid_edx(0x00000001);
|
||||
wrmsr(0x80860004, cap_mask, uk);
|
||||
|
||||
/* All Transmeta CPUs have a constant TSC */
|
||||
set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
|
||||
|
||||
/* If we can run i686 user-space code, call us an i686 */
|
||||
#define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV)
|
||||
|
|
|
@@ -48,7 +48,6 @@ static struct class *cpuid_class;
 #ifdef CONFIG_SMP

 struct cpuid_command {
-	int cpu;
 	u32 reg;
 	u32 *data;
 };

@@ -57,8 +56,7 @@ static void cpuid_smp_cpuid(void *cmd_block)
 {
 	struct cpuid_command *cmd = (struct cpuid_command *)cmd_block;

-	if (cmd->cpu == smp_processor_id())
-		cpuid(cmd->reg, &cmd->data[0], &cmd->data[1], &cmd->data[2],
+	cpuid(cmd->reg, &cmd->data[0], &cmd->data[1], &cmd->data[2],
 	      &cmd->data[3]);
 }

@@ -70,11 +68,10 @@ static inline void do_cpuid(int cpu, u32 reg, u32 * data)
 	if (cpu == smp_processor_id()) {
 		cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
 	} else {
-		cmd.cpu = cpu;
 		cmd.reg = reg;
 		cmd.data = data;

-		smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
+		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
 	}
 	preempt_enable();
 }
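
The simplification above follows from the call change: a broadcast call runs the handler on every CPU, so each handler had to filter on its own id; a targeted call already runs only where intended, so the cpu field and the smp_processor_id() test become dead weight. A standalone sketch of the difference; run_on() is an invented stand-in for smp_call_function_single():

	#include <stdio.h>

	struct cmd { unsigned int reg; unsigned int *data; };

	static void handler(int cpu, void *p)
	{
		struct cmd *c = p;

		/* no "is this my cpu?" check needed on a targeted call */
		printf("cpu%d reads reg %#x\n", cpu, c->reg);
	}

	static void run_on(int cpu, void (*fn)(int, void *), void *arg)
	{
		fn(cpu, arg);	/* pretend this executes on that cpu */
	}

	int main(void)
	{
		unsigned int data[4];
		struct cmd c = { 0x1, data };

		run_on(2, handler, &c);
		return 0;
	}
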
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/e820.h>
+#include <asm/setup.h>

 #ifdef CONFIG_EFI
 int efi_enabled = 0;

@@ -156,21 +157,22 @@ static struct resource standard_io_resources[] = { {
 	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
 } };

-static int romsignature(const unsigned char *x)
+#define ROMSIGNATURE 0xaa55
+
+static int __init romsignature(const unsigned char *rom)
 {
 	unsigned short sig;
-	int ret = 0;
-	if (probe_kernel_address((const unsigned short *)x, sig) == 0)
-		ret = (sig == 0xaa55);
-	return ret;
+
+	return probe_kernel_address((const unsigned short *)rom, sig) == 0 &&
+	       sig == ROMSIGNATURE;
 }

 static int __init romchecksum(unsigned char *rom, unsigned long length)
 {
-	unsigned char *p, sum = 0;
+	unsigned char sum;

-	for (p = rom; p < rom + length; p++)
-		sum += *p;
+	for (sum = 0; length; length--)
+		sum += *rom++;
 	return sum == 0;
 }
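
A standalone model of the ROM probe rewritten above: an option ROM is accepted when its first word is 0xaa55 and its bytes sum to zero modulo 256. The 4-byte toy image below is invented, padded so the whole thing sums to zero:

	#include <stdio.h>

	static int checksum_ok(const unsigned char *rom, unsigned long length)
	{
		unsigned char sum;

		for (sum = 0; length; length--)
			sum += *rom++;
		return sum == 0;
	}

	int main(void)
	{
		/* 0x55 0xaa is the little-endian signature word */
		unsigned char rom[4] = { 0x55, 0xaa, 0x10, 0xf1 };
		unsigned short sig = rom[0] | (rom[1] << 8);

		printf("sig ok: %d, checksum ok: %d\n",
		       sig == 0xaa55, checksum_ok(rom, sizeof(rom)));
		return 0;
	}
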
@@ -30,7 +30,7 @@
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
-*	24(%esp) - %gs
+*	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs

@@ -99,9 +99,9 @@ VM_MASK		= 0x00020000

 #define SAVE_ALL \
 	cld; \
-	pushl %gs; \
+	pushl %fs; \
 	CFI_ADJUST_CFA_OFFSET 4;\
-	/*CFI_REL_OFFSET gs, 0;*/\
+	/*CFI_REL_OFFSET fs, 0;*/\
 	pushl %es; \
 	CFI_ADJUST_CFA_OFFSET 4;\
 	/*CFI_REL_OFFSET es, 0;*/\

@@ -133,7 +133,7 @@ VM_MASK		= 0x00020000
 	movl %edx, %ds; \
 	movl %edx, %es; \
 	movl $(__KERNEL_PDA), %edx; \
-	movl %edx, %gs
+	movl %edx, %fs

 #define RESTORE_INT_REGS \
 	popl %ebx;	\

@@ -166,9 +166,9 @@ VM_MASK		= 0x00020000
 2:	popl %es;	\
 	CFI_ADJUST_CFA_OFFSET -4;\
 	/*CFI_RESTORE es;*/\
-3:	popl %gs;	\
+3:	popl %fs;	\
 	CFI_ADJUST_CFA_OFFSET -4;\
-	/*CFI_RESTORE gs;*/\
+	/*CFI_RESTORE fs;*/\
 .pushsection .fixup,"ax";	\
 4:	movl $0,(%esp);	\
 	jmp 1b;	\

@@ -227,6 +227,7 @@ ENTRY(ret_from_fork)
 	CFI_ADJUST_CFA_OFFSET -4
 	jmp syscall_exit
 	CFI_ENDPROC
+END(ret_from_fork)

 /*
  * Return to user mode is not as complex as all this looks,

@@ -258,6 +259,7 @@ ENTRY(resume_userspace)
 					# int/exception return?
 	jne work_pending
 	jmp restore_all
+END(ret_from_exception)

 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)

@@ -272,6 +274,7 @@ need_resched:
 	jz restore_all
 	call preempt_schedule_irq
 	jmp need_resched
+END(resume_kernel)
 #endif
 	CFI_ENDPROC

@@ -349,16 +352,17 @@ sysenter_past_esp:
 	movl PT_OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
-1:	mov  PT_GS(%esp), %gs
+1:	mov  PT_FS(%esp), %fs
 	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
 .pushsection .fixup,"ax"
-2:	movl $0,PT_GS(%esp)
+2:	movl $0,PT_FS(%esp)
 	jmp 1b
 .section __ex_table,"a"
 	.align 4
 	.long 1b,2b
 .popsection
+ENDPROC(sysenter_entry)

 	# system call handler stub
 ENTRY(system_call)

@@ -459,6 +463,7 @@ ldt_ss:
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
 	CFI_ENDPROC
+ENDPROC(system_call)

 	# perform work that needs to be done immediately before resumption
 	ALIGN

@@ -504,6 +509,7 @@ work_notifysig_v86:
 	xorl %edx, %edx
 	call do_notify_resume
 	jmp resume_userspace_sig
+END(work_pending)

 	# perform syscall exit tracing
 	ALIGN

@@ -519,6 +525,7 @@ syscall_trace_entry:
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
 	jmp syscall_exit
+END(syscall_trace_entry)

 	# perform syscall exit tracing
 	ALIGN

@@ -532,6 +539,7 @@ syscall_exit_work:
 	movl $1, %edx
 	call do_syscall_trace
 	jmp resume_userspace
+END(syscall_exit_work)
 	CFI_ENDPROC

 	RING0_INT_FRAME			# can't unwind into user space anyway

@@ -542,15 +550,17 @@ syscall_fault:
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
+END(syscall_fault)

syscall_badsys:
 	movl $-ENOSYS,PT_EAX(%esp)
 	jmp resume_userspace
+END(syscall_badsys)
 	CFI_ENDPROC

 #define FIXUP_ESPFIX_STACK \
 	/* since we are on a wrong stack, we cant make it a C code :( */ \
-	movl %gs:PDA_cpu, %ebx; \
+	movl %fs:PDA_cpu, %ebx; \
 	PER_CPU(cpu_gdt_descr, %ebx); \
 	movl GDS_address(%ebx), %ebx; \
 	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \

@@ -581,9 +591,9 @@ syscall_badsys:
 ENTRY(interrupt)
 .text

-vector=0
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
+vector=0
 .rept NR_IRQS
 	ALIGN
  .if vector

@@ -592,11 +602,16 @@ ENTRY(irq_entries_start)
 1:	pushl $~(vector)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp common_interrupt
-.data
+ .previous
 	.long 1b
-.text
+ .text
 vector=vector+1
 .endr
+END(irq_entries_start)
+
+.previous
 END(interrupt)
+.previous

 /*
  * the CPU automatically disables interrupts when executing an IRQ vector,

@@ -609,6 +624,7 @@ common_interrupt:
 	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr
+ENDPROC(common_interrupt)
 	CFI_ENDPROC

 #define BUILD_INTERRUPT(name, nr)	\

@@ -621,18 +637,24 @@ ENTRY(name)				\
 	movl %esp,%eax;			\
 	call smp_/**/name;		\
 	jmp ret_from_intr;		\
-	CFI_ENDPROC
+	CFI_ENDPROC;			\
+ENDPROC(name)

 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"

+/* This alternate entry is needed because we hijack the apic LVTT */
+#if defined(CONFIG_VMI) && defined(CONFIG_X86_LOCAL_APIC)
+BUILD_INTERRUPT(apic_vmi_timer_interrupt,LOCAL_TIMER_VECTOR)
+#endif
+
 KPROBE_ENTRY(page_fault)
 	RING0_EC_FRAME
 	pushl $do_page_fault
 	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
-	/* the function address is in %gs's slot on the stack */
+	/* the function address is in %fs's slot on the stack */
 	pushl %es
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET es, 0*/

@@ -661,20 +683,20 @@ error_code:
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ebx, 0
 	cld
-	pushl %gs
+	pushl %fs
 	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET gs, 0*/
+	/*CFI_REL_OFFSET fs, 0*/
 	movl $(__KERNEL_PDA), %ecx
-	movl %ecx, %gs
+	movl %ecx, %fs
 	UNWIND_ESPFIX_STACK
 	popl %ecx
 	CFI_ADJUST_CFA_OFFSET -4
 	/*CFI_REGISTER es, ecx*/
-	movl PT_GS(%esp), %edi		# get the function address
+	movl PT_FS(%esp), %edi		# get the function address
 	movl PT_ORIG_EAX(%esp), %edx	# get the error code
 	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
-	mov  %ecx, PT_GS(%esp)
-	/*CFI_REL_OFFSET gs, ES*/
+	mov  %ecx, PT_FS(%esp)
+	/*CFI_REL_OFFSET fs, ES*/
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es

@@ -692,6 +714,7 @@ ENTRY(coprocessor_error)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(coprocessor_error)

 ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME

@@ -701,6 +724,7 @@ ENTRY(simd_coprocessor_error)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(simd_coprocessor_error)

 ENTRY(device_not_available)
 	RING0_INT_FRAME

@@ -721,6 +745,7 @@ device_not_available_emulate:
 	CFI_ADJUST_CFA_OFFSET -4
 	jmp ret_from_exception
 	CFI_ENDPROC
+END(device_not_available)

 /*
  * Debug traps and NMI can happen at the one SYSENTER instruction

@@ -864,10 +889,12 @@ ENTRY(native_iret)
 	.align 4
 	.long 1b,iret_exc
 .previous
+END(native_iret)

 ENTRY(native_irq_enable_sysexit)
 	sti
 	sysexit
+END(native_irq_enable_sysexit)
 #endif

 KPROBE_ENTRY(int3)

@@ -890,6 +917,7 @@ ENTRY(overflow)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(overflow)

 ENTRY(bounds)
 	RING0_INT_FRAME

@@ -899,6 +927,7 @@ ENTRY(bounds)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(bounds)

 ENTRY(invalid_op)
 	RING0_INT_FRAME

@@ -908,6 +937,7 @@ ENTRY(invalid_op)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(invalid_op)

 ENTRY(coprocessor_segment_overrun)
 	RING0_INT_FRAME

@@ -917,6 +947,7 @@ ENTRY(coprocessor_segment_overrun)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(coprocessor_segment_overrun)

 ENTRY(invalid_TSS)
 	RING0_EC_FRAME

@@ -924,6 +955,7 @@ ENTRY(invalid_TSS)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(invalid_TSS)

 ENTRY(segment_not_present)
 	RING0_EC_FRAME

@@ -931,6 +963,7 @@ ENTRY(segment_not_present)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(segment_not_present)

 ENTRY(stack_segment)
 	RING0_EC_FRAME

@@ -938,6 +971,7 @@ ENTRY(stack_segment)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(stack_segment)

 KPROBE_ENTRY(general_protection)
 	RING0_EC_FRAME

@@ -953,6 +987,7 @@ ENTRY(alignment_check)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(alignment_check)

 ENTRY(divide_error)
 	RING0_INT_FRAME

@@ -962,6 +997,7 @@ ENTRY(divide_error)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(divide_error)

 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)

@@ -972,6 +1008,7 @@ ENTRY(machine_check)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(machine_check)
 #endif

 ENTRY(spurious_interrupt_bug)

@@ -982,6 +1019,7 @@ ENTRY(spurious_interrupt_bug)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
+END(spurious_interrupt_bug)

 ENTRY(kernel_thread_helper)
 	pushl $0			# fake return address for unwinder

@@ -53,6 +53,7 @@
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
.section .text.head,"ax",@progbits
ENTRY(startup_32)

#ifdef CONFIG_PARAVIRT

@@ -141,16 +142,25 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
    jb 10b
    movl %edi,(init_pg_tables_end - __PAGE_OFFSET)

#ifdef CONFIG_SMP
    xorl %ebx,%ebx      /* This is the boot CPU (BSP) */
    jmp 3f

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt_table GDT
 * for us.
 *
 * If cpu hotplug is not supported then this code can go in init section
 * which will be freed later
 */

#ifdef CONFIG_HOTPLUG_CPU
.section .text,"ax",@progbits
#else
.section .init.text,"ax",@progbits
#endif

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
    cld
    movl $(__BOOT_DS),%eax

@@ -208,8 +218,8 @@ ENTRY(startup_32_smp)
    xorl %ebx,%ebx
    incl %ebx

3:
#endif /* CONFIG_SMP */
3:

/*
 * Enable paging

@@ -309,7 +319,7 @@ is386: movl $2,%ecx      # set MP

    call check_x87
    call setup_pda
    lgdt cpu_gdt_descr
    lgdt early_gdt_descr
    lidt idt_descr
    ljmp $(__KERNEL_CS),$1f
1:  movl $(__KERNEL_DS),%eax    # reload all the segment registers

@@ -319,12 +329,12 @@ is386: movl $2,%ecx      # set MP
    movl %eax,%ds
    movl %eax,%es

    xorl %eax,%eax          # Clear FS and LDT
    movl %eax,%fs
    xorl %eax,%eax          # Clear GS and LDT
    movl %eax,%gs
    lldt %ax

    movl $(__KERNEL_PDA),%eax
    mov %eax,%gs
    mov %eax,%fs

    cld         # gcc2 wants the direction flag cleared at all times
    pushl $0        # fake return address for unwinder

@@ -360,12 +370,12 @@ check_x87:
 * cpu_gdt_table and boot_pda; for secondary CPUs, these will be
 * that CPU's GDT and PDA.
 */
setup_pda:
ENTRY(setup_pda)
    /* get the PDA pointer */
    movl start_pda, %eax

    /* slot the PDA address into the GDT */
    mov cpu_gdt_descr+2, %ecx
    mov early_gdt_descr+2, %ecx
    mov %ax, (__KERNEL_PDA+0+2)(%ecx)   /* base & 0x0000ffff */
    shr $16, %eax
    mov %al, (__KERNEL_PDA+4+0)(%ecx)   /* base & 0x00ff0000 */

@@ -492,6 +502,7 @@ ignore_int:
#endif
    iret

.section .text
#ifdef CONFIG_PARAVIRT
startup_paravirt:
    cld

@@ -502,10 +513,11 @@ startup_paravirt:
    pushl %ecx
    pushl %eax

    /* paravirt.o is last in link, and that probe fn never returns */
    pushl $__start_paravirtprobe
1:
    movl 0(%esp), %eax
    cmpl $__stop_paravirtprobe, %eax
    je unhandled_paravirt
    pushl (%eax)
    movl 8(%esp), %eax
    call *(%esp)

@@ -517,6 +529,10 @@ startup_paravirt:

    addl $4, (%esp)
    jmp 1b

unhandled_paravirt:
    /* Nothing wanted us: we're screwed. */
    ud2
#endif

/*

@@ -581,7 +597,7 @@ idt_descr:

# boot GDT descriptor (later on used by CPU#0):
    .word 0             # 32 bit align gdt_desc.address
ENTRY(cpu_gdt_descr)
ENTRY(early_gdt_descr)
    .word GDT_ENTRIES*8-1
    .long cpu_gdt_table

@@ -1,4 +1,5 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>

@@ -6,17 +7,278 @@
#include <asm/hpet.h>
#include <asm/io.h>

extern struct clock_event_device *global_clock_event;

#define HPET_MASK   CLOCKSOURCE_MASK(32)
#define HPET_SHIFT  22

/* FSEC = 10^-15 NSEC = 10^-9 */
#define FSEC_PER_NSEC   1000000

static void __iomem *hpet_ptr;
/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long hpet_address;
static void __iomem * hpet_virt_address;

static inline unsigned long hpet_readl(unsigned long a)
{
    return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned long d, unsigned long a)
{
    writel(d, hpet_virt_address + a);
}

/*
 * HPET command line enable / disable
 */
static int boot_hpet_disable;

static int __init hpet_setup(char* str)
{
    if (str) {
        if (!strncmp("disable", str, 7))
            boot_hpet_disable = 1;
    }
    return 1;
}
__setup("hpet=", hpet_setup);

static inline int is_hpet_capable(void)
{
    return (!boot_hpet_disable && hpet_address);
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
    return is_hpet_capable() && hpet_legacy_int_enabled;
}

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET
static void hpet_reserve_platform_timers(unsigned long id)
{
    struct hpet __iomem *hpet = hpet_virt_address;
    struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
    unsigned int nrtimers, i;
    struct hpet_data hd;

    nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

    memset(&hd, 0, sizeof (hd));
    hd.hd_phys_address = hpet_address;
    hd.hd_address = hpet_virt_address;
    hd.hd_nirqs = nrtimers;
    hd.hd_flags = HPET_DATA_PLATFORM;
    hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
    hpet_reserve_timer(&hd, 1);
#endif

    hd.hd_irq[0] = HPET_LEGACY_8254;
    hd.hd_irq[1] = HPET_LEGACY_RTC;

    for (i = 2; i < nrtimers; timer++, i++)
        hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
            Tn_INT_ROUTE_CNF_SHIFT;

    hpet_alloc(&hd);

}
#else
static void hpet_reserve_platform_timers(unsigned long id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_period;

static void hpet_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt);
static int hpet_next_event(unsigned long delta,
                           struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
    .name           = "hpet",
    .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
    .set_mode       = hpet_set_mode,
    .set_next_event = hpet_next_event,
    .shift          = 32,
    .irq            = 0,
};

static void hpet_start_counter(void)
{
    unsigned long cfg = hpet_readl(HPET_CFG);

    cfg &= ~HPET_CFG_ENABLE;
    hpet_writel(cfg, HPET_CFG);
    hpet_writel(0, HPET_COUNTER);
    hpet_writel(0, HPET_COUNTER + 4);
    cfg |= HPET_CFG_ENABLE;
    hpet_writel(cfg, HPET_CFG);
}

static void hpet_enable_int(void)
{
    unsigned long cfg = hpet_readl(HPET_CFG);

    cfg |= HPET_CFG_LEGACY;
    hpet_writel(cfg, HPET_CFG);
    hpet_legacy_int_enabled = 1;
}

static void hpet_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
    unsigned long cfg, cmp, now;
    uint64_t delta;

    switch(mode) {
    case CLOCK_EVT_MODE_PERIODIC:
        delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
        delta >>= hpet_clockevent.shift;
        now = hpet_readl(HPET_COUNTER);
        cmp = now + (unsigned long) delta;
        cfg = hpet_readl(HPET_T0_CFG);
        cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
               HPET_TN_SETVAL | HPET_TN_32BIT;
        hpet_writel(cfg, HPET_T0_CFG);
        /*
         * The first write after writing TN_SETVAL to the
         * config register sets the counter value, the second
         * write sets the period.
         */
        hpet_writel(cmp, HPET_T0_CMP);
        udelay(1);
        hpet_writel((unsigned long) delta, HPET_T0_CMP);
        break;

    case CLOCK_EVT_MODE_ONESHOT:
        cfg = hpet_readl(HPET_T0_CFG);
        cfg &= ~HPET_TN_PERIODIC;
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        hpet_writel(cfg, HPET_T0_CFG);
        break;

    case CLOCK_EVT_MODE_UNUSED:
    case CLOCK_EVT_MODE_SHUTDOWN:
        cfg = hpet_readl(HPET_T0_CFG);
        cfg &= ~HPET_TN_ENABLE;
        hpet_writel(cfg, HPET_T0_CFG);
        break;
    }
}

static int hpet_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
    unsigned long cnt;

    cnt = hpet_readl(HPET_COUNTER);
    cnt += delta;
    hpet_writel(cnt, HPET_T0_CMP);

    return ((long)(hpet_readl(HPET_COUNTER) - cnt) > 0);
}
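
/*
 * A hedged aside, not part of the original patch: the return value of
 * hpet_next_event() relies on wraparound-safe 32-bit arithmetic. Casting
 * the unsigned difference to signed tells the clockevents core whether
 * the free-running counter has already raced past the comparator that
 * was just written, i.e. whether the event was set too close and must be
 * retried. A minimal sketch of the same idiom, with made-up values:
 *
 *	unsigned long cnt = 0xfffffff0UL + 0x20;  .. wraps to 0x10
 *	unsigned long now = 0x14;                 .. counter moved on
 *	int missed = (long)(now - cnt) > 0;       .. 4 > 0: event missed
 */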

/*
 * Try to setup the HPET timer
 */
int __init hpet_enable(void)
{
    unsigned long id;
    uint64_t hpet_freq;

    if (!is_hpet_capable())
        return 0;

    hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);

    /*
     * Read the period and check for a sane value:
     */
    hpet_period = hpet_readl(HPET_PERIOD);
    if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
        goto out_nohpet;

    /*
     * The period is a femtoseconds value. We need to calculate the
     * scaled math multiplication factor for nanosecond to hpet tick
     * conversion.
     */
    hpet_freq = 1000000000000000ULL;
    do_div(hpet_freq, hpet_period);
    hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
                                  NSEC_PER_SEC, 32);
    /* Calculate the min / max delta */
    hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
                                                       &hpet_clockevent);
    hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
                                                       &hpet_clockevent);

    /*
     * Read the HPET ID register to retrieve the IRQ routing
     * information and the number of channels
     */
    id = hpet_readl(HPET_ID);

#ifdef CONFIG_HPET_EMULATE_RTC
    /*
     * The legacy routing mode needs at least two channels, tick timer
     * and the rtc emulation channel.
     */
    if (!(id & HPET_ID_NUMBER))
        goto out_nohpet;
#endif

    /* Start the counter */
    hpet_start_counter();

    if (id & HPET_ID_LEGSUP) {
        hpet_enable_int();
        hpet_reserve_platform_timers(id);
        /*
         * Start hpet with the boot cpu mask and make it
         * global after the IO_APIC has been initialized.
         */
        hpet_clockevent.cpumask = cpumask_of_cpu(0);
        clockevents_register_device(&hpet_clockevent);
        global_clock_event = &hpet_clockevent;
        return 1;
    }
    return 0;

out_nohpet:
    iounmap(hpet_virt_address);
    hpet_virt_address = NULL;
    return 0;
}

/*
 * Clock source related code
 */
static cycle_t read_hpet(void)
{
    return (cycle_t)readl(hpet_ptr);
    return (cycle_t)hpet_readl(HPET_COUNTER);
}

static struct clocksource clocksource_hpet = {

@@ -24,28 +286,17 @@ static struct clocksource clocksource_hpet = {
    .rating     = 250,
    .read       = read_hpet,
    .mask       = HPET_MASK,
    .mult       = 0, /* set below */
    .shift      = HPET_SHIFT,
    .is_continuous  = 1,
    .flags      = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init init_hpet_clocksource(void)
{
    unsigned long hpet_period;
    void __iomem* hpet_base;
    u64 tmp;
    int err;

    if (!is_hpet_enabled())
    if (!hpet_virt_address)
        return -ENODEV;

    /* calculate the hpet address: */
    hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
    hpet_ptr = hpet_base + HPET_COUNTER;

    /* calculate the frequency: */
    hpet_period = readl(hpet_base + HPET_PERIOD);

    /*
     * hpet period is in femtoseconds per cycle
     * so we need to convert this to ns/cyc units

@@ -61,11 +312,218 @@ static int __init init_hpet_clocksource(void)
    do_div(tmp, FSEC_PER_NSEC);
    clocksource_hpet.mult = (u32)tmp;

    err = clocksource_register(&clocksource_hpet);
    if (err)
        iounmap(hpet_base);

    return err;
    return clocksource_register(&clocksource_hpet);
}

module_init(init_hpet_clocksource);
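
/*
 * Illustrative arithmetic, assuming a typical 14.31818 MHz HPET (the
 * numbers are not taken from the patch itself): hpet_period then reads
 * as roughly 69841279 fs, and the clocksource factor computed above is
 *
 *	mult = (69841279 << HPET_SHIFT) / FSEC_PER_NSEC ~= 292935555
 *
 * so ns = (cycles * mult) >> HPET_SHIFT scales one HPET cycle to about
 * 69.84 ns, matching the period as expected.
 */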

#ifdef CONFIG_HPET_EMULATE_RTC

/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When
 * HPET is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 *    is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64Hz or user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ    64
#define DEFAULT_RTC_SHIFT       6
#define RTC_NUM_INTS            1

static unsigned long hpet_rtc_flags;
static unsigned long hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static unsigned long hpet_t1_cmp;
static unsigned long hpet_default_delta;
static unsigned long hpet_pie_delta;
static unsigned long hpet_pie_limit;

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
    unsigned long cfg, cnt, delta, flags;

    if (!is_hpet_enabled())
        return 0;

    if (!hpet_default_delta) {
        uint64_t clc;

        clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
        clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
        hpet_default_delta = (unsigned long) clc;
    }

    if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
        delta = hpet_default_delta;
    else
        delta = hpet_pie_delta;

    local_irq_save(flags);

    cnt = delta + hpet_readl(HPET_COUNTER);
    hpet_writel(cnt, HPET_T1_CMP);
    hpet_t1_cmp = cnt;

    cfg = hpet_readl(HPET_T1_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_writel(cfg, HPET_T1_CFG);

    local_irq_restore(flags);

    return 1;
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
    if (!is_hpet_enabled())
        return 0;

    hpet_rtc_flags &= ~bit_mask;
    return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
    unsigned long oldbits = hpet_rtc_flags;

    if (!is_hpet_enabled())
        return 0;

    hpet_rtc_flags |= bit_mask;

    if (!oldbits)
        hpet_rtc_timer_init();

    return 1;
}

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
                        unsigned char sec)
{
    if (!is_hpet_enabled())
        return 0;

    hpet_alarm_time.tm_hour = hrs;
    hpet_alarm_time.tm_min = min;
    hpet_alarm_time.tm_sec = sec;

    return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
    uint64_t clc;

    if (!is_hpet_enabled())
        return 0;

    if (freq <= DEFAULT_RTC_INT_FREQ)
        hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
    else {
        clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
        do_div(clc, freq);
        clc >>= hpet_clockevent.shift;
        hpet_pie_delta = (unsigned long) clc;
    }
    return 1;
}
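
/*
 * A worked example under assumed numbers, not part of the original
 * change: for a requested 1024 Hz periodic interrupt on an assumed
 * 14.31818 MHz HPET, the else-branch above computes
 *
 *	clc  = mult * NSEC_PER_SEC;  .. cycles << shift, per second
 *	clc /= 1024;                 .. cycles << shift, per tick
 *	clc >>= shift;               .. ~13983 HPET cycles per tick
 *
 * i.e. one interrupt roughly every 976.56 us, as expected for 1024 Hz.
 */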

int hpet_rtc_dropped_irq(void)
{
    return is_hpet_enabled();
}

static void hpet_rtc_timer_reinit(void)
{
    unsigned long cfg, delta;
    int lost_ints = -1;

    if (unlikely(!hpet_rtc_flags)) {
        cfg = hpet_readl(HPET_T1_CFG);
        cfg &= ~HPET_TN_ENABLE;
        hpet_writel(cfg, HPET_T1_CFG);
        return;
    }

    if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
        delta = hpet_default_delta;
    else
        delta = hpet_pie_delta;

    /*
     * Increment the comparator value until we are ahead of the
     * current count.
     */
    do {
        hpet_t1_cmp += delta;
        hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
        lost_ints++;
    } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);

    if (lost_ints) {
        if (hpet_rtc_flags & RTC_PIE)
            hpet_pie_count += lost_ints;
        if (printk_ratelimit())
            printk(KERN_WARNING "rtc: lost %d interrupts\n",
                   lost_ints);
    }
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
    struct rtc_time curr_time;
    unsigned long rtc_int_flag = 0;

    hpet_rtc_timer_reinit();

    if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
        rtc_get_rtc_time(&curr_time);

    if (hpet_rtc_flags & RTC_UIE &&
        curr_time.tm_sec != hpet_prev_update_sec) {
        rtc_int_flag = RTC_UF;
        hpet_prev_update_sec = curr_time.tm_sec;
    }

    if (hpet_rtc_flags & RTC_PIE &&
        ++hpet_pie_count >= hpet_pie_limit) {
        rtc_int_flag |= RTC_PF;
        hpet_pie_count = 0;
    }

    if (hpet_rtc_flags & RTC_AIE &&
        (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
        (curr_time.tm_min == hpet_alarm_time.tm_min) &&
        (curr_time.tm_hour == hpet_alarm_time.tm_hour))
        rtc_int_flag |= RTC_AF;

    if (rtc_int_flag) {
        rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
        rtc_interrupt(rtc_int_flag, dev_id);
    }
    return IRQ_HANDLED;
}
#endif

@@ -2,7 +2,7 @@
 * i8253.c  8253/PIT functions
 *
 */
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/sysdev.h>

@@ -19,19 +19,99 @@
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);

void setup_pit_timer(void)
/*
 * HPET replaces the PIT, when enabled. So we need to know which of
 * the two timers is used.
 */
struct clock_event_device *global_clock_event;

/*
 * Initialize the PIT timer.
 *
 * This is also called after resume to bring the PIT into operation again.
 */
static void init_pit_timer(enum clock_event_mode mode,
                           struct clock_event_device *evt)
{
    unsigned long flags;

    spin_lock_irqsave(&i8253_lock, flags);
    outb_p(0x34,PIT_MODE);      /* binary, mode 2, LSB/MSB, ch 0 */
    udelay(10);
    outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
    udelay(10);
    outb(LATCH >> 8 , PIT_CH0); /* MSB */

    switch(mode) {
    case CLOCK_EVT_MODE_PERIODIC:
        /* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(0x34, PIT_MODE);
        udelay(10);
        outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
        udelay(10);
        outb(LATCH >> 8 , PIT_CH0); /* MSB */
        break;

    case CLOCK_EVT_MODE_ONESHOT:
    case CLOCK_EVT_MODE_SHUTDOWN:
    case CLOCK_EVT_MODE_UNUSED:
        /* One shot setup */
        outb_p(0x38, PIT_MODE);
        udelay(10);
        break;
    }
    spin_unlock_irqrestore(&i8253_lock, flags);
}

/*
 * Program the next event in oneshot mode
 *
 * Delta is given in PIT ticks
 */
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
    unsigned long flags;

    spin_lock_irqsave(&i8253_lock, flags);
    outb_p(delta & 0xff , PIT_CH0); /* LSB */
    outb(delta >> 8 , PIT_CH0);     /* MSB */
    spin_unlock_irqrestore(&i8253_lock, flags);

    return 0;
}

/*
 * On UP the PIT can serve all of the possible timer functions. On SMP systems
 * it can be solely used for the global tick.
 *
 * The profiling and update capabilities are switched off once the local apic
 * is registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
 * !using_apic_timer decisions in do_timer_interrupt_hook()
 */
struct clock_event_device pit_clockevent = {
    .name           = "pit",
    .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
    .set_mode       = init_pit_timer,
    .set_next_event = pit_next_event,
    .shift          = 32,
    .irq            = 0,
};

/*
 * Initialize the conversion factor and the min/max deltas of the clock event
 * structure and register the clock event source with the framework.
 */
void __init setup_pit_timer(void)
{
    /*
     * Start pit with the boot cpu mask and make it global after the
     * IO_APIC has been initialized.
     */
    pit_clockevent.cpumask = cpumask_of_cpu(0);
    pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
    pit_clockevent.max_delta_ns =
        clockevent_delta2ns(0x7FFF, &pit_clockevent);
    pit_clockevent.min_delta_ns =
        clockevent_delta2ns(0xF, &pit_clockevent);
    clockevents_register_device(&pit_clockevent);
    global_clock_event = &pit_clockevent;
}
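
/*
 * Illustrative arithmetic, not part of the patch: with the i8253
 * running at CLOCK_TICK_RATE = 1193182 Hz, div_sc() above yields
 *
 *	mult = (1193182 << 32) / 1000000000 ~= 5124677
 *
 * and clockevent_delta2ns(0x7FFF, ...) then caps one-shot programming
 * at roughly 27.4 ms, the longest interval the 16-bit PIT channel can
 * be asked to time without wrapping.
 */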

/*
 * Since the PIT overflows every tick, it's not very useful
 * to just read by itself. So use jiffies to emulate a free

@@ -46,7 +126,7 @@ static cycle_t pit_read(void)
    static u32 old_jifs;

    spin_lock_irqsave(&i8253_lock, flags);
    /*
     * Although our caller may have the read side of xtime_lock,
     * this is now a seqlock, and we are cheating in this routine
     * by having side effects on state that we cannot undo if

@@ -41,6 +41,7 @@ static void mask_and_ack_8259A(unsigned int);
static struct irq_chip i8259A_chip = {
    .name       = "XT-PIC",
    .mask       = disable_8259A_irq,
    .disable    = disable_8259A_irq,
    .unmask     = enable_8259A_irq,
    .mask_ack   = mask_and_ack_8259A,
};

@@ -409,12 +410,6 @@ void __init native_init_IRQ(void)
     */
    intr_init_hook();

    /*
     * Set the clock to HZ Hz, we already have a valid
     * vector now:
     */
    setup_pit_timer();

    /*
     * External FPU? Set up irq13 if so, for
     * original braindamaged IBM FERR coupling.

@@ -482,8 +482,8 @@ static void do_irq_balance(void)
    package_index = CPU_TO_PACKAGEINDEX(i);
    for (j = 0; j < NR_IRQS; j++) {
        unsigned long value_now, delta;
        /* Is this an active IRQ? */
        if (!irq_desc[j].action)
        /* Is this an active IRQ or balancing disabled ? */
        if (!irq_desc[j].action || irq_balancing_disabled(j))
            continue;
        if ( package_index == i )
            IRQ_DELTA(package_index,j) = 0;

@@ -1281,11 +1281,9 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
        trigger == IOAPIC_LEVEL)
        set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                      handle_fasteoi_irq, "fasteoi");
    else {
        irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
    else
        set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                      handle_edge_irq, "edge");
    }
    set_intr_gate(vector, interrupt[irq]);
}

@@ -1588,7 +1586,7 @@ void /*__init*/ print_local_APIC(void * dummy)
    v = apic_read(APIC_LVR);
    printk(KERN_INFO "... APIC VERSION: %08x\n", v);
    ver = GET_APIC_VERSION(v);
    maxlvt = get_maxlvt();
    maxlvt = lapic_get_maxlvt();

    v = apic_read(APIC_TASKPRI);
    printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

@@ -1920,7 +1918,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
static void __init setup_ioapic_ids_from_mpc(void) { }
#endif

static int no_timer_check __initdata;
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{

@@ -2310,7 +2308,7 @@ static inline void __init check_timer(void)

    disable_8259A_irq(0);
    set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
                                  "fasteio");
                                  "fasteoi");
    apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);   /* Fixed mode */
    enable_8259A_irq(0);

@@ -10,7 +10,6 @@
 * io_apic.c.)
 */

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>

@@ -19,19 +18,36 @@
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/idle.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifndef CONFIG_X86_LOCAL_APIC
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
    printk("unexpected IRQ trap at vector %02x\n", irq);
}
    printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * Currently unexpected vectors happen only on SMP and APIC.
     * We _must_ ack these because every local APIC has only N
     * irq slots per priority level, and a 'hanging, unacked' IRQ
     * holds up an irq slot - in excessive cases (when multiple
     * unexpected vectors occur) that might lock up the APIC
     * completely.
     * But only ack when the APIC is enabled -AK
     */
    if (cpu_has_apic)
        ack_APIC_irq();
#endif
}

#ifdef CONFIG_4KSTACKS
/*

@@ -61,6 +77,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
    union irq_ctx *curctx, *irqctx;
    u32 *isp;
#endif
    exit_idle();

    if (unlikely((unsigned)irq >= NR_IRQS)) {
        printk(KERN_EMERG "%s: cannot handle IRQ %d\n",

@@ -363,7 +363,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
    "   pushf\n"
    /* skip cs, eip, orig_eax */
    "   subl $12, %esp\n"
    "   pushl %gs\n"
    "   pushl %fs\n"
    "   pushl %ds\n"
    "   pushl %es\n"
    "   pushl %eax\n"

@@ -387,7 +387,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
    "   popl %edi\n"
    "   popl %ebp\n"
    "   popl %eax\n"
    /* skip eip, orig_eax, es, ds, gs */
    /* skip eip, orig_eax, es, ds, fs */
    "   addl $20, %esp\n"
    "   popf\n"
    "   ret\n");

@@ -408,7 +408,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
    spin_lock_irqsave(&kretprobe_lock, flags);
    head = kretprobe_inst_table_head(current);
    /* fixup registers */
    regs->xcs = __KERNEL_CS;
    regs->xcs = __KERNEL_CS | get_kernel_rpl();
    regs->eip = trampoline_address;
    regs->orig_eax = 0xffffffff;

@@ -384,7 +384,7 @@ static int do_microcode_update (void)
{
    long cursor = 0;
    int error = 0;
    void *new_mc;
    void *new_mc = NULL;
    int cpu;
    cpumask_t old;

@@ -68,7 +68,6 @@ static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
#ifdef CONFIG_SMP

struct msr_command {
    int cpu;
    int err;
    u32 reg;
    u32 data[2];

@@ -78,16 +77,14 @@ static void msr_smp_wrmsr(void *cmd_block)
{
    struct msr_command *cmd = (struct msr_command *)cmd_block;

    if (cmd->cpu == smp_processor_id())
        cmd->err = wrmsr_eio(cmd->reg, cmd->data[0], cmd->data[1]);
    cmd->err = wrmsr_eio(cmd->reg, cmd->data[0], cmd->data[1]);
}

static void msr_smp_rdmsr(void *cmd_block)
{
    struct msr_command *cmd = (struct msr_command *)cmd_block;

    if (cmd->cpu == smp_processor_id())
        cmd->err = rdmsr_eio(cmd->reg, &cmd->data[0], &cmd->data[1]);
    cmd->err = rdmsr_eio(cmd->reg, &cmd->data[0], &cmd->data[1]);
}

static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)

@@ -99,12 +96,11 @@ static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
    if (cpu == smp_processor_id()) {
        ret = wrmsr_eio(reg, eax, edx);
    } else {
        cmd.cpu = cpu;
        cmd.reg = reg;
        cmd.data[0] = eax;
        cmd.data[1] = edx;

        smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
        smp_call_function_single(cpu, msr_smp_wrmsr, &cmd, 1, 1);
        ret = cmd.err;
    }
    preempt_enable();

@@ -120,10 +116,9 @@ static inline int do_rdmsr(int cpu, u32 reg, u32 * eax, u32 * edx)
    if (cpu == smp_processor_id()) {
        ret = rdmsr_eio(reg, eax, edx);
    } else {
        cmd.cpu = cpu;
        cmd.reg = reg;

        smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
        smp_call_function_single(cpu, msr_smp_rdmsr, &cmd, 1, 1);

        *eax = cmd.data[0];
        *edx = cmd.data[1];

@@ -23,6 +23,7 @@
#include <linux/dmi.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>

#include <asm/smp.h>
#include <asm/nmi.h>

@@ -185,7 +186,8 @@ static __cpuinit inline int nmi_known_cpu(void)
{
    switch (boot_cpu_data.x86_vendor) {
    case X86_VENDOR_AMD:
        return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
        return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6)
                || (boot_cpu_data.x86 == 16));
    case X86_VENDOR_INTEL:
        if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
            return 1;

@@ -216,6 +218,28 @@ static __init void nmi_cpu_busy(void *data)
}
#endif

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
    u64 counter_val;
    unsigned int retval = hz;

    /*
     * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
     * are writable, with higher bits sign extending from bit 31.
     * So, we can only program the counter with 31 bit values and
     * 32nd bit should be 1, for 33.. to be 1.
     * Find the appropriate nmi_hz
     */
    counter_val = (u64)cpu_khz * 1000;
    do_div(counter_val, retval);
    if (counter_val > 0x7fffffffULL) {
        u64 count = (u64)cpu_khz * 1000;
        do_div(count, 0x7fffffffUL);
        retval = count + 1;
    }
    return retval;
}
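
/*
 * A worked example with assumed numbers, not from the original change:
 * on a hypothetical 2.4 GHz CPU (cpu_khz = 2400000) with nmi_hz = 1,
 * the desired preload of 2.4e9 cycles does not fit in the 31 writable
 * counter bits (max 0x7fffffff ~= 2.147e9). adjust_for_32bit_ctr()
 * then returns
 *
 *	2400000000 / 0x7fffffff + 1 = 2
 *
 * so the watchdog fires twice a second and the preload stays
 * representable.
 */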

static int __init check_nmi_watchdog(void)
{
    unsigned int *prev_nmi_count;

@@ -281,18 +305,10 @@ static int __init check_nmi_watchdog(void)
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        nmi_hz = 1;
        /*
         * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
         * are writable, with higher bits sign extending from bit 31.
         * So, we can only program the counter with 31 bit values and
         * 32nd bit should be 1, for 33.. to be 1.
         * Find the appropriate nmi_hz
         */
        if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
            ((u64)cpu_khz * 1000) > 0x7fffffffULL) {
            u64 count = (u64)cpu_khz * 1000;
            do_div(count, 0x7fffffffUL);
            nmi_hz = count + 1;

        if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
            wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
            nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        }
    }

@@ -369,6 +385,34 @@ void enable_timer_nmi_watchdog(void)
    }
}

static void __acpi_nmi_disable(void *__unused)
{
    apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
    if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
        on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
    apic_write_around(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
    if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
        on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

@@ -442,6 +486,17 @@ static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr)
    wrmsrl(perfctr_msr, 0 - count);
}

static void write_watchdog_counter32(unsigned int perfctr_msr,
                                     const char *descr)
{
    u64 count = (u64)cpu_khz * 1000;

    do_div(count, nmi_hz);
    if(descr)
        Dprintk("setting %s to -0x%08Lx\n", descr, count);
    wrmsr(perfctr_msr, (u32)(-count), 0);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

@@ -531,7 +586,8 @@ static int setup_p6_watchdog(void)

    /* setup the timer */
    wrmsr(evntsel_msr, evntsel, 0);
    write_watchdog_counter(perfctr_msr, "P6_PERFCTR0");
    nmi_hz = adjust_for_32bit_ctr(nmi_hz);
    write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0");
    apic_write(APIC_LVTPC, APIC_DM_NMI);
    evntsel |= P6_EVNTSEL0_ENABLE;
    wrmsr(evntsel_msr, evntsel, 0);

@@ -704,7 +760,8 @@ static int setup_intel_arch_watchdog(void)

    /* setup the timer */
    wrmsr(evntsel_msr, evntsel, 0);
    write_watchdog_counter(perfctr_msr, "INTEL_ARCH_PERFCTR0");
    nmi_hz = adjust_for_32bit_ctr(nmi_hz);
    write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0");
    apic_write(APIC_LVTPC, APIC_DM_NMI);
    evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
    wrmsr(evntsel_msr, evntsel, 0);

@@ -762,7 +819,8 @@ void setup_apic_nmi_watchdog (void *unused)
    if (nmi_watchdog == NMI_LOCAL_APIC) {
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
            if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
            if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
                boot_cpu_data.x86 != 16)
                return;
            if (!setup_k7_watchdog())
                return;

@@ -916,9 +974,13 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
        cpu_clear(cpu, backtrace_mask);
    }

    sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
    /*
     * Take the local apic timer and PIT/HPET into account. We don't
     * know which one is active, when we have highres/dyntick on
     */
    sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_irqs(0);

    /* if the apic timer isn't firing, this cpu isn't doing much */
    /* if none of the timers is firing, this cpu isn't doing much */
    if (!touched && last_irq_sums[cpu] == sum) {
        /*
         * Ayiee, looks like this CPU is stuck ...

@@ -956,6 +1018,8 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
        dummy &= ~P4_CCCR_OVF;
        wrmsrl(wd->cccr_msr, dummy);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL);
    }
    else if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
             wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {

@@ -964,9 +1028,12 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
         * other P6 variant.
         * ArchPerfom/Core Duo also needs this */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* P6/ARCH_PERFMON has 32 bit counter write */
        write_watchdog_counter32(wd->perfctr_msr, NULL);
    } else {
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL);
    }
    /* start the cycle over again */
    write_watchdog_counter(wd->perfctr_msr, NULL);
    rc = 1;
} else if (nmi_watchdog == NMI_IO_APIC) {
    /* don't know how to accurately check for this.

@@ -92,7 +92,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
    return insn_len;
}

static fastcall unsigned long native_get_debugreg(int regno)
static unsigned long native_get_debugreg(int regno)
{
    unsigned long val = 0;  /* Damn you, gcc! */

@@ -115,7 +115,7 @@ static fastcall unsigned long native_get_debugreg(int regno)
    return val;
}

static fastcall void native_set_debugreg(int regno, unsigned long value)
static void native_set_debugreg(int regno, unsigned long value)
{
    switch (regno) {
    case 0:

@@ -146,55 +146,55 @@ void init_IRQ(void)
    paravirt_ops.init_IRQ();
}

static fastcall void native_clts(void)
static void native_clts(void)
{
    asm volatile ("clts");
}

static fastcall unsigned long native_read_cr0(void)
static unsigned long native_read_cr0(void)
{
    unsigned long val;
    asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
    return val;
}

static fastcall void native_write_cr0(unsigned long val)
static void native_write_cr0(unsigned long val)
{
    asm volatile("movl %0,%%cr0": :"r" (val));
}

static fastcall unsigned long native_read_cr2(void)
static unsigned long native_read_cr2(void)
{
    unsigned long val;
    asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
    return val;
}

static fastcall void native_write_cr2(unsigned long val)
static void native_write_cr2(unsigned long val)
{
    asm volatile("movl %0,%%cr2": :"r" (val));
}

static fastcall unsigned long native_read_cr3(void)
static unsigned long native_read_cr3(void)
{
    unsigned long val;
    asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
    return val;
}

static fastcall void native_write_cr3(unsigned long val)
static void native_write_cr3(unsigned long val)
{
    asm volatile("movl %0,%%cr3": :"r" (val));
}

static fastcall unsigned long native_read_cr4(void)
static unsigned long native_read_cr4(void)
{
    unsigned long val;
    asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
    return val;
}

static fastcall unsigned long native_read_cr4_safe(void)
static unsigned long native_read_cr4_safe(void)
{
    unsigned long val;
    /* This could fault if %cr4 does not exist */

@@ -207,51 +207,51 @@ static fastcall unsigned long native_read_cr4_safe(void)
    return val;
}

static fastcall void native_write_cr4(unsigned long val)
static void native_write_cr4(unsigned long val)
{
    asm volatile("movl %0,%%cr4": :"r" (val));
}

static fastcall unsigned long native_save_fl(void)
static unsigned long native_save_fl(void)
{
    unsigned long f;
    asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
    return f;
}

static fastcall void native_restore_fl(unsigned long f)
static void native_restore_fl(unsigned long f)
{
    asm volatile("pushl %0 ; popfl": /* no output */
                 :"g" (f)
                 :"memory", "cc");
}

static fastcall void native_irq_disable(void)
static void native_irq_disable(void)
{
    asm volatile("cli": : :"memory");
}

static fastcall void native_irq_enable(void)
static void native_irq_enable(void)
{
    asm volatile("sti": : :"memory");
}

static fastcall void native_safe_halt(void)
static void native_safe_halt(void)
{
    asm volatile("sti; hlt": : :"memory");
}

static fastcall void native_halt(void)
static void native_halt(void)
{
    asm volatile("hlt": : :"memory");
}

static fastcall void native_wbinvd(void)
static void native_wbinvd(void)
{
    asm volatile("wbinvd": : :"memory");
}

static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
static unsigned long long native_read_msr(unsigned int msr, int *err)
{
    unsigned long long val;

@@ -270,7 +270,7 @@ static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
    return val;
}

static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
static int native_write_msr(unsigned int msr, unsigned long long val)
{
    int err;
    asm volatile("2: wrmsr ; xorl %0,%0\n"

@@ -288,53 +288,53 @@ static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
    return err;
}

static fastcall unsigned long long native_read_tsc(void)
static unsigned long long native_read_tsc(void)
{
    unsigned long long val;
    asm volatile("rdtsc" : "=A" (val));
    return val;
}

static fastcall unsigned long long native_read_pmc(void)
static unsigned long long native_read_pmc(void)
{
    unsigned long long val;
    asm volatile("rdpmc" : "=A" (val));
    return val;
}

static fastcall void native_load_tr_desc(void)
static void native_load_tr_desc(void)
{
    asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

static fastcall void native_load_gdt(const struct Xgt_desc_struct *dtr)
static void native_load_gdt(const struct Xgt_desc_struct *dtr)
{
    asm volatile("lgdt %0"::"m" (*dtr));
}

static fastcall void native_load_idt(const struct Xgt_desc_struct *dtr)
static void native_load_idt(const struct Xgt_desc_struct *dtr)
{
    asm volatile("lidt %0"::"m" (*dtr));
}

static fastcall void native_store_gdt(struct Xgt_desc_struct *dtr)
static void native_store_gdt(struct Xgt_desc_struct *dtr)
{
    asm ("sgdt %0":"=m" (*dtr));
}

static fastcall void native_store_idt(struct Xgt_desc_struct *dtr)
static void native_store_idt(struct Xgt_desc_struct *dtr)
{
    asm ("sidt %0":"=m" (*dtr));
}

static fastcall unsigned long native_store_tr(void)
static unsigned long native_store_tr(void)
{
    unsigned long tr;
    asm ("str %0":"=r" (tr));
    return tr;
}

static fastcall void native_load_tls(struct thread_struct *t, unsigned int cpu)
static void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
    C(0); C(1); C(2);

@@ -348,22 +348,22 @@ static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32
    lp[1] = entry_high;
}

static fastcall void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
static void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
{
    native_write_dt_entry(dt, entrynum, low, high);
}

static fastcall void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
static void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
{
    native_write_dt_entry(dt, entrynum, low, high);
}

static fastcall void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
static void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
{
    native_write_dt_entry(dt, entrynum, low, high);
}

static fastcall void native_load_esp0(struct tss_struct *tss,
static void native_load_esp0(struct tss_struct *tss,
                                      struct thread_struct *thread)
{
    tss->esp0 = thread->esp0;

@@ -375,12 +375,12 @@ static fastcall void native_load_esp0(struct tss_struct *tss,
    }
}

static fastcall void native_io_delay(void)
static void native_io_delay(void)
{
    asm volatile("outb %al,$0x80");
}

static fastcall void native_flush_tlb(void)
static void native_flush_tlb(void)
{
    __native_flush_tlb();
}

@@ -389,49 +389,49 @@ static fastcall void native_flush_tlb(void)
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static fastcall void native_flush_tlb_global(void)
static void native_flush_tlb_global(void)
{
    __native_flush_tlb_global();
}

static fastcall void native_flush_tlb_single(u32 addr)
static void native_flush_tlb_single(u32 addr)
{
    __native_flush_tlb_single(addr);
}

#ifndef CONFIG_X86_PAE
static fastcall void native_set_pte(pte_t *ptep, pte_t pteval)
static void native_set_pte(pte_t *ptep, pte_t pteval)
{
    *ptep = pteval;
}

static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
{
    *ptep = pteval;
}

static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
    *pmdp = pmdval;
}

#else /* CONFIG_X86_PAE */

static fastcall void native_set_pte(pte_t *ptep, pte_t pte)
static void native_set_pte(pte_t *ptep, pte_t pte)
{
    ptep->pte_high = pte.pte_high;
    smp_wmb();
    ptep->pte_low = pte.pte_low;
}

static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
{
    ptep->pte_high = pte.pte_high;
    smp_wmb();
    ptep->pte_low = pte.pte_low;
}

static fastcall void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
static void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
    ptep->pte_low = 0;
    smp_wmb();

@@ -440,29 +440,29 @@ static fastcall void native_set_pte_present(struct mm_struct *mm, unsigned long
    ptep->pte_low = pte.pte_low;
}

static fastcall void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
static void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
    set_64bit((unsigned long long *)ptep,pte_val(pteval));
}

static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
    set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
}

static fastcall void native_set_pud(pud_t *pudp, pud_t pudval)
static void native_set_pud(pud_t *pudp, pud_t pudval)
{
    *pudp = pudval;
}

static fastcall void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
static void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
    ptep->pte_low = 0;
    smp_wmb();
    ptep->pte_high = 0;
}

static fastcall void native_pmd_clear(pmd_t *pmd)
static void native_pmd_clear(pmd_t *pmd)
{
    u32 *tmp = (u32 *)pmd;
    *tmp = 0;

@@ -472,8 +472,8 @@ static fastcall void native_pmd_clear(pmd_t *pmd)
#endif /* CONFIG_X86_PAE */

/* These are in entry.S */
extern fastcall void native_iret(void);
extern fastcall void native_irq_enable_sysexit(void);
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);

static int __init print_banner(void)
{

@@ -482,9 +482,6 @@ static int __init print_banner(void)
}
core_initcall(print_banner);

/* We simply declare start_kernel to be the paravirt probe of last resort. */
paravirt_probe(start_kernel);

struct paravirt_ops paravirt_ops = {
    .name = "bare hardware",
    .paravirt_enabled = 0,

@@ -544,12 +541,21 @@ struct paravirt_ops paravirt_ops = {
    .apic_write = native_apic_write,
    .apic_write_atomic = native_apic_write_atomic,
    .apic_read = native_apic_read,
    .setup_boot_clock = setup_boot_APIC_clock,
    .setup_secondary_clock = setup_secondary_APIC_clock,
#endif
    .set_lazy_mode = (void *)native_nop,

    .flush_tlb_user = native_flush_tlb,
    .flush_tlb_kernel = native_flush_tlb_global,
    .flush_tlb_single = native_flush_tlb_single,

    .alloc_pt = (void *)native_nop,
    .alloc_pd = (void *)native_nop,
    .alloc_pd_clone = (void *)native_nop,
    .release_pt = (void *)native_nop,
    .release_pd = (void *)native_nop,

    .set_pte = native_set_pte,
    .set_pte_at = native_set_pte_at,
    .set_pmd = native_set_pmd,

@@ -565,6 +571,8 @@ struct paravirt_ops paravirt_ops = {

    .irq_enable_sysexit = native_irq_enable_sysexit,
    .iret = native_iret,

    .startup_ipi_hook = (void *)native_nop,
};

/*

@@ -0,0 +1,20 @@
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>

static __init int add_pcspkr(void)
{
    struct platform_device *pd;
    int ret;

    pd = platform_device_alloc("pcspkr", -1);
    if (!pd)
        return -ENOMEM;

    ret = platform_device_add(pd);
    if (ret)
        platform_device_put(pd);

    return ret;
}
device_initcall(add_pcspkr);
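
/*
 * A general usage note, hedged and not specific to this commit: the
 * alloc/add/put sequence above is the canonical minimal registration.
 * platform_device_alloc() returns the device with one reference,
 * platform_device_add() hands it to the driver core, and the put on
 * failure drops the last reference and frees it. A matching driver
 * only needs the same name; a hypothetical sketch:
 *
 *	static int pcspkr_probe(struct platform_device *dev)
 *	{
 *		return 0;	.. bind succeeded
 *	}
 *
 *	static struct platform_driver pcspkr_driver = {
 *		.probe	= pcspkr_probe,
 *		.driver	= { .name = "pcspkr" },
 *	};
 */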

@@ -38,6 +38,7 @@
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

@@ -48,6 +49,7 @@
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#include <asm/idle.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

@@ -80,6 +82,42 @@ void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
    atomic_notifier_chain_register(&idle_notifier, n);
}

void idle_notifier_unregister(struct notifier_block *n)
{
    atomic_notifier_chain_unregister(&idle_notifier, n);
}

static DEFINE_PER_CPU(volatile unsigned long, idle_state);

void enter_idle(void)
{
    /* needs to be atomic w.r.t. interrupts, not against other CPUs */
    __set_bit(0, &__get_cpu_var(idle_state));
    atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
    /* needs to be atomic w.r.t. interrupts, not against other CPUs */
    if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
        return;
    atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

void exit_idle(void)
{
    if (current->pid)
        return;
    __exit_idle();
}
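
/*
 * A usage sketch, not part of the patch: consumers of the chain above
 * register a callback that is invoked with IDLE_START and IDLE_END
 * around each idle period, e.g.
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			do_something_cheap();	.. cpu is about to idle
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_cb,
 *	};
 *
 * followed by idle_notifier_register(&my_idle_nb); my_idle_cb and
 * do_something_cheap are hypothetical names.
 */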

void disable_hlt(void)
{
	hlt_counter++;
@@ -130,6 +168,7 @@ EXPORT_SYMBOL(default_idle);
 */
static void poll_idle (void)
{
	local_irq_enable();
	cpu_relax();
}

@@ -173,6 +212,7 @@ void cpu_idle(void)

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

@@ -189,8 +229,18 @@ void cpu_idle(void)
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			idle();
			__exit_idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();

@@ -243,7 +293,11 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
			__sti_mwait(eax, ecx);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
	}
}
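
/*
 * Note on the MONITOR/MWAIT sequence above (editorial, illustrative):
 * __monitor() arms the cache line holding thread_info->flags, and
 * __mwait()/__sti_mwait() then halt until that line is written (for
 * example when TIF_NEED_RESCHED is set) or an interrupt arrives.  The
 * need_resched() recheck between the two closes the window where the
 * flag was already set before MONITOR executed.
 */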

@@ -308,8 +362,8 @@ void show_regs(struct pt_regs * regs)
		regs->eax,regs->ebx,regs->ecx,regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x GS: %04x\n",
		0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
	printk(" DS: %04x ES: %04x FS: %04x\n",
		0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);

	cr0 = read_cr0();
	cr2 = read_cr2();
@@ -340,7 +394,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)

	regs.xds = __USER_DS;
	regs.xes = __USER_DS;
	regs.xgs = __KERNEL_PDA;
	regs.xfs = __KERNEL_PDA;
	regs.orig_eax = -1;
	regs.eip = (unsigned long) kernel_thread_helper;
	regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -425,7 +479,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(fs,p->thread.fs);
	savesegment(gs,p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -501,8 +555,8 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	savesegment(fs,dump->regs.fs);
	dump->regs.gs = regs->xgs;
	dump->regs.fs = regs->xfs;
	savesegment(gs,dump->regs.gs);
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
@@ -653,7 +707,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
	load_esp0(tss, next);

	/*
	 * Save away %fs. No need to save %gs, as it was saved on the
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
@@ -662,7 +716,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(fs, prev->fs);
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
@@ -670,14 +724,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
	load_TLS(next, cpu);

	/*
	 * Restore %fs if needed.
	 *
	 * Glibc normally makes %fs be zero.
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (unlikely(prev->fs | next->fs))
		loadsegment(fs, next->fs);

	write_pda(pcurrent, next_p);
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
@@ -688,6 +741,15 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas

	disable_tsc(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
@@ -695,6 +757,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	write_pda(pcurrent, next_p);

	return prev_p;
}
@@ -89,14 +89,14 @@ static int putreg(struct task_struct *child,
			  unsigned long regno, unsigned long value)
{
	switch (regno >> 2) {
		case FS:
		case GS:
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.fs = value;
			child->thread.gs = value;
			return 0;
		case DS:
		case ES:
		case GS:
		case FS:
			if (value && (value & 3) != 3)
				return -EIO;
			value &= 0xffff;
@@ -112,7 +112,7 @@ static int putreg(struct task_struct *child,
			value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
			break;
	}
	if (regno > ES*4)
	if (regno > FS*4)
		regno -= 1*4;
	put_stack_long(child, regno, value);
	return 0;
@@ -124,18 +124,18 @@ static unsigned long getreg(struct task_struct *child,
	unsigned long retval = ~0UL;

	switch (regno >> 2) {
		case FS:
			retval = child->thread.fs;
		case GS:
			retval = child->thread.gs;
			break;
		case DS:
		case ES:
		case GS:
		case FS:
		case SS:
		case CS:
			retval = 0xffff;
			/* fall through */
		default:
			if (regno > ES*4)
			if (regno > FS*4)
				regno -= 1*4;
			retval &= get_stack_long(child, regno);
	}

@@ -33,7 +33,6 @@
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/console.h>
#include <linux/mca.h>
#include <linux/root_dev.h>
@@ -60,6 +59,7 @@
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/io.h>
#include <asm/vmi.h>
#include <setup_arch.h>
#include <bios_ebda.h>

@@ -581,6 +581,14 @@ void __init setup_arch(char **cmdline_p)

	max_low_pfn = setup_memory();

#ifdef CONFIG_VMI
	/*
	 * Must be after max_low_pfn is determined, and before kernel
	 * pagetables are setup.
	 */
	vmi_init();
#endif

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator. Although the
@@ -651,28 +659,3 @@ void __init setup_arch(char **cmdline_p)
#endif
	tsc_init();
}

static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);

/*
 * Local Variables:
 * mode:c
 * c-file-style:"k&r"
 * c-basic-offset:8
 * End:
 */

@@ -21,6 +21,7 @@
#include <linux/suspend.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/binfmts.h>
#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
@@ -128,8 +129,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
			 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
			 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)

	COPY_SEG(gs);
	GET_SEG(fs);
	GET_SEG(gs);
	COPY_SEG(fs);
	COPY_SEG(es);
	COPY_SEG(ds);
	COPY(edi);
@@ -244,9 +245,9 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
{
	int tmp, err = 0;

	err |= __put_user(regs->xgs, (unsigned int __user *)&sc->gs);
	savesegment(fs, tmp);
	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
	err |= __put_user(regs->xfs, (unsigned int __user *)&sc->fs);
	savesegment(gs, tmp);
	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);

	err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
	err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
@@ -349,7 +350,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
		goto give_sigsegv;
	}

	restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
	if (current->binfmt->hasvdso)
		restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
	else
		restorer = (void *)&frame->retcode;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;

@@ -23,6 +23,7 @@

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <mach_apic.h>

/*
@@ -374,8 +375,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 * AK: x86-64 has a faster method that could be ported.
	 */
	spin_lock(&tlbstate_lock);

@@ -400,7 +400,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
@@ -624,6 +624,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	irq_exit();

@@ -63,6 +63,7 @@
#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
#include <asm/vmi.h>

/* Set if we find a B stepping CPU */
static int __devinitdata smp_b_stepping;
@@ -93,12 +94,6 @@ cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

/* TSC's upper 32 bits can't be written in earlier CPUs (before Prescott); there
 * is no way to resync one AP against BP. TBD: for Prescott and above, we
 * should use IA64's algorithm
 */
static int __devinitdata tsc_sync_disabled;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
@@ -215,151 +210,6 @@ static void __cpuinit smp_store_cpu_info(int id)
		;
}

/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.
 */

static struct {
	atomic_t start_flag;
	atomic_t count_start;
	atomic_t count_stop;
	unsigned long long values[NR_CPUS];
} tsc __cpuinitdata = {
	.start_flag = ATOMIC_INIT(0),
	.count_start = ATOMIC_INIT(0),
	.count_stop = ATOMIC_INIT(0),
};

#define NR_LOOPS 5

static void __init synchronize_tsc_bp(void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned int one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc.start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * all APs synchronize but they loop on '== num_cpus'
		 */
		while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
			cpu_relax();
		atomic_set(&tsc.count_stop, 0);
		wmb();
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc.count_start);

		rdtscll(tsc.values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
			cpu_relax();
		atomic_set(&tsc.count_start, 0);
		wmb();
		atomic_inc(&tsc.count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc.values[i];
			sum += t0;
		}
	}
	avg = sum;
	do_div(avg, num_booting_cpus());

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc.values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * We report bigger than 2 microseconds clock differences.
		 */
		if (delta > 2*one_usec) {
			long long realdelta;

			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta;
			do_div(realdelta, one_usec);
			if (tsc.values[i] < avg)
				realdelta = -realdelta;

			if (realdelta)
				printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
					"skew, fixed it up.\n", i, realdelta);
		}
	}
	if (!buggy)
		printk("passed.\n");
}

static void __cpuinit synchronize_tsc_ap(void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc.start_flag))
		cpu_relax();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc.count_start);
		while (atomic_read(&tsc.count_start) != num_booting_cpus())
			cpu_relax();

		rdtscll(tsc.values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc.count_stop);
		while (atomic_read(&tsc.count_stop) != num_booting_cpus())
			cpu_relax();
	}
}
#undef NR_LOOPS

extern void calibrate_delay(void);

static atomic_t init_deasserted;

@@ -437,20 +287,12 @@ static void __cpuinit smp_callin(void)
	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
		synchronize_tsc_ap();
}

static int cpucount;
@@ -545,18 +387,25 @@ static void __cpuinit start_secondary(void *unused)
	 * booting is too fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	secondary_cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_APIC_clock();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	setup_secondary_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
@@ -619,7 +468,6 @@ extern struct {
	unsigned short ss;
} stack_start;
extern struct i386_pda *start_pda;
extern struct Xgt_desc_struct cpu_gdt_descr;

#ifdef CONFIG_NUMA

@@ -749,7 +597,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
@@ -834,12 +682,19 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 (unsigned long) stack_start.esp);

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();
	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n",j);
@@ -1115,8 +970,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
	info.cpu = cpu;
	INIT_WORK(&info.task, do_warm_boot_cpu);

	tsc_sync_disabled = 1;

	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
@@ -1124,7 +977,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
	schedule_work(&info.task);
	wait_for_completion(&done);

	tsc_sync_disabled = 0;
	zap_low_mappings();
	ret = 0;
exit:
@@ -1320,13 +1172,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
	setup_boot_clock();
}

/* These are wrappers to interface to the new boot process. Someone
@@ -1461,9 +1307,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
	}

	local_irq_enable();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);

	/*
	 * Check TSC synchronization with the AP:
	 */
	check_tsc_sync_source(cpu);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

@@ -78,7 +78,7 @@ int __init sysenter_setup(void)
	syscall_pages[0] = virt_to_page(syscall_page);

#ifdef CONFIG_COMPAT_VDSO
	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY_EXEC);
	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
#endif

@@ -131,15 +131,13 @@ unsigned long profile_pc(struct pt_regs *regs)
	unsigned long pc = instruction_pointer(regs);

#ifdef CONFIG_SMP
	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) &&
	    in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		return *(unsigned long *)(regs->ebp + 4);
#else
		unsigned long *sp;
		if ((regs->xcs & 3) == 0)
			sp = (unsigned long *)&regs->esp;
		else
			sp = (unsigned long *)regs->esp;
		unsigned long *sp = (unsigned long *)&regs->esp;

		/* Return address is either directly at stack pointer
		   or above a saved eflags. Eflags has bits 22-31 zero,
		   kernel addresses don't. */
@@ -161,15 +159,6 @@ EXPORT_SYMBOL(profile_pc);
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
	 * the irq version of write_lock because as just said we have irq
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);

#ifdef CONFIG_X86_IO_APIC
	if (timer_ack) {
		/*
@@ -188,7 +177,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)

	do_timer_interrupt_hook();

	if (MCA_bus) {
		/* The PS/2 uses level-triggered interrupts. You can't
		   turn them off, nor would you want to (any attempt to
@@ -203,18 +191,11 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
		outb_p( irq_v|0x80, 0x61 );	/* reset the IRQ */
	}

	write_sequnlock(&xtime_lock);

#ifdef CONFIG_X86_LOCAL_APIC
	if (using_apic_timer)
		smp_send_timer_broadcast_ipi();
#endif

	return IRQ_HANDLED;
}

/* not static: needed by APM */
unsigned long get_cmos_time(void)
unsigned long read_persistent_clock(void)
{
	unsigned long retval;
	unsigned long flags;
@@ -227,11 +208,11 @@ unsigned long get_cmos_time(void)

	return retval;
}
EXPORT_SYMBOL(get_cmos_time);

static void sync_cmos_clock(unsigned long dummy);

static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
int no_sync_cmos_clock;

static void sync_cmos_clock(unsigned long dummy)
{
@@ -275,117 +256,20 @@ static void sync_cmos_clock(unsigned long dummy)

void notify_arch_cmos_timer(void)
{
	mod_timer(&sync_cmos_timer, jiffies + 1);
	if (!no_sync_cmos_clock)
		mod_timer(&sync_cmos_timer, jiffies + 1);
}

static long clock_cmos_diff;
static unsigned long sleep_start;

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	unsigned long ctime = get_cmos_time();

	clock_cmos_diff = -ctime;
	clock_cmos_diff += get_seconds();
	sleep_start = ctime;
	return 0;
}

static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	long sleep_length = (ctime - sleep_start) * HZ;
	struct timespec ts;

	if (sleep_length < 0) {
		printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
		/* The time after the resume must not be earlier than the time
		 * before the suspend or some nasty things will happen
		 */
		sleep_length = 0;
		ctime = sleep_start;
	}
#ifdef CONFIG_HPET_TIMER
	if (is_hpet_enabled())
		hpet_reenable();
#endif
	setup_pit_timer();

	sec = ctime + clock_cmos_diff;
	ts.tv_sec = sec;
	ts.tv_nsec = 0;
	do_settimeofday(&ts);
	write_seqlock_irqsave(&xtime_lock, flags);
	jiffies_64 += sleep_length;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	touch_softlockup_watchdog();
	return 0;
}

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};


/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);

#ifdef CONFIG_HPET_TIMER
extern void (*late_time_init)(void);
/* Duplicate of time_init() below, with hpet_enable part added */
static void __init hpet_time_init(void)
{
	struct timespec ts;
	ts.tv_sec = get_cmos_time();
	ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);

	do_settimeofday(&ts);

	if ((hpet_enable() >= 0) && hpet_use_timer) {
		printk("Using HPET for base-timer\n");
	}

	if (!hpet_enable())
		setup_pit_timer();
	do_time_init();
}
#endif

void __init time_init(void)
{
	struct timespec ts;
#ifdef CONFIG_HPET_TIMER
	if (is_hpet_capable()) {
		/*
		 * HPET initialization needs to do memory-mapped io. So, let
		 * us do a late initialization after mem_init().
		 */
		late_time_init = hpet_time_init;
		return;
	}
#endif
	ts.tv_sec = get_cmos_time();
	ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);

	do_settimeofday(&ts);

	do_time_init();
	late_time_init = hpet_time_init;
}

@@ -94,6 +94,7 @@ asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
ATOMIC_NOTIFIER_HEAD(i386die_chain);

int register_die_notifier(struct notifier_block *nb)
@@ -291,10 +292,11 @@ void show_registers(struct pt_regs *regs)
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;
	unsigned short ss, gs;

	esp = (unsigned long) (&regs->esp);
	savesegment(ss, ss);
	savesegment(gs, gs);
	if (user_mode_vm(regs)) {
		in_kernel = 0;
		esp = regs->esp;
@@ -313,8 +315,8 @@ void show_registers(struct pt_regs *regs)
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
		TASK_COMM_LEN, current->comm, current->pid,
		current_thread_info(), current, current->thread_info);
@@ -324,7 +326,8 @@ void show_registers(struct pt_regs *regs)
	 */
	if (in_kernel) {
		u8 *eip;
		int code_bytes = 64;
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;

		printk("\n" KERN_EMERG "Stack: ");
@@ -332,14 +335,14 @@ void show_registers(struct pt_regs *regs)

		printk(KERN_EMERG "Code: ");

		eip = (u8 *)regs->eip - 43;
		eip = (u8 *)regs->eip - code_prologue;
		if (eip < (u8 *)PAGE_OFFSET ||
			probe_kernel_address(eip, c)) {
			/* try starting at EIP */
			eip = (u8 *)regs->eip;
			code_bytes = 32;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_bytes; i++, eip++) {
		for (i = 0; i < code_len; i++, eip++) {
			if (eip < (u8 *)PAGE_OFFSET ||
				probe_kernel_address(eip, c)) {
				printk(" Bad EIP value.");
@@ -1191,3 +1194,13 @@ static int __init kstack_setup(char *s)
	return 1;
}
__setup("kstack=", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);

@@ -23,6 +23,7 @@
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
unsigned long long (*custom_sched_clock)(void);

int tsc_disable;

@@ -59,12 +60,6 @@ static inline int check_tsc_unstable(void)
	return tsc_unstable;
}

void mark_tsc_unstable(void)
{
	tsc_unstable = 1;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
@@ -107,14 +102,14 @@ unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	if (unlikely(custom_sched_clock))
		return (*custom_sched_clock)();

	/*
	 * in the NUMA case we don't use the TSC as they are not
	 * synchronized across all CPUs.
	 * Fall back to jiffies if there's no TSC available:
	 */
#ifndef CONFIG_NUMA
	if (!cpu_khz || check_tsc_unstable())
#endif
		/* no locking but a rare wrong value is not a big deal */
	if (unlikely(tsc_disable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
@@ -194,13 +189,13 @@ EXPORT_SYMBOL(recalibrate_cpu_khz);
void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		return;
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz)
		return;
		goto out_no_tsc;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
@@ -208,38 +203,19 @@ void __init tsc_init(void)

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();
	return;

out_no_tsc:
	/*
	 * Set the tsc_disable flag if there's no TSC support, this
	 * makes it a fast flag for the kernel to see whether it
	 * should be using the TSC.
	 */
	tsc_disable = 1;
}

#ifdef CONFIG_CPU_FREQ

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(struct work_struct *work)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		cpufreq_get(cpu);

	cpufreq_delayed_issched = 0;
}

/*
 * if we notice cpufreq oddness, schedule a call to cpufreq_get() as it tries
 * to verify the CPU frequency the timing core thinks the CPU is running
 * at is still correct.
 */
static inline void cpufreq_delayed_get(void)
{
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
		schedule_work(&cpufreq_delayed_get_work);
	}
}

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
@@ -303,17 +279,9 @@ static struct notifier_block time_cpufreq_notifier_block = {

static int __init cpufreq_tsc(void)
{
	int ret;

	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (!ret)
		cpufreq_init = 1;

	return ret;
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

core_initcall(cpufreq_tsc);

#endif

@@ -321,7 +289,6 @@ core_initcall(cpufreq_tsc);
/* clock source code */

static unsigned long current_tsc_khz = 0;
static int tsc_update_callback(void);

static cycle_t read_tsc(void)
{
@@ -339,37 +306,28 @@ static struct clocksource clocksource_tsc = {
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.update_callback	= tsc_update_callback,
	.is_continuous		= 1,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};

static int tsc_update_callback(void)
void mark_tsc_unstable(void)
{
	int change = 0;

	/* check to see if we should switch to the safe clocksource: */
	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_reselect();
		change = 1;
	if (!tsc_unstable) {
		tsc_unstable = 1;
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}

	/* only update if tsc_khz has changed: */
	if (current_tsc_khz != tsc_khz) {
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
							clocksource_tsc.shift);
		change = 1;
	}

	return change;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	mark_tsc_unstable();
	tsc_unstable = 1;
	return 0;
}

@@ -386,66 +344,45 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{}
};

#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
static struct timer_list verify_tsc_freq_timer;

/* XXX - Probably should add locking */
static void verify_tsc_freq(unsigned long unused)
{
	static u64 last_tsc;
	static unsigned long last_jiffies;

	u64 now_tsc, interval_tsc;
	unsigned long now_jiffies, interval_jiffies;

	if (check_tsc_unstable())
		return;

	rdtscll(now_tsc);
	now_jiffies = jiffies;

	if (!last_jiffies) {
		goto out;
	}

	interval_jiffies = now_jiffies - last_jiffies;
	interval_tsc = now_tsc - last_tsc;
	interval_tsc *= HZ;
	do_div(interval_tsc, cpu_khz*1000);

	if (interval_tsc < (interval_jiffies * 3 / 4)) {
		printk("TSC appears to be running slowly. "
			"Marking it as unstable\n");
		mark_tsc_unstable();
		return;
	}

out:
	last_tsc = now_tsc;
	last_jiffies = now_jiffies;
	/* set us up to go off on the next interval: */
	mod_timer(&verify_tsc_freq_timer,
		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
static __init int unsynchronized_tsc(void)
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;

	/* assume multi socket systems are not synchronized: */
	return num_possible_cpus() > 1;
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long val;

	rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
	if ((val & RTSC_SUSP))
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

static int __init init_tsc_clocksource(void)
{

@@ -453,20 +390,16 @@ static int __init init_tsc_clocksource(void)
	/* check blacklist */
	dmi_check_system(bad_tsc_dmi_table);

	if (unsynchronized_tsc()) /* mark unstable if unsynced */
		mark_tsc_unstable();
	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable())
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;

		init_timer(&verify_tsc_freq_timer);
		verify_tsc_freq_timer.function = verify_tsc_freq;
		verify_tsc_freq_timer.expires =
			jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
		add_timer(&verify_tsc_freq_timer);
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	return clocksource_register(&clocksource_tsc);
}
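
/*
 * Worked example (editorial): a clocksource converts cycles to ns as
 * ns = (cycles * mult) >> shift, with mult = (10^6 << shift) / khz.
 * For a 2 GHz TSC (tsc_khz = 2000000) and the shift of 22 used above,
 * mult = (1000000 << 22) / 2000000 = 2097152, so
 * ns = (cycles * 2097152) >> 22 = cycles / 2, i.e. 0.5 ns per cycle.
 */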

@@ -0,0 +1 @@
#include "../../x86_64/kernel/tsc_sync.c"

@@ -96,12 +96,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
{
	int ret = 0;

	/* kernel_vm86_regs is missing xfs, so copy everything up to
	   (but not including) xgs, and then rest after xgs. */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
	ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
	/* kernel_vm86_regs is missing xgs, so copy everything up to
	   (but not including) orig_eax, and then rest including orig_eax. */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.xgs));
			    offsetof(struct kernel_vm86_regs, pt.orig_eax));

	return ret;
}

@@ -113,12 +113,13 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
{
	int ret = 0;

	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
	ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
	/* copy eax-xfs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
	/* copy orig_eax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.xgs) +
			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
			      extra);

	return ret;
}
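
/*
 * Editorial note on the offsetof() arithmetic above: struct vm86_regs
 * carries segment slots that struct kernel_vm86_regs lacks, so the copy
 * is split at pt.orig_eax -- everything below it is copied in one block,
 * and everything from orig_eax onwards (plus "extra" bytes in the
 * from-user case) is copied in a second block.
 */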

@@ -157,8 +158,8 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)

	ret = KVM86->regs32;

	loadsegment(fs, current->thread.saved_fs);
	ret->xgs = current->thread.saved_gs;
	ret->xfs = current->thread.saved_fs;
	loadsegment(gs, current->thread.saved_gs);

	return ret;
}
@@ -285,9 +286,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
	 */
	info->regs.pt.xds = 0;
	info->regs.pt.xes = 0;
	info->regs.pt.xgs = 0;
	info->regs.pt.xfs = 0;

	/* we are clearing fs later just before "jmp resume_userspace",
	/* we are clearing gs later just before "jmp resume_userspace",
	 * because it is not saved/restored.
	 */

@@ -321,8 +322,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
	 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	tsk->thread.saved_gs = info->regs32->xgs;
	tsk->thread.saved_fs = info->regs32->xfs;
	savesegment(gs, tsk->thread.saved_gs);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
@@ -342,7 +343,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"mov  %2, %%fs\n\t"
		"mov  %2, %%gs\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));

@@ -0,0 +1,949 @@
/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */

#include <linux/module.h>
#include <linux/license.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func)) (arg))

static struct vrom_header *vmi_rom;
static int license_gplok;
static int disable_nodelay;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;

/* Cached VMI operations */
struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*set_kernel_stack)(u32 selector, u32 esp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, u32, u32, u32);
	void (*flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
} vmi_ops;

/* XXX move this to alternative.h */
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];

/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

static char irq_save_disable_callout[] = {
	MNEM_CALL, 0, 0, 0, 0,
	MNEM_CALL, 0, 0, 0, 0,
	MNEM_RET
};
#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

static inline void patch_offset(unsigned char *eip, unsigned char *dest)
{
	*(unsigned long *)(eip+1) = dest-eip-5;
}
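
/*
 * Editorial note: patch_offset() fills in the rel32 operand of a 5-byte
 * x86 CALL/JMP (one opcode byte plus a 4-byte displacement).  The
 * displacement is relative to the next instruction, hence dest-eip-5.
 * For example, a CALL at 0xc0100000 targeting 0xc0100040 encodes as
 * "e8 3b 00 00 00" (0x40 - 0x5 = 0x3b).
 */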

static unsigned patch_internal(int call, unsigned len, void *insns)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch(rel->type) {
		case VMI_RELOCATION_CALL_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_CALL;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_JUMP_REL:
			BUG_ON(len < 5);
			*(char *)insns = MNEM_JMP;
			patch_offset(insns, rel->eip);
			return 5;

		case VMI_RELOCATION_NOP:
			/* obliterate the whole thing */
			return 0;

		case VMI_RELOCATION_NONE:
			/* leave native code in place */
			break;

		default:
			BUG();
	}
	return len;
}

/*
 * Apply patch if appropriate, return length of new instruction
 * sequence. The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
{
	switch (type) {
		case PARAVIRT_IRQ_DISABLE:
			return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
		case PARAVIRT_IRQ_ENABLE:
			return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
		case PARAVIRT_RESTORE_FLAGS:
			return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
		case PARAVIRT_SAVE_FLAGS:
			return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
		case PARAVIRT_SAVE_FLAGS_IRQ_DISABLE:
			if (len >= 10) {
				patch_internal(VMI_CALL_GetInterruptMask, len, insns);
				patch_internal(VMI_CALL_DisableInterrupts, len-5, insns+5);
				return 10;
			} else {
				/*
				 * You bastards didn't leave enough room to
				 * patch save_flags_irq_disable inline. Patch
				 * to a helper
				 */
				BUG_ON(len < 5);
				*(char *)insns = MNEM_CALL;
				patch_offset(insns, irq_save_disable_callout);
				return 5;
			}
		case PARAVIRT_INTERRUPT_RETURN:
			return patch_internal(VMI_CALL_IRET, len, insns);
		case PARAVIRT_STI_SYSEXIT:
			return patch_internal(VMI_CALL_SYSEXIT, len, insns);
		default:
			break;
	}
	return len;
}

/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx)
{
	int override = 0;
	if (*eax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*eax),
			"=b" (*ebx),
			"=c" (*ecx),
			"=d" (*edx)
		      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*edx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*edx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*edx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*edx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*edx &= ~X86_FEATURE_MTRR;
	}
}

static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new->a, new->b);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	u32 low, high;

	pack_descriptor(&low, &high, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESCTYPE_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_load_esp0(struct tss_struct *tss,
			  struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0);
}

static void vmi_flush_tlb_user(void)
{
	vmi_ops.flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops.flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
#ifdef CONFIG_NO_IDLE_HZ
static fastcall void vmi_safe_halt(void)
{
	int idle = vmi_stop_hz_timer();
	vmi_ops.halt();
	if (idle) {
		local_irq_disable();
		vmi_account_time_restart_hz_timer();
		local_irq_enable();
	}
}
#endif

#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;

void vmi_apply_boot_page_allocations(void)
{
	int i;
	BUG_ON(!mem_map);
	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type;
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}

static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}

static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);
	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = USER_PTRS_PER_PGD;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}

/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}

static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif

static void vmi_allocate_pt(u32 pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pd(u32 pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * data on it.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pt(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pd(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

/*
 * Helper macros for MMU update flags. We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush). We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs. We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
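
/*
 * Editorial illustration of the macros above: for a user-address update
 * in the current mm, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0) yields
 * VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK), while
 * an update against another address space degenerates to just
 * VMI_PAGE_PT, since no flush of the current TLB context is implied.
 */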
|
||||
|
||||
static void vmi_update_pte(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes,
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}
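
/*
 * Background note (added for clarity, not in the original source): with
 * PAE a PTE is 64 bits wide, so an in-place update of a live entry needs
 * set_64bit (cmpxchg8b) to avoid a window in which the hardware page
 * walker could observe a half-written entry; the plain vmi_set_pte path
 * is only used where the entry is not simultaneously live.
 */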

static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	/* Um, eww */
	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

#ifdef CONFIG_SMP
struct vmi_ap_state ap;
extern void setup_pda(void);

static void __init /* XXX cpu hotplug */
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PDA;
	ap.gs = 0;

	ap.eflags = 0;

	setup_pda();

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid);
}
#endif

static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	license_gplok = license_is_gpl_compatible(license);
	if (!license_gplok) {
		printk(KERN_WARNING "VMI: ROM license '%s' taints kernel... "
		       "inlining disabled\n",
		       license);
		add_taint(TAINT_PROPRIETARY_MODULE);
	}
	return 1;
}

/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}

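/*
 * Note added for clarity (not in the original source): the loop above
 * walks the legacy option-ROM window 0xC0000-0xDFFFF in 2 KB steps, the
 * granularity at which PC BIOSes place option ROM images, and relies on
 * check_vmi_rom() to verify the 0xaa55 expansion-ROM signature plus the
 * VMI-specific fields before accepting a candidate.
 */
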
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_rom)
		vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}

/*
 * Return a pointer to the VMI function or a NOP stub
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return (void *)vmi_nop;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type != VMI_RELOCATION_NONE) {			\
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);	\
		paravirt_ops.opname = (void *)rel->eip;		\
	}							\
} while (0)

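/*
 * Expansion example (added for clarity, not in the original source):
 * para_fill(save_fl, GetInterruptMask) asks the ROM for the relocation
 * of VMI_CALL_GetInterruptMask and, if one exists, points
 * paravirt_ops.save_fl directly at the ROM entry point; otherwise the
 * default native implementation is left in place.
 */
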
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;

	paravirt_ops.patch = vmi_patch;
	paravirt_ops.name = "vmi";

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.cpuid = (void *)rel->eip;
		paravirt_ops.cpuid = vmi_cpuid;
	}

	para_fill(clts, CLTS);
	para_fill(get_debugreg, GetDR);
	para_fill(set_debugreg, SetDR);
	para_fill(read_cr0, GetCR0);
	para_fill(read_cr2, GetCR2);
	para_fill(read_cr3, GetCR3);
	para_fill(read_cr4, GetCR4);
	para_fill(write_cr0, SetCR0);
	para_fill(write_cr2, SetCR2);
	para_fill(write_cr3, SetCR3);
	para_fill(write_cr4, SetCR4);
	para_fill(save_fl, GetInterruptMask);
	para_fill(restore_fl, SetInterruptMask);
	para_fill(irq_disable, DisableInterrupts);
	para_fill(irq_enable, EnableInterrupts);
	/* irq_save_disable !!! sheer pain */
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK],
		     (char *)paravirt_ops.save_fl);
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
		     (char *)paravirt_ops.irq_disable);
#ifndef CONFIG_NO_IDLE_HZ
	para_fill(safe_halt, Halt);
#else
	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
	paravirt_ops.safe_halt = vmi_safe_halt;
#endif
	para_fill(wbinvd, WBINVD);
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	para_fill(read_tsc, RDTSC);
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.set_tr = (void *)rel->eip;
		paravirt_ops.load_tr_desc = vmi_set_tr;
	}

	/* LDT is special, too */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops._set_ldt = (void *)rel->eip;
		paravirt_ops.set_ldt = vmi_set_ldt;
	}

	para_fill(load_gdt, SetGDT);
	para_fill(load_idt, SetIDT);
	para_fill(store_gdt, GetGDT);
	para_fill(store_idt, GetIDT);
	para_fill(store_tr, GetTR);
	paravirt_ops.load_tls = vmi_load_tls;
	para_fill(write_ldt_entry, WriteLDTEntry);
	para_fill(write_gdt_entry, WriteGDTEntry);
	para_fill(write_idt_entry, WriteIDTEntry);
	reloc = call_vrom_long_func(vmi_rom, get_reloc,
				    VMI_CALL_UpdateKernelStack);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.set_kernel_stack = (void *)rel->eip;
		paravirt_ops.load_esp0 = vmi_load_esp0;
	}

	para_fill(set_iopl_mask, SetIOPLMask);
	paravirt_ops.io_delay = (void *)vmi_nop;
	if (!disable_nodelay) {
		paravirt_ops.const_udelay = (void *)vmi_nop;
	}

	para_fill(set_lazy_mode, SetLazyMode);

	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB);
	if (rel->type != VMI_RELOCATION_NONE) {
		vmi_ops.flush_tlb = (void *)rel->eip;
		paravirt_ops.flush_tlb_user = vmi_flush_tlb_user;
		paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel;
	}
	para_fill(flush_tlb_single, InvalPage);

	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);

	paravirt_ops.alloc_pt = vmi_allocate_pt;
	paravirt_ops.alloc_pd = vmi_allocate_pd;
	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	paravirt_ops.release_pt = vmi_release_pt;
	paravirt_ops.release_pd = vmi_release_pd;
	paravirt_ops.set_pte = vmi_set_pte;
	paravirt_ops.set_pte_at = vmi_set_pte_at;
	paravirt_ops.set_pmd = vmi_set_pmd;
	paravirt_ops.pte_update = vmi_update_pte;
	paravirt_ops.pte_update_defer = vmi_update_pte_defer;
#ifdef CONFIG_X86_PAE
	paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
	paravirt_ops.set_pte_present = vmi_set_pte_present;
	paravirt_ops.set_pud = vmi_set_pud;
	paravirt_ops.pte_clear = vmi_pte_clear;
	paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	paravirt_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
	paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook;
	vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead);
	paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite);
	paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite);
#endif

	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		paravirt_ops.time_init = vmi_time_init;
		paravirt_ops.get_wallclock = vmi_get_wallclock;
		paravirt_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
		paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
#endif
		custom_sched_clock = vmi_sched_clock;
	}

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__start_parainstructions, __stop_parainstructions);

	vmi_bringup();

	return 1;
}

#undef para_fill

void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();
#ifdef CONFIG_SMP
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_nodelay"))
		disable_nodelay = 1;
	else if (!strcmp(arg, "disable_pge")) {
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
		disable_mtrr = 1;
	}
	return 0;
}

early_param("vmi", parse_vmi);
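
/*
 * Usage note (added for clarity, not in the original source): since
 * parse_vmi() does a whole-string strcmp, each boot parameter carries a
 * single option, e.g. booting with "vmi=disable_tsc" clears
 * X86_FEATURE_TSC before the feature bit is consumed; unrecognized
 * values are silently ignored.
 */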

@ -0,0 +1,499 @@
/*
 * VMI paravirtual timer support routines.
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to dhecht@vmware.com
 *
 */

/*
 * Portions of this code from arch/i386/kernel/timers/timer_tsc.c.
 * Portions of the CONFIG_NO_IDLE_HZ code from arch/s390/kernel/time.c.
 * See comments there for proper credits.
 */

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/clocksource.h>

#include <asm/timer.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/div64.h>
#include <asm/desc.h>

#include <asm/vmi.h>
#include <asm/vmi_time.h>

#include <mach_timer.h>
#include <io_ports.h>

#ifdef CONFIG_X86_LOCAL_APIC
#define VMI_ALARM_WIRING VMI_ALARM_WIRED_LVTT
#else
#define VMI_ALARM_WIRING VMI_ALARM_WIRED_IRQ0
#endif

/* Cached VMI operations */
struct vmi_timer_ops vmi_timer_ops;

#ifdef CONFIG_NO_IDLE_HZ

/* /proc/sys/kernel/hz_timer state. */
int sysctl_hz_timer;

/* Some stats */
static DEFINE_PER_CPU(unsigned long, vmi_idle_no_hz_irqs);
static DEFINE_PER_CPU(unsigned long, vmi_idle_no_hz_jiffies);
static DEFINE_PER_CPU(unsigned long, idle_start_jiffies);

#endif /* CONFIG_NO_IDLE_HZ */

/* Number of alarms per second.  By default this is CONFIG_VMI_ALARM_HZ. */
static int alarm_hz = CONFIG_VMI_ALARM_HZ;

/* Cache of the value get_cycle_frequency / HZ. */
static signed long long cycles_per_jiffy;

/* Cache of the value get_cycle_frequency / alarm_hz. */
static signed long long cycles_per_alarm;

/* The number of cycles accounted for by the 'jiffies'/'xtime' count.
 * Protected by xtime_lock. */
static unsigned long long real_cycles_accounted_system;

/* The number of cycles accounted for by update_process_times(), per cpu. */
static DEFINE_PER_CPU(unsigned long long, process_times_cycles_accounted_cpu);

/* The number of stolen cycles accounted, per cpu. */
static DEFINE_PER_CPU(unsigned long long, stolen_cycles_accounted_cpu);

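/*
 * Note added for clarity (not in the original source): the hypervisor
 * exposes three per-cpu cycle counters.  REAL counts wall-time cycles,
 * AVAILABLE counts cycles during which this VCPU actually ran, and
 * STOLEN is the difference (real - available), i.e. time taken by the
 * hypervisor or other VCPUs.  The accounting below tracks REAL for
 * jiffies/xtime and AVAILABLE for process times, and derives stolen
 * time from the gap between the two.
 */
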
/* Clock source. */
static cycle_t read_real_cycles(void)
{
	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
}

static cycle_t read_available_cycles(void)
{
	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
}

#if 0
static cycle_t read_stolen_cycles(void)
{
	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_STOLEN);
}
#endif	/* 0 */

static struct clocksource clocksource_vmi = {
	.name			= "vmi-timer",
	.rating			= 450,
	.read			= read_real_cycles,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};

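/*
 * Worked example (added for clarity, not in the original source): the
 * clocksource core converts cycles to nanoseconds as
 * ns = (cycles * mult) >> shift.  vmi_time_init() fills in mult with
 * clocksource_khz2mult(cycles_per_msec, 22), roughly
 * (10^6 << 22) / khz, so for a 1 GHz cycle counter mult comes out to
 * about 4194304 (2^22) and one cycle maps to one nanosecond.
 */
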
/* Timer interrupt handler. */
static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id);

static struct irqaction vmi_timer_irq = {
	vmi_timer_interrupt,
	SA_INTERRUPT,
	CPU_MASK_NONE,
	"VMI-alarm",
	NULL,
	NULL
};

/* Alarm rate */
static int __init vmi_timer_alarm_rate_setup(char* str)
{
	int alarm_rate;
	if (get_option(&str, &alarm_rate) == 1 && alarm_rate > 0) {
		alarm_hz = alarm_rate;
		printk(KERN_WARNING "VMI timer alarm HZ set to %d\n", alarm_hz);
	}
	return 1;
}
__setup("vmi_timer_alarm_hz=", vmi_timer_alarm_rate_setup);

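/*
 * Usage note (added for clarity, not in the original source): booting
 * with e.g. "vmi_timer_alarm_hz=128" overrides CONFIG_VMI_ALARM_HZ and
 * makes the hypervisor deliver 128 periodic alarms per second;
 * get_option() rejects non-numeric values and the alarm_rate > 0 check
 * rejects nonsensical ones, leaving the default in place.
 */
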
/* Initialization */
static void vmi_get_wallclock_ts(struct timespec *ts)
{
	unsigned long long wallclock;
	wallclock = vmi_timer_ops.get_wallclock(); // nsec units
	ts->tv_nsec = do_div(wallclock, 1000000000);
	ts->tv_sec = wallclock;
}

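/*
 * Note added for clarity (not in the original source): on i386,
 * do_div(n, base) divides the 64-bit n in place, leaving the quotient
 * in n and returning the remainder.  So after the call above, wallclock
 * holds whole seconds and the returned remainder is the leftover
 * nanoseconds, which is exactly the timespec split.
 */
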
static void update_xtime_from_wallclock(void)
{
	struct timespec ts;
	vmi_get_wallclock_ts(&ts);
	do_settimeofday(&ts);
}

unsigned long vmi_get_wallclock(void)
{
	struct timespec ts;
	vmi_get_wallclock_ts(&ts);
	return ts.tv_sec;
}

int vmi_set_wallclock(unsigned long now)
{
	return -1;
}

unsigned long long vmi_sched_clock(void)
{
	return read_available_cycles();
}

void __init vmi_time_init(void)
{
	unsigned long long cycles_per_sec, cycles_per_msec;
	unsigned long flags;

	local_irq_save(flags);
	setup_irq(0, &vmi_timer_irq);
#ifdef CONFIG_X86_LOCAL_APIC
	set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt);
#endif

	no_sync_cmos_clock = 1;

	vmi_get_wallclock_ts(&xtime);
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);

	real_cycles_accounted_system = read_real_cycles();
	update_xtime_from_wallclock();
	per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles();

	cycles_per_sec = vmi_timer_ops.get_cycle_frequency();

	cycles_per_jiffy = cycles_per_sec;
	(void)do_div(cycles_per_jiffy, HZ);
	cycles_per_alarm = cycles_per_sec;
	(void)do_div(cycles_per_alarm, alarm_hz);
	cycles_per_msec = cycles_per_sec;
	(void)do_div(cycles_per_msec, 1000);
	cpu_khz = cycles_per_msec;

	printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ; "
		"cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy,
		cycles_per_alarm);

	clocksource_vmi.mult = clocksource_khz2mult(cycles_per_msec,
						    clocksource_vmi.shift);
	if (clocksource_register(&clocksource_vmi))
		printk(KERN_WARNING "Error registering VMITIME clocksource.");

	/* Disable PIT. */
	outb_p(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */

	/* Schedule the alarm.  Do this in phase with process_times_cycles_accounted_cpu
	 * to reduce the latency of calling update_process_times. */
	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRED_IRQ0 | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		per_cpu(process_times_cycles_accounted_cpu, 0) + cycles_per_alarm,
		cycles_per_alarm);

	local_irq_restore(flags);
}

#ifdef CONFIG_X86_LOCAL_APIC

void __init vmi_timer_setup_boot_alarm(void)
{
	local_irq_disable();

	/* Route the interrupt to the correct vector. */
	apic_write_around(APIC_LVTT, LOCAL_TIMER_VECTOR);

	/* Cancel the IRQ0 wired alarm, and setup the LVTT alarm. */
	vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRED_LVTT | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		per_cpu(process_times_cycles_accounted_cpu, 0) + cycles_per_alarm,
		cycles_per_alarm);
	local_irq_enable();
}

/* Initialize the time accounting variables for an AP on an SMP system.
 * Also, set the local alarm for the AP. */
void __init vmi_timer_setup_secondary_alarm(void)
{
	int cpu = smp_processor_id();

	/* Route the interrupt to the correct vector. */
	apic_write_around(APIC_LVTT, LOCAL_TIMER_VECTOR);

	per_cpu(process_times_cycles_accounted_cpu, cpu) = read_available_cycles();

	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRED_LVTT | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		per_cpu(process_times_cycles_accounted_cpu, cpu) + cycles_per_alarm,
		cycles_per_alarm);
}

#endif

/* Update system wide (real) time accounting (e.g. jiffies, xtime). */
static void vmi_account_real_cycles(unsigned long long cur_real_cycles)
{
	long long cycles_not_accounted;

	write_seqlock(&xtime_lock);

	cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system;
	while (cycles_not_accounted >= cycles_per_jiffy) {
		/* System wide jiffies and wallclock. */
		do_timer(1);

		cycles_not_accounted -= cycles_per_jiffy;
		real_cycles_accounted_system += cycles_per_jiffy;
	}

	if (vmi_timer_ops.wallclock_updated())
		update_xtime_from_wallclock();

	write_sequnlock(&xtime_lock);
}

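/*
 * Worked example (added for clarity, not in the original source): with
 * HZ=250 and a 1 GHz counter, cycles_per_jiffy is 4,000,000.  If an
 * alarm arrives 10,500,000 real cycles after the last accounting point,
 * the loop above calls do_timer(1) twice and carries the remaining
 * 2,500,000 cycles forward, so no time is lost to rounding even when
 * alarms are delayed or coalesced.
 */
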
/* Update per-cpu process times. */
static void vmi_account_process_times_cycles(struct pt_regs *regs, int cpu,
					     unsigned long long cur_process_times_cycles)
{
	long long cycles_not_accounted;
	cycles_not_accounted = cur_process_times_cycles -
		per_cpu(process_times_cycles_accounted_cpu, cpu);

	while (cycles_not_accounted >= cycles_per_jiffy) {
		/* Account time to the current process.  This includes
		 * calling into the scheduler to decrement the timeslice
		 * and possibly reschedule.*/
		update_process_times(user_mode(regs));
		/* XXX handle /proc/profile multiplier. */
		profile_tick(CPU_PROFILING);

		cycles_not_accounted -= cycles_per_jiffy;
		per_cpu(process_times_cycles_accounted_cpu, cpu) += cycles_per_jiffy;
	}
}

#ifdef CONFIG_NO_IDLE_HZ
/* Update per-cpu idle times.  Used when a no-hz halt is ended. */
static void vmi_account_no_hz_idle_cycles(int cpu,
					  unsigned long long cur_process_times_cycles)
{
	long long cycles_not_accounted;
	unsigned long no_idle_hz_jiffies = 0;

	cycles_not_accounted = cur_process_times_cycles -
		per_cpu(process_times_cycles_accounted_cpu, cpu);

	while (cycles_not_accounted >= cycles_per_jiffy) {
		no_idle_hz_jiffies++;
		cycles_not_accounted -= cycles_per_jiffy;
		per_cpu(process_times_cycles_accounted_cpu, cpu) += cycles_per_jiffy;
	}
	/* Account time to the idle process. */
	account_steal_time(idle_task(cpu), jiffies_to_cputime(no_idle_hz_jiffies));
}
#endif

/* Update per-cpu stolen time. */
static void vmi_account_stolen_cycles(int cpu,
				      unsigned long long cur_real_cycles,
				      unsigned long long cur_avail_cycles)
{
	long long stolen_cycles_not_accounted;
	unsigned long stolen_jiffies = 0;

	if (cur_real_cycles < cur_avail_cycles)
		return;

	stolen_cycles_not_accounted = cur_real_cycles - cur_avail_cycles -
		per_cpu(stolen_cycles_accounted_cpu, cpu);

	while (stolen_cycles_not_accounted >= cycles_per_jiffy) {
		stolen_jiffies++;
		stolen_cycles_not_accounted -= cycles_per_jiffy;
		per_cpu(stolen_cycles_accounted_cpu, cpu) += cycles_per_jiffy;
	}
	/* HACK: pass NULL to force time onto cpustat->steal. */
	account_steal_time(NULL, jiffies_to_cputime(stolen_jiffies));
}

/* Body of either IRQ0 interrupt handler (UP no local-APIC) or
 * local-APIC LVTT interrupt handler (UP & local-APIC or SMP). */
static void vmi_local_timer_interrupt(int cpu)
{
	unsigned long long cur_real_cycles, cur_process_times_cycles;

	cur_real_cycles = read_real_cycles();
	cur_process_times_cycles = read_available_cycles();
	/* Update system wide (real) time state (xtime, jiffies). */
	vmi_account_real_cycles(cur_real_cycles);
	/* Update per-cpu process times. */
	vmi_account_process_times_cycles(get_irq_regs(), cpu, cur_process_times_cycles);
	/* Update time stolen from this cpu by the hypervisor. */
	vmi_account_stolen_cycles(cpu, cur_real_cycles, cur_process_times_cycles);
}

#ifdef CONFIG_NO_IDLE_HZ

/* Must be called only from idle loop, with interrupts disabled. */
int vmi_stop_hz_timer(void)
{
	/* Note that cpu_set, cpu_clear are (SMP safe) atomic on x86. */

	unsigned long seq, next;
	unsigned long long real_cycles_expiry;
	int cpu = smp_processor_id();
	int idle;

	BUG_ON(!irqs_disabled());
	if (sysctl_hz_timer != 0)
		return 0;

	cpu_set(cpu, nohz_cpu_mask);
	smp_mb();
	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
	    (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
		cpu_clear(cpu, nohz_cpu_mask);
		next = jiffies;
		idle = 0;
	} else
		idle = 1;

	/* Convert jiffies to the real cycle counter. */
	do {
		seq = read_seqbegin(&xtime_lock);
		real_cycles_expiry = real_cycles_accounted_system +
			(long)(next - jiffies) * cycles_per_jiffy;
	} while (read_seqretry(&xtime_lock, seq));

	/* This cpu is going idle.  Disable the periodic alarm. */
	if (idle) {
		vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
		per_cpu(idle_start_jiffies, cpu) = jiffies;
	}

	/* Set the real time alarm to expire at the next event. */
	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
		real_cycles_expiry, 0);

	return idle;
}

static void vmi_reenable_hz_timer(int cpu)
{
	/* For /proc/vmi/info idle_hz stat. */
	per_cpu(vmi_idle_no_hz_jiffies, cpu) += jiffies - per_cpu(idle_start_jiffies, cpu);
	per_cpu(vmi_idle_no_hz_irqs, cpu)++;

	/* Don't bother explicitly cancelling the one-shot alarm -- at
	 * worst we will receive a spurious timer interrupt. */
	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRING | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		per_cpu(process_times_cycles_accounted_cpu, cpu) + cycles_per_alarm,
		cycles_per_alarm);
	/* Indicate this cpu is no longer nohz idle. */
	cpu_clear(cpu, nohz_cpu_mask);
}

/* Called from interrupt handlers when (local) HZ timer is disabled. */
void vmi_account_time_restart_hz_timer(void)
{
	unsigned long long cur_real_cycles, cur_process_times_cycles;
	int cpu = smp_processor_id();

	BUG_ON(!irqs_disabled());
	/* Account the time during which the HZ timer was disabled. */
	cur_real_cycles = read_real_cycles();
	cur_process_times_cycles = read_available_cycles();
	/* Update system wide (real) time state (xtime, jiffies). */
	vmi_account_real_cycles(cur_real_cycles);
	/* Update per-cpu idle times. */
	vmi_account_no_hz_idle_cycles(cpu, cur_process_times_cycles);
	/* Update time stolen from this cpu by the hypervisor. */
	vmi_account_stolen_cycles(cpu, cur_real_cycles, cur_process_times_cycles);
	/* Reenable the hz timer. */
	vmi_reenable_hz_timer(cpu);
}

#endif /* CONFIG_NO_IDLE_HZ */

/* UP (and no local-APIC) VMI-timer alarm interrupt handler.
 * Handler for IRQ0.  Not used when SMP or X86_LOCAL_APIC after
 * APIC setup and vmi_timer_setup_boot_alarm() is called. */
static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
{
	vmi_local_timer_interrupt(smp_processor_id());
	return IRQ_HANDLED;
}

#ifdef CONFIG_X86_LOCAL_APIC

/* SMP VMI-timer alarm interrupt handler.  Handler for LVTT vector.
 * Also used in UP when CONFIG_X86_LOCAL_APIC.
 * The wrapper code is from arch/i386/kernel/apic.c#smp_apic_timer_interrupt. */
void smp_apic_vmi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	/*
	 * the NMI deadlock-detector uses this.
	 */
	per_cpu(irq_stat, cpu).apic_timer_irqs++;

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();

	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter();
	vmi_local_timer_interrupt(cpu);
	irq_exit();
	set_irq_regs(old_regs);
}

#endif /* CONFIG_X86_LOCAL_APIC */

@ -37,9 +37,14 @@ SECTIONS
{
  . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
  phys_startup_32 = startup_32 - LOAD_OFFSET;

  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
  	_text = .;			/* Text and read-only data */
  	*(.text.head)
  } :text = 0x9090

  /* read-only */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
	_text = .;			/* Text and read-only data */
	*(.text)
	SCHED_TEXT
	LOCK_TEXT

@ -79,7 +79,12 @@ void __init trap_init_hook(void)
 {
 }

-static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL};
+static struct irqaction irq0 = {
+	.handler = timer_interrupt,
+	.flags = IRQF_DISABLED | IRQF_NOBALANCING,
+	.mask = CPU_MASK_NONE,
+	.name = "timer"
+};

 /**
  * time_init_hook - do any specific initialisations for the system timer.
@ -90,6 +95,7 @@ static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE,
  **/
 void __init time_init_hook(void)
 {
+	irq0.mask = cpumask_of_cpu(0);
 	setup_irq(0, &irq0);
 }

@ -56,15 +56,14 @@ static int reg_offset_vm86[] = {
 #define VM86_REG_(x) (*(unsigned short *) \
 		(reg_offset_vm86[((unsigned)x)]+(u_char *) FPU_info))

-/* These are dummy, fs and gs are not saved on the stack. */
-#define ___FS ___ds
+/* This dummy, gs is not saved on the stack. */
 #define ___GS ___ds

 static int reg_offset_pm[] = {
 	offsetof(struct info,___cs),
 	offsetof(struct info,___ds),
 	offsetof(struct info,___es),
-	offsetof(struct info,___FS),
+	offsetof(struct info,___fs),
 	offsetof(struct info,___GS),
 	offsetof(struct info,___ss),
 	offsetof(struct info,___ds)
@ -169,13 +168,10 @@ static long pm_address(u_char FPU_modrm, u_char segment,

 	switch ( segment )
 	{
-		/* fs and gs aren't used by the kernel, so they still have their
-		   user-space values. */
-		case PREFIX_FS_-1:
-			/* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
-			savesegment(fs, addr->selector);
-			break;
+		/* gs isn't used by the kernel, so it still has its
+		   user-space value. */
 		case PREFIX_GS_-1:
+			/* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
 			savesegment(gs, addr->selector);
 			break;
 		default:

@ -48,9 +48,11 @@

 #define status_word() \
   ((partial_status & ~SW_Top & 0xffff) | ((top << SW_Top_Shift) & SW_Top))
-#define setcc(cc) ({ \
-  partial_status &= ~(SW_C0|SW_C1|SW_C2|SW_C3); \
-  partial_status |= (cc) & (SW_C0|SW_C1|SW_C2|SW_C3); })
+static inline void setcc(int cc)
+{
+	partial_status &= ~(SW_C0|SW_C1|SW_C2|SW_C3);
+	partial_status |= (cc) & (SW_C0|SW_C1|SW_C2|SW_C3);
+}

 #ifdef PECULIAR_486
 /* Default, this conveys no information, but an 80486 does it. */

@ -101,7 +101,6 @@ extern void find_max_pfn(void);
extern void add_one_highpage_init(struct page *, int, int);

extern struct e820map e820;
extern unsigned long init_pg_tables_end;
extern unsigned long highend_pfn, highstart_pfn;
extern unsigned long max_low_pfn;
extern unsigned long totalram_pages;

@ -46,17 +46,17 @@ int unregister_page_fault_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);

-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, long err)
 {
 	struct die_args args = {
 		.regs = regs,
-		.str = str,
+		.str = "page fault",
 		.err = err,
-		.trapnr = trap,
-		.signr = sig
+		.trapnr = 14,
+		.signr = SIGSEGV
 	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return atomic_notifier_call_chain(&notify_page_fault_chain,
+					  DIE_PAGE_FAULT, &args);
 }

 /*
@ -327,8 +327,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	if (unlikely(address >= TASK_SIZE)) {
 		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-						SIGSEGV) == NOTIFY_STOP)
+		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
 			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
@ -337,8 +336,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}

-	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-					SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
 		return;

 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc

@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)

 #ifdef CONFIG_X86_PAE
 	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+	paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 	pud = pud_offset(pgd, 0);
 	if (pmd_table != pmd_offset(pud, 0))
@ -82,6 +83,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
 	if (pmd_none(*pmd)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		if (page_table != pte_offset_kernel(pmd, 0))
 			BUG();
@ -345,6 +347,8 @@ static void __init pagetable_init (void)
 	/* Init entries of the first-level page table to the zero page */
 	for (i = 0; i < PTRS_PER_PGD; i++)
 		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#else
+	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
 #endif

 	/* Enable PSE if available */

@ -60,6 +60,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
+	paravirt_alloc_pt(page_to_pfn(base));
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
 					   addr == address ? prot : ref_prot));
@ -172,6 +173,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	if (!PageReserved(kpte_page)) {
 		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
 			ClearPagePrivate(kpte_page);
+			paravirt_release_pt(page_to_pfn(kpte_page));
 			list_add(&kpte_page->lru, &df_list);
 			revert_page(kpte_page, address);
 		}

@ -171,6 +171,8 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 void reserve_top_address(unsigned long reserve)
 {
 	BUG_ON(fixmaps > 0);
+	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
+	       (int)-reserve);
 #ifdef CONFIG_COMPAT_VDSO
 	BUG_ON(reserve != 0);
 #else
@ -248,9 +250,15 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
 			swapper_pg_dir + USER_PTRS_PER_PGD,
 			KERNEL_PGD_PTRS);

+	if (PTRS_PER_PMD > 1)
+		return;
+
+	/* must happen under lock */
+	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+			__pa(swapper_pg_dir) >> PAGE_SHIFT,
+			USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);

 	pgd_list_add(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
@ -260,6 +268,7 @@ void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */

+	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
 	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_del(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
@ -277,13 +286,18 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
 		if (!pmd)
 			goto out_oom;
+		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
 	}
 	return pgd;

 out_oom:
-	for (i--; i >= 0; i--)
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	for (i--; i >= 0; i--) {
+		pgd_t pgdent = pgd[i];
+		void* pmd = (void *)__va(pgd_val(pgdent)-1);
+		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+		kmem_cache_free(pmd_cache, pmd);
+	}
 	kmem_cache_free(pgd_cache, pgd);
 	return NULL;
 }
@ -294,8 +308,12 @@ void pgd_free(pgd_t *pgd)

 	/* in the PAE case user pgd entries are overwritten before usage */
 	if (PTRS_PER_PMD > 1)
-		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
-			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+			pgd_t pgdent = pgd[i];
+			void* pmd = (void *)__va(pgd_val(pgdent)-1);
+			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+			kmem_cache_free(pmd_cache, pmd);
+		}
 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
 	kmem_cache_free(pgd_cache, pgd);
 }

@ -24,7 +24,8 @@
 #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0)
 #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0)
-#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), -1);} while (0)
+#define CTR_32BIT_WRITE(l,msrs,c)	\
+	do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0);} while (0)
 #define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))

 #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0)
@ -79,7 +80,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	for (i = 0; i < NUM_COUNTERS; ++i) {
 		if (unlikely(!CTR_IS_RESERVED(msrs,i)))
 			continue;
-		CTR_WRITE(1, msrs, i);
+		CTR_32BIT_WRITE(1, msrs, i);
 	}

 	/* enable active counters */
@ -87,7 +88,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) {
 			reset_value[i] = counter_config[i].count;

-			CTR_WRITE(counter_config[i].count, msrs, i);
+			CTR_32BIT_WRITE(counter_config[i].count, msrs, i);

 			CTRL_READ(low, high, msrs, i);
 			CTRL_CLEAR(low);
@ -116,7 +117,7 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 			CTR_READ(low, high, msrs, i);
 			if (CTR_OVERFLOWED(low)) {
 				oprofile_add_sample(regs, i);
-				CTR_WRITE(reset_value[i], msrs, i);
+				CTR_32BIT_WRITE(reset_value[i], msrs, i);
 			}
 		}

@ -1,7 +1,7 @@
 obj-y				:= i386.o init.o

 obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
-obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o direct.o
+obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o direct.o mmconfig-shared.o
 obj-$(CONFIG_PCI_DIRECT)	+= direct.o

 pci-y				:= fixup.o