Merge remote branch 'keithp/drm-intel-next' of /ssd/git/drm-next into drm-core-next

* 'keithp/drm-intel-next' of /ssd/git/drm-next: (301 commits)
  drm/i915: split PCH clock gating init
  drm/i915: add Ivybridge clock gating init function
  drm/i915: Update the location of the ringbuffers' HWS_PGA registers for IVB.
  drm/i915: Add support for fence registers on Ivybridge.
  drm/i915: Use existing function instead of open-coding fence reg clear.
  drm/i915: split clock gating init into per-chipset functions
  drm/i915: set IBX pch type explicitly
  drm/i915: add Ivy Bridge PCI IDs and driver feature structs
  drm/i915: add PantherPoint PCH ID
  agp/intel: add Ivy Bridge support
  drm/i915: ring support for Ivy Bridge
  drm/i915: page flip support for Ivy Bridge
  drm/i915: interrupt & vblank support for Ivy Bridge
  drm/i915: treat Ivy Bridge watermarks like Sandy Bridge
  drm/i915: manual FDI training for Ivy Bridge
  drm/i915: add swizzle/tiling support for Ivy Bridge
  drm/i915: Ivy Bridge has split display and pipe control
  drm/i915: add IS_IVYBRIDGE macro for checks
  drm/i915: add IS_GEN7 macro to cover Ivy Bridge and later
  drm/i915: split enable/disable vblank code into chipset specific functions
  ...
commit 69f7876b2a
Author: Dave Airlie
Date: 2011-05-16 10:45:40 +10:00
255 changed files with 4667 additions and 2280 deletions


@ -294,6 +294,7 @@
<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
<!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
<!ENTITY sub-pixfmt SYSTEM "v4l/pixfmt.xml">
<!ENTITY sub-cropcap SYSTEM "v4l/vidioc-cropcap.xml">
<!ENTITY sub-dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">


@ -34,7 +34,7 @@
<varlistentry>
<term><parameter>request</parameter></term>
<listitem>
<para>MEDIA_IOC_ENUM_LINKS</para>
<para>MEDIA_IOC_SETUP_LINK</para>
</listitem>
</varlistentry>
<varlistentry>


@ -0,0 +1,79 @@
<refentry id="V4L2-PIX-FMT-Y12">
<refmeta>
<refentrytitle>V4L2_PIX_FMT_Y12 ('Y12 ')</refentrytitle>
&manvol;
</refmeta>
<refnamediv>
<refname><constant>V4L2_PIX_FMT_Y12</constant></refname>
<refpurpose>Grey-scale image</refpurpose>
</refnamediv>
<refsect1>
<title>Description</title>
<para>This is a grey-scale image with a depth of 12 bits per pixel. Pixels
are stored in 16-bit words with unused high bits padded with 0. The least
significant byte is stored at lower memory addresses (little-endian).</para>
<example>
<title><constant>V4L2_PIX_FMT_Y12</constant> 4 &times; 4
pixel image</title>
<formalpara>
<title>Byte Order.</title>
<para>Each cell is one byte.
<informaltable frame="none">
<tgroup cols="9" align="center">
<colspec align="left" colwidth="2*" />
<tbody valign="top">
<row>
<entry>start&nbsp;+&nbsp;0:</entry>
<entry>Y'<subscript>00low</subscript></entry>
<entry>Y'<subscript>00high</subscript></entry>
<entry>Y'<subscript>01low</subscript></entry>
<entry>Y'<subscript>01high</subscript></entry>
<entry>Y'<subscript>02low</subscript></entry>
<entry>Y'<subscript>02high</subscript></entry>
<entry>Y'<subscript>03low</subscript></entry>
<entry>Y'<subscript>03high</subscript></entry>
</row>
<row>
<entry>start&nbsp;+&nbsp;8:</entry>
<entry>Y'<subscript>10low</subscript></entry>
<entry>Y'<subscript>10high</subscript></entry>
<entry>Y'<subscript>11low</subscript></entry>
<entry>Y'<subscript>11high</subscript></entry>
<entry>Y'<subscript>12low</subscript></entry>
<entry>Y'<subscript>12high</subscript></entry>
<entry>Y'<subscript>13low</subscript></entry>
<entry>Y'<subscript>13high</subscript></entry>
</row>
<row>
<entry>start&nbsp;+&nbsp;16:</entry>
<entry>Y'<subscript>20low</subscript></entry>
<entry>Y'<subscript>20high</subscript></entry>
<entry>Y'<subscript>21low</subscript></entry>
<entry>Y'<subscript>21high</subscript></entry>
<entry>Y'<subscript>22low</subscript></entry>
<entry>Y'<subscript>22high</subscript></entry>
<entry>Y'<subscript>23low</subscript></entry>
<entry>Y'<subscript>23high</subscript></entry>
</row>
<row>
<entry>start&nbsp;+&nbsp;24:</entry>
<entry>Y'<subscript>30low</subscript></entry>
<entry>Y'<subscript>30high</subscript></entry>
<entry>Y'<subscript>31low</subscript></entry>
<entry>Y'<subscript>31high</subscript></entry>
<entry>Y'<subscript>32low</subscript></entry>
<entry>Y'<subscript>32high</subscript></entry>
<entry>Y'<subscript>33low</subscript></entry>
<entry>Y'<subscript>33high</subscript></entry>
</row>
</tbody>
</tgroup>
</informaltable>
</para>
</formalpara>
</example>
</refsect1>
</refentry>
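The layout above maps directly to code. Below is a minimal sketch, assuming only the little-endian, zero-padded layout documented here, of reading one Y12 sample from a captured buffer; buf, stride, x and y are hypothetical caller-supplied values.

#include <stdint.h>
#include <stddef.h>

/* Read one 12-bit luma sample from a V4L2_PIX_FMT_Y12 buffer: two bytes
 * per pixel, least significant byte first, upper four bits zero. */
static uint16_t y12_sample(const uint8_t *buf, size_t stride,
                           unsigned int x, unsigned int y)
{
	const uint8_t *p = buf + y * stride + 2 * x;
	uint16_t v = (uint16_t)p[0] | ((uint16_t)p[1] << 8);

	return v & 0x0fff;	/* only the low 12 bits carry data */
}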


@ -696,6 +696,7 @@ information.</para>
&sub-packed-yuv;
&sub-grey;
&sub-y10;
&sub-y12;
&sub-y16;
&sub-yuyv;
&sub-uyvy;


@ -456,6 +456,23 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
<row id="V4L2-MBUS-FMT-SGBRG8-1X8">
<entry>V4L2_MBUS_FMT_SGBRG8_1X8</entry>
<entry>0x3013</entry>
<entry></entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>g<subscript>7</subscript></entry>
<entry>g<subscript>6</subscript></entry>
<entry>g<subscript>5</subscript></entry>
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
<row id="V4L2-MBUS-FMT-SGRBG8-1X8">
<entry>V4L2_MBUS_FMT_SGRBG8_1X8</entry>
<entry>0x3002</entry>
@ -473,6 +490,23 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
<row id="V4L2-MBUS-FMT-SRGGB8-1X8">
<entry>V4L2_MBUS_FMT_SRGGB8_1X8</entry>
<entry>0x3014</entry>
<entry></entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>r<subscript>7</subscript></entry>
<entry>r<subscript>6</subscript></entry>
<entry>r<subscript>5</subscript></entry>
<entry>r<subscript>4</subscript></entry>
<entry>r<subscript>3</subscript></entry>
<entry>r<subscript>2</subscript></entry>
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
<row id="V4L2-MBUS-FMT-SBGGR10-DPCM8-1X8">
<entry>V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8</entry>
<entry>0x300b</entry>
@ -2159,6 +2193,31 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
<row id="V4L2-MBUS-FMT-Y12-1X12">
<entry>V4L2_MBUS_FMT_Y12_1X12</entry>
<entry>0x2013</entry>
<entry></entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>11</subscript></entry>
<entry>y<subscript>10</subscript></entry>
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
<entry>y<subscript>4</subscript></entry>
<entry>y<subscript>3</subscript></entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
<row id="V4L2-MBUS-FMT-UYVY8-1X16">
<entry>V4L2_MBUS_FMT_UYVY8_1X16</entry>
<entry>0x200f</entry>


@ -52,8 +52,10 @@ Brief summary of control files.
tasks # attach a task(thread) and show list of threads
cgroup.procs # show list of processes
cgroup.event_control # an interface for event_fd()
memory.usage_in_bytes # show current memory(RSS+Cache) usage.
memory.memsw.usage_in_bytes # show current memory+Swap usage
memory.usage_in_bytes # show current res_counter usage for memory
(See 5.5 for details)
memory.memsw.usage_in_bytes # show current res_counter usage for memory+Swap
(See 5.5 for details)
memory.limit_in_bytes # set/show limit of memory usage
memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage
memory.failcnt # show the number of memory usage hits limits
@ -453,6 +455,15 @@ memory under it will be reclaimed.
You can reset failcnt by writing 0 to failcnt file.
# echo 0 > .../memory.failcnt
5.5 usage_in_bytes
For efficiency, as with other kernel components, the memory cgroup uses some
optimization to avoid unnecessary cacheline false sharing. usage_in_bytes is
affected by the method and doesn't show the 'exact' value of memory (and swap)
usage; it's a fuzz value for efficient access. (Of course, when necessary, it's
synchronized.) If you want to know the more exact memory usage, you should use
the RSS+CACHE(+SWAP) value in memory.stat (see 5.2).
6. Hierarchy support
The memory controller supports a deep hierarchy and hierarchical accounting.
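To illustrate the advice in 5.5 above, here is a small user-space sketch that derives the exact RSS+CACHE figure from memory.stat instead of reading the fuzzed usage_in_bytes file; the cgroup path below is a hypothetical example and depends on where the hierarchy is mounted.

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/cgroups/memory/0/memory.stat", "r");	/* hypothetical path */
	char key[64];
	unsigned long long val, rss = 0, cache = 0;

	if (!f)
		return 1;
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "rss"))
			rss = val;
		else if (!strcmp(key, "cache"))
			cache = val;
	}
	fclose(f);
	printf("exact usage: %llu bytes\n", rss + cache);
	return 0;
}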


@ -14,10 +14,6 @@ Supported chips:
Prefix: 'gl523sm'
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
Datasheet:
* Intel Xeon Processor
Prefix: - any other - may require 'force_adm1021' parameter
Addresses scanned: none
Datasheet: Publicly available at Intel website
* Maxim MAX1617
Prefix: 'max1617'
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
@ -91,21 +87,27 @@ will do no harm, but will return 'old' values. It is possible to make
ADM1021-clones do faster measurements, but there is really no good reason
for that.
Xeon support
------------
Some Xeon processors have real max1617, adm1021, or compatible chips
within them, with two temperature sensors.
Netburst-based Xeon support
---------------------------
Other Xeons have chips with only one sensor.
Some Xeon processors based on the Netburst (early Pentium 4, from 2001 to
2003) microarchitecture had real MAX1617, ADM1021, or compatible chips
within them, with two temperature sensors. Other Xeon processors of this
era (with 400 MHz FSB) had chips with only one temperature sensor.
If you have a Xeon, and the adm1021 module loads, and both temperatures
appear valid, then things are good.
If you have such an old Xeon, and you get two valid temperatures when
loading the adm1021 module, then things are good.
If the adm1021 module doesn't load, you should try this:
modprobe adm1021 force_adm1021=BUS,ADDRESS
ADDRESS can only be 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e.
If nothing happens when loading the adm1021 module, and you are certain
that your specific Xeon processor model includes compatible sensors, you
will have to explicitly instantiate the sensor chips from user-space. See
method 4 in Documentation/i2c/instantiating-devices. Possible slave
addresses are 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. It is likely that
only temp2 will be correct and temp1 will have to be ignored.
If you have dual Xeons you may appear to have two separate
adm1021-compatible chips, or two single-temperature sensors, at distinct
addresses.
Previous generations of the Xeon processor (based on Pentium II/III)
didn't have these sensors. Next generations of Xeon processors (533 MHz
FSB and faster) lost them, until the Core-based generation which
introduced integrated digital thermal sensors. These are supported by
the coretemp driver.
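For reference, the "method 4" instantiation mentioned above amounts to writing the driver name and slave address to the bus's new_device file. A minimal sketch, assuming a hypothetical bus number 0 and the 0x18 address from the list above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/i2c/devices/i2c-0/new_device", "w");

	if (!f)
		return 1;
	fprintf(f, "adm1021 0x18\n");	/* driver name and slave address */
	return fclose(f) == 0 ? 0 : 1;
}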


@ -32,6 +32,16 @@ Supported chips:
Addresses scanned: I2C 0x4c and 0x4d
Datasheet: Publicly available at the ON Semiconductor website
http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
* Analog Devices ADT7461A
Prefix: 'adt7461a'
Addresses scanned: I2C 0x4c and 0x4d
Datasheet: Publicly available at the ON Semiconductor website
http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A
* ON Semiconductor NCT1008
Prefix: 'nct1008'
Addresses scanned: I2C 0x4c and 0x4d
Datasheet: Publicly available at the ON Semiconductor website
http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008
* Maxim MAX6646
Prefix: 'max6646'
Addresses scanned: I2C 0x4d
@ -149,7 +159,7 @@ ADM1032:
* ALERT is triggered by open remote sensor.
* SMBus PEC support for Write Byte and Receive Byte transactions.
ADT7461:
ADT7461, ADT7461A, NCT1008:
* Extended temperature range (breaks compatibility)
* Lower resolution for remote temperature
@ -195,9 +205,9 @@ are exported, one for each channel, but these values are of course linked.
Only the local hysteresis can be set from user-space, and the same delta
applies to the remote hysteresis.
The lm90 driver will not update its values more frequently than every
other second; reading them more often will do no harm, but will return
'old' values.
The lm90 driver will not update its values more frequently than configured with
the update_interval attribute; reading them more often will do no harm, but will
return 'old' values.
SMBus Alert Support
-------------------
@ -205,11 +215,12 @@ SMBus Alert Support
This driver has basic support for SMBus alert. When an alert is received,
the status register is read and the faulty temperature channel is logged.
The Analog Devices chips (ADM1032 and ADT7461) do not implement the SMBus
alert protocol properly so additional care is needed: the ALERT output is
disabled when an alert is received, and is re-enabled only when the alarm
is gone. Otherwise the chip would block alerts from other chips in the bus
as long as the alarm is active.
The Analog Devices chips (ADM1032, ADT7461 and ADT7461A) and ON
Semiconductor chips (NCT1008) do not implement the SMBus alert protocol
properly so additional care is needed: the ALERT output is disabled when
an alert is received, and is re-enabled only when the alarm is gone.
Otherwise the chip would block alerts from other chips in the bus as long
as the alarm is active.
PEC Support
-----------


@ -37,7 +37,7 @@ Generic scaling / cropping scheme
-1'-
In the above chart minuses and slashes represent "real" data amounts, points and
accents represent "useful" data, basically, CEU scaled amd cropped output,
accents represent "useful" data, basically, CEU scaled and cropped output,
mapped back onto the client's source plane.
Such a configuration can be produced by user requests:
@ -65,7 +65,7 @@ Do not touch input rectangle - it is already optimal.
1. Calculate current sensor scales:
scale_s = ((3') - (3)) / ((2') - (2))
scale_s = ((2') - (2)) / ((3') - (3))
2. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at
current sensor scales onto input window - this is user S_CROP:
@ -80,7 +80,7 @@ window:
4. Calculate sensor output window by applying combined scales to real input
window:
width_s_out = ((2') - (2)) / scale_comb
width_s_out = ((7') - (7)) = ((2') - (2)) / scale_comb
5. Apply iterative sensor S_FMT for sensor output window.


@ -12,6 +12,7 @@ CONTENTS
4. Application Programming Interface (API)
5. Example Execution Scenarios
6. Guidelines
7. Debugging
1. Introduction
@ -379,3 +380,42 @@ If q1 has WQ_CPU_INTENSIVE set,
* Unless work items are expected to consume a huge amount of CPU
cycles, using a bound wq is usually beneficial due to the increased
level of locality in wq operations and work item execution.
7. Debugging
Because the work functions are executed by generic worker threads
there are a few tricks needed to shed some light on misbehaving
workqueue users.
Worker threads show up in the process list as:
root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1]
root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2]
root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0]
root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0]
If kworkers are going crazy (using too much cpu), there are two types
of possible problems:
1. Something being scheduled in rapid succession
2. A single work item that consumes lots of cpu cycles
The first one can be tracked using tracing:
$ echo workqueue:workqueue_queue_work > /sys/kernel/debug/tracing/set_event
$ cat /sys/kernel/debug/tracing/trace_pipe > out.txt
(wait a few secs)
^C
If something is busy looping on work queueing, it would be dominating
the output and the offender can be determined with the work item
function.
For the second type of problems it should be possible to just check
the stack trace of the offending worker thread.
$ cat /proc/THE_OFFENDING_KWORKER/stack
The work item's function should be trivially visible in the stack
trace.
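Either diagnosis is easier when the work function has a recognizable name. A minimal, hypothetical work item like the sketch below is the kind of thing whose function (my_work_fn here) shows up both in the workqueue_queue_work trace and in the offending kworker's stack:

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work_fn ran\n");	/* this name appears in the trace */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	schedule_work(&my_work);	/* queue onto the system workqueue */
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");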


@ -1032,12 +1032,13 @@ W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c64xx/
ARM/S5P ARM ARCHITECTURES
ARM/S5P EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5p*/
F: arch/arm/mach-exynos*/
ARM/SAMSUNG MOBILE MACHINE SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
@ -2250,10 +2251,10 @@ F: drivers/gpu/drm/
F: include/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Chris Wilson <chris@chris-wilson.co.uk>
M: Keith Packard <keithp@keithp.com>
L: intel-gfx@lists.freedesktop.org (subscribers-only)
L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6.git
S: Supported
F: drivers/gpu/drm/i915
F: include/drm/i915*
@ -2808,7 +2809,7 @@ GPIO SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
S: Maintained
T: git git://git.secretlab.ca/git/linux-2.6.git
F: Documentation/gpio/gpio.txt
F: Documentation/gpio.txt
F: drivers/gpio/
F: include/linux/gpio*
@ -6555,7 +6556,7 @@ S: Maintained
F: drivers/usb/host/uhci*
USB "USBNET" DRIVER FRAMEWORK
M: David Brownell <dbrownell@users.sourceforge.net>
M: Oliver Neukum <oneukum@suse.de>
L: netdev@vger.kernel.org
W: http://www.linux-usb.org/usbnet
S: Maintained
@ -6921,6 +6922,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
XEN HYPERVISOR INTERFACE
M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)
L: virtualization@lists.linux-foundation.org
S: Supported
F: arch/x86/xen/
F: drivers/*/xen-*front.c
F: drivers/xen/
F: arch/x86/include/asm/xen/
F: include/xen/
XEN NETWORK BACKEND DRIVER
M: Ian Campbell <ian.campbell@citrix.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)
@ -6942,18 +6955,6 @@ S: Supported
F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XEN HYPERVISOR INTERFACE
M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)
L: virtualization@lists.linux-foundation.org
S: Supported
F: arch/x86/xen/
F: drivers/*/xen-*front.c
F: drivers/xen/
F: arch/x86/include/asm/xen/
F: include/xen/
XFS FILESYSTEM
P: Silicon Graphics Inc
M: Alex Elder <aelder@sgi.com>


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*


@ -39,10 +39,13 @@ typedef u32 kprobe_opcode_t;
struct kprobe;
typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
typedef unsigned long (kprobe_check_cc)(unsigned long);
/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
kprobe_opcode_t *insn;
kprobe_insn_handler_t *insn_handler;
kprobe_check_cc *insn_check_cc;
};
struct prev_kprobe {

File diff suppressed because it is too large.


@ -134,7 +134,8 @@ static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
regs->ARM_pc += 4;
p->ainsn.insn_handler(p, regs);
if (p->ainsn.insn_check_cc(regs->ARM_cpsr))
p->ainsn.insn_handler(p, regs);
}
/*

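The new insn_check_cc hook matches the kprobe_check_cc typedef added in the header above: given the saved CPSR it returns non-zero when the probed instruction's condition field would let it execute. A hypothetical sketch for the EQ condition alone (a real implementation covers every condition code):

#define PSR_Z_BIT	(1UL << 30)	/* Z flag in the ARM CPSR */

/* Would an instruction predicated on EQ execute under this CPSR? */
static unsigned long check_cc_eq(unsigned long cpsr)
{
	return (cpsr & PSR_Z_BIT) != 0;
}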

@ -746,7 +746,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while (tail && !((unsigned long)tail & 0x3))
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
}


@ -479,7 +479,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
{
}
static void broadcast_timer_setup(struct clock_event_device *evt)
static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_ONESHOT |


@ -311,7 +311,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
long err;
int i;
if (nsops < 1)
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)


@ -63,6 +63,7 @@ config MACH_DAVINCI_EVM
depends on ARCH_DAVINCI_DM644x
select MISC_DEVICES
select EEPROM_AT24
select I2C
help
Configure this option to specify the whether the board used
for development is a DM644x EVM
@ -72,6 +73,7 @@ config MACH_SFFSDR
depends on ARCH_DAVINCI_DM644x
select MISC_DEVICES
select EEPROM_AT24
select I2C
help
Say Y here to select the Lyrtech Small Form Factor
Software Defined Radio (SFFSDR) board.
@ -105,6 +107,7 @@ config MACH_DAVINCI_DM6467_EVM
select MACH_DAVINCI_DM6467TEVM
select MISC_DEVICES
select EEPROM_AT24
select I2C
help
Configure this option to specify the whether the board used
for development is a DM6467 EVM
@ -118,6 +121,7 @@ config MACH_DAVINCI_DM365_EVM
depends on ARCH_DAVINCI_DM365
select MISC_DEVICES
select EEPROM_AT24
select I2C
help
Configure this option to specify whether the board used
for development is a DM365 EVM
@ -129,6 +133,7 @@ config MACH_DAVINCI_DA830_EVM
select GPIO_PCF857X
select MISC_DEVICES
select EEPROM_AT24
select I2C
help
Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module.
@ -205,6 +210,7 @@ config MACH_MITYOMAPL138
depends on ARCH_DAVINCI_DA850
select MISC_DEVICES
select EEPROM_AT24
select I2C
help
Say Y here to select the Critical Link MityDSP-L138/MityARM-1808
System on Module. Information on this SoM may be found at


@ -29,7 +29,7 @@
#include <mach/mux.h>
#include <mach/spi.h>
#define MITYOMAPL138_PHY_ID "0:03"
#define MITYOMAPL138_PHY_ID ""
#define FACTORY_CONFIG_MAGIC 0x012C0138
#define FACTORY_CONFIG_VERSION 0x00010001
@ -414,7 +414,7 @@ static struct resource mityomapl138_nandflash_resource[] = {
static struct platform_device mityomapl138_nandflash_device = {
.name = "davinci_nand",
.id = 0,
.id = 1,
.dev = {
.platform_data = &mityomapl138_nandflash_data,
},


@ -39,7 +39,8 @@
#define DA8XX_GPIO_BASE 0x01e26000
#define DA8XX_I2C1_BASE 0x01e28000
#define DA8XX_SPI0_BASE 0x01c41000
#define DA8XX_SPI1_BASE 0x01f0e000
#define DA830_SPI1_BASE 0x01e12000
#define DA850_SPI1_BASE 0x01f0e000
#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000
#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000
@ -762,8 +763,8 @@ static struct resource da8xx_spi0_resources[] = {
static struct resource da8xx_spi1_resources[] = {
[0] = {
.start = DA8XX_SPI1_BASE,
.end = DA8XX_SPI1_BASE + SZ_4K - 1,
.start = DA830_SPI1_BASE,
.end = DA830_SPI1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -832,5 +833,10 @@ int __init da8xx_register_spi(int instance, struct spi_board_info *info,
da8xx_spi_pdata[instance].num_chipselect = len;
if (instance == 1 && cpu_is_davinci_da850()) {
da8xx_spi1_resources[0].start = DA850_SPI1_BASE;
da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1;
}
return platform_device_register(&da8xx_spi_device[instance]);
}


@ -314,7 +314,7 @@ static struct clk timer2_clk = {
.name = "timer2",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_TIMER2,
.usecount = 1, /* REVISIT: why can't' this be disabled? */
.usecount = 1, /* REVISIT: why can't this be disabled? */
};
static struct clk timer3_clk = {


@ -274,7 +274,7 @@ static struct clk timer2_clk = {
.name = "timer2",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_TIMER2,
.usecount = 1, /* REVISIT: why can't' this be disabled? */
.usecount = 1, /* REVISIT: why can't this be disabled? */
};
static struct clk_lookup dm644x_clks[] = {


@ -24,6 +24,9 @@
#define UART_SHIFT 2
#define davinci_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET)
#define davinci_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET)
.pushsection .data
davinci_uart_phys: .word 0
davinci_uart_virt: .word 0
@ -34,7 +37,7 @@ davinci_uart_virt: .word 0
/* Use davinci_uart_phys/virt if already configured */
10: mrc p15, 0, \rp, c1, c0
tst \rp, #1 @ MMU enabled?
ldreq \rp, =__virt_to_phys(davinci_uart_phys)
ldreq \rp, =davinci_uart_v2p(davinci_uart_phys)
ldrne \rp, =davinci_uart_phys
add \rv, \rp, #4 @ davinci_uart_virt
ldr \rp, [\rp, #0]
@ -48,18 +51,18 @@ davinci_uart_virt: .word 0
tst \rp, #1 @ MMU enabled?
/* Copy uart phys address from decompressor uart info */
ldreq \rv, =__virt_to_phys(davinci_uart_phys)
ldreq \rv, =davinci_uart_v2p(davinci_uart_phys)
ldrne \rv, =davinci_uart_phys
ldreq \rp, =DAVINCI_UART_INFO
ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO)
ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO)
ldr \rp, [\rp, #0]
str \rp, [\rv]
/* Copy uart virt address from decompressor uart info */
ldreq \rv, =__virt_to_phys(davinci_uart_virt)
ldreq \rv, =davinci_uart_v2p(davinci_uart_virt)
ldrne \rv, =davinci_uart_virt
ldreq \rp, =DAVINCI_UART_INFO
ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO)
ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO)
ldr \rp, [\rp, #4]
str \rp, [\rv]


@ -22,7 +22,7 @@
*
* This area sits just below the page tables (see arch/arm/kernel/head.S).
*/
#define DAVINCI_UART_INFO (PHYS_OFFSET + 0x3ff8)
#define DAVINCI_UART_INFO (PLAT_PHYS_OFFSET + 0x3ff8)
#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)


@ -257,11 +257,16 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.workaround = FLS_USB2_WORKAROUND_ENGCM09152,
};
static int vpr200_usbh_init(struct platform_device *pdev)
{
return mx35_initialize_usb_hw(pdev->id,
MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY);
}
/* USB HOST config */
static const struct mxc_usbh_platform_data usb_host_pdata __initconst = {
.portsc = MXC_EHCI_MODE_SERIAL,
.flags = MXC_EHCI_INTERFACE_SINGLE_UNI |
MXC_EHCI_INTERNAL_PHY,
.init = vpr200_usbh_init,
.portsc = MXC_EHCI_MODE_SERIAL,
};
static struct platform_device *devices[] __initdata = {


@ -193,7 +193,7 @@ static iomux_v3_cfg_t mx53_loco_pads[] = {
.wakeup = wake, \
}
static const struct gpio_keys_button loco_buttons[] __initconst = {
static struct gpio_keys_button loco_buttons[] = {
GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0),
GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0),
GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0),


@ -295,11 +295,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
unsigned long diff, parent_rate, calc_rate; \
int i; \
\
parent_rate = clk_get_rate(clk->parent); \
div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
bm_busy = BM_CLKCTRL_##dr##_BUSY; \
\
if (clk->parent == &ref_xtal_clk) { \
parent_rate = clk_get_rate(clk->parent); \
div = DIV_ROUND_UP(parent_rate, rate); \
if (clk == &cpu_clk) { \
div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \
@ -309,6 +309,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
if (div == 0 || div > div_max) \
return -EINVAL; \
} else { \
/* \
* hack alert: this block modifies clk->parent, too, \
* so the base to use is the grand parent. \
*/ \
parent_rate = clk_get_rate(clk->parent->parent); \
rate >>= PARENT_RATE_SHIFT; \
parent_rate >>= PARENT_RATE_SHIFT; \
diff = parent_rate; \


@ -68,7 +68,7 @@ obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
AFLAGS_sleep24xx.o :=-Wa,-march=armv6
AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a
AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec)
ifeq ($(CONFIG_PM_VERBOSE),y)
CFLAGS_pm_bus.o += -DDEBUG


@ -141,14 +141,19 @@ static void __init rx51_init(void)
static void __init rx51_map_io(void)
{
omap2_set_globals_3xxx();
rx51_video_mem_init();
omap34xx_map_common_io();
}
static void __init rx51_reserve(void)
{
rx51_video_mem_init();
omap_reserve();
}
MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
/* Maintainer: Lauri Leukkunen <lauri.leukkunen@nokia.com> */
.boot_params = 0x80000100,
.reserve = omap_reserve,
.reserve = rx51_reserve,
.map_io = rx51_map_io,
.init_early = rx51_init_early,
.init_irq = omap_init_irq,


@ -3116,14 +3116,9 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "dsp_fck", &dsp_fck, CK_443X),
CLK("omapdss_dss", "sys_clk", &dss_sys_clk, CK_443X),
CLK("omapdss_dss", "tv_clk", &dss_tv_clk, CK_443X),
CLK("omapdss_dss", "dss_clk", &dss_dss_clk, CK_443X),
CLK("omapdss_dss", "video_clk", &dss_48mhz_clk, CK_443X),
CLK("omapdss_dss", "fck", &dss_fck, CK_443X),
/*
* On OMAP4, DSS ick is a dummy clock; this is needed for compatibility
* with OMAP2/3.
*/
CLK("omapdss_dss", "ick", &dummy_ck, CK_443X),
CLK("omapdss_dss", "fck", &dss_dss_clk, CK_443X),
CLK("omapdss_dss", "ick", &dss_fck, CK_443X),
CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X),
CLK(NULL, "emif1_fck", &emif1_fck, CK_443X),
CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),


@ -247,6 +247,7 @@ struct omap3_cm_regs {
u32 per_cm_clksel;
u32 emu_cm_clksel;
u32 emu_cm_clkstctrl;
u32 pll_cm_autoidle;
u32 pll_cm_autoidle2;
u32 pll_cm_clksel4;
u32 pll_cm_clksel5;
@ -319,6 +320,15 @@ void omap3_cm_save_context(void)
omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1);
cm_context.emu_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL);
/*
* As per erratum i671, ROM code does not respect the PER DPLL
* programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
* In this case, even though this register has been saved in
* scratchpad contents, we need to restore AUTO_PERIPH_DPLL
* by ourselves. So, we need to save it anyway.
*/
cm_context.pll_cm_autoidle =
omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
cm_context.pll_cm_autoidle2 =
omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2);
cm_context.pll_cm_clksel4 =
@ -441,6 +451,13 @@ void omap3_cm_restore_context(void)
CM_CLKSEL1);
omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
OMAP2_CM_CLKSTCTRL);
/*
* As per erratum i671, ROM code does not respect the PER DPLL
* programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
* In this case, we need to restore AUTO_PERIPH_DPLL by ourselves.
*/
omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD,
CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD,
CM_AUTOIDLE2);
omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,


@ -316,8 +316,14 @@ void omap3_save_scratchpad_contents(void)
omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
prcm_block_contents.cm_clken_pll =
omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
/*
* As per erratum i671, ROM code does not respect the PER DPLL
* programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
* Then, in any case, clear these bits to avoid extra latencies.
*/
prcm_block_contents.cm_autoidle_pll =
omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL);
omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) &
~OMAP3430_AUTO_PERIPH_DPLL_MASK;
prcm_block_contents.cm_clksel1_pll =
omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
prcm_block_contents.cm_clksel2_pll =


@ -1639,6 +1639,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = {
static struct omap_hwmod omap2420_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap242x_gpio1_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio1_irqs),
.main_clk = "gpios_fck",
@ -1669,6 +1670,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = {
static struct omap_hwmod omap2420_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap242x_gpio2_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio2_irqs),
.main_clk = "gpios_fck",
@ -1699,6 +1701,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = {
static struct omap_hwmod omap2420_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap242x_gpio3_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio3_irqs),
.main_clk = "gpios_fck",
@ -1729,6 +1732,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = {
static struct omap_hwmod omap2420_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap242x_gpio4_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio4_irqs),
.main_clk = "gpios_fck",
@ -1782,7 +1786,7 @@ static struct omap_hwmod_irq_info omap2420_dma_system_irqs[] = {
static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = {
{
.pa_start = 0x48056000,
.pa_end = 0x4a0560ff,
.pa_end = 0x48056fff,
.flags = ADDR_TYPE_RT
},
};


@ -1742,6 +1742,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = {
static struct omap_hwmod omap2430_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap243x_gpio1_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio1_irqs),
.main_clk = "gpios_fck",
@ -1772,6 +1773,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = {
static struct omap_hwmod omap2430_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap243x_gpio2_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio2_irqs),
.main_clk = "gpios_fck",
@ -1802,6 +1804,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = {
static struct omap_hwmod omap2430_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap243x_gpio3_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio3_irqs),
.main_clk = "gpios_fck",
@ -1832,6 +1835,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = {
static struct omap_hwmod omap2430_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap243x_gpio4_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio4_irqs),
.main_clk = "gpios_fck",
@ -1862,6 +1866,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio5_slaves[] = {
static struct omap_hwmod omap2430_gpio5_hwmod = {
.name = "gpio5",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap243x_gpio5_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio5_irqs),
.main_clk = "gpio5_fck",
@ -1915,7 +1920,7 @@ static struct omap_hwmod_irq_info omap2430_dma_system_irqs[] = {
static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = {
{
.pa_start = 0x48056000,
.pa_end = 0x4a0560ff,
.pa_end = 0x48056fff,
.flags = ADDR_TYPE_RT
},
};


@ -2141,6 +2141,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = {
static struct omap_hwmod omap3xxx_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio1_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio1_irqs),
.main_clk = "gpio1_ick",
@ -2177,6 +2178,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = {
static struct omap_hwmod omap3xxx_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio2_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio2_irqs),
.main_clk = "gpio2_ick",
@ -2213,6 +2215,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = {
static struct omap_hwmod omap3xxx_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio3_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio3_irqs),
.main_clk = "gpio3_ick",
@ -2249,6 +2252,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = {
static struct omap_hwmod omap3xxx_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio4_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio4_irqs),
.main_clk = "gpio4_ick",
@ -2285,6 +2289,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = {
static struct omap_hwmod omap3xxx_gpio5_hwmod = {
.name = "gpio5",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio5_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio5_irqs),
.main_clk = "gpio5_ick",
@ -2321,6 +2326,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = {
static struct omap_hwmod omap3xxx_gpio6_hwmod = {
.name = "gpio6",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio6_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio6_irqs),
.main_clk = "gpio6_ick",
@ -2386,7 +2392,7 @@ static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = {
static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
{
.pa_start = 0x48056000,
.pa_end = 0x4a0560ff,
.pa_end = 0x48056fff,
.flags = ADDR_TYPE_RT
},
};


@ -885,7 +885,7 @@ static struct omap_hwmod_ocp_if *omap44xx_dma_system_masters[] = {
static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = {
{
.pa_start = 0x4a056000,
.pa_end = 0x4a0560ff,
.pa_end = 0x4a056fff,
.flags = ADDR_TYPE_RT
},
};


@ -196,11 +196,11 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
/* No timeout error for debug sources */
}
base = ((l3->rt) + (*(omap3_l3_bases[int_type] + err_source)));
/* identify the error source */
for (err_source = 0; !(status & (1 << err_source)); err_source++)
;
base = l3->rt + *(omap3_l3_bases[int_type] + err_source);
error = omap3_l3_readll(base, L3_ERROR_LOG);
if (error) {


@ -89,6 +89,7 @@ static void omap2_init_processor_devices(void)
if (cpu_is_omap44xx()) {
_init_omap_device("l3_main_1", &l3_dev);
_init_omap_device("dsp", &dsp_dev);
_init_omap_device("iva", &iva_dev);
} else {
_init_omap_device("l3_main", &l3_dev);
}


@ -114,7 +114,6 @@ static int __init _config_common_vdd_data(struct omap_vdd_info *vdd)
sys_clk_speed /= 1000;
/* Generic voltage parameters */
vdd->curr_volt = 1200000;
vdd->volt_scale = vp_forceupdate_scale_voltage;
vdd->vp_enabled = false;


@ -711,7 +711,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = {
static struct regulator_init_data bq24022_init_data = {
.constraints = {
.max_uA = 500000,
.valid_ops_mask = REGULATOR_CHANGE_CURRENT,
.valid_ops_mask = REGULATOR_CHANGE_CURRENT|REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(bq24022_consumers),
.consumer_supplies = bq24022_consumers,

View File

@ -599,7 +599,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = {
static struct regulator_init_data bq24022_init_data = {
.constraints = {
.max_uA = 500000,
.valid_ops_mask = REGULATOR_CHANGE_CURRENT,
.valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(bq24022_consumers),
.consumer_supplies = bq24022_consumers,


@ -395,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area)
teq r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
b xscale_dma_flush_range
ENDPROC(xscsale_dma_a0_map_area)
ENDPROC(xscale_dma_a0_map_area)
/*
* dma_unmap_area(start, size, dir)


@ -295,6 +295,12 @@ static int mxc_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
/*
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
{
int i, j;
@ -311,6 +317,7 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
__raw_writel(~0, port[i].base + GPIO_ISR);
for (j = port[i].virtual_irq_start;
j < port[i].virtual_irq_start + 32; j++) {
irq_set_lockdep_class(j, &gpio_lock_class);
irq_set_chip_and_handler(j, &gpio_irq_chip,
handle_level_irq);
set_irq_flags(j, IRQF_VALID);


@ -124,6 +124,8 @@ imx_ssi_fiq_start:
1:
@ return from FIQ
subs pc, lr, #4
.align
imx_ssi_fiq_base:
.word 0x0
imx_ssi_fiq_rx_buffer:


@ -300,6 +300,8 @@ void __init paging_init(void)
zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
free_area_init_node(i, zones_size,
m68k_memory[i].addr >> PAGE_SHIFT, NULL);
if (node_present_pages(i))
node_set_state(i, N_NORMAL_MEMORY);
}
}


@ -266,8 +266,10 @@ static void __init setup_bootmem(void)
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
for (i = 0; i < npmem_ranges; i++)
for (i = 0; i < npmem_ranges; i++) {
node_set_state(i, N_NORMAL_MEMORY);
node_set_online(i);
}
#endif
/*


@ -393,8 +393,8 @@ typedef struct fec {
uint fec_addr_low; /* lower 32 bits of station address */
ushort fec_addr_high; /* upper 16 bits of station address */
ushort res1; /* reserved */
uint fec_hash_table_high; /* upper 32-bits of hash table */
uint fec_hash_table_low; /* lower 32-bits of hash table */
uint fec_grp_hash_table_high; /* upper 32-bits of hash table */
uint fec_grp_hash_table_low; /* lower 32-bits of hash table */
uint fec_r_des_start; /* beginning of Rx descriptor ring */
uint fec_x_des_start; /* beginning of Tx descriptor ring */
uint fec_r_buff_size; /* Rx buffer size */


@ -60,7 +60,7 @@
*
* Obviously, the GART is not cache coherent and so any change to it
* must be flushed to memory (or maybe just make the GART space non
* cachable). AGP memory itself does't seem to be cache coherent neither.
* cachable). AGP memory itself doesn't seem to be cache coherent neither.
*
* In order to invalidate the GART (which is probably necessary to inval
* the bridge internal TLBs), the following sequence has to be written,


@ -76,7 +76,7 @@ static void prng_seed(int nbytes)
/* Add the entropy */
while (nbytes >= 8) {
*((__u64 *)parm_block) ^= *((__u64 *)buf+i);
*((__u64 *)parm_block) ^= *((__u64 *)(buf+i));
prng_add_entropy();
i += 8;
nbytes -= 8;


@ -543,7 +543,6 @@ static void pfault_interrupt(unsigned int ext_int_code,
struct task_struct *tsk;
__u16 subcode;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
@ -553,6 +552,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
subcode = ext_int_code >> 16;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
/*
* Get the token (= address of the task structure of the affected task).


@ -47,7 +47,7 @@ config HOSTFS
config HPPFS
tristate "HoneyPot ProcFS (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on EXPERIMENTAL && PROC_FS
help
hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc
entries to be overridden, removed, or fabricated from the host.


@ -49,7 +49,10 @@ static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
unsigned long mask = THREAD_SIZE - 1;
ti = (struct thread_info *) (((unsigned long) &ti) & ~mask);
void *p;
asm volatile ("" : "=r" (p) : "0" (&ti));
ti = (struct thread_info *) (((unsigned long)p) & ~mask);
return ti;
}


@ -4,7 +4,7 @@
obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
sys_call_table.o tls.o
sys_call_table.o tls.o atomic64_cx8_32.o
obj-$(CONFIG_BINFMT_ELF) += elfcore.o


@ -0,0 +1,225 @@
/*
* atomic64_t for 586+
*
* Copied from arch/x86/lib/atomic64_cx8_32.S
*
* Copyright © 2010 Luca Barbieri
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
.macro SAVE reg
pushl_cfi %\reg
CFI_REL_OFFSET \reg, 0
.endm
.macro RESTORE reg
popl_cfi %\reg
CFI_RESTORE \reg
.endm
.macro read64 reg
movl %ebx, %eax
movl %ecx, %edx
/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
LOCK_PREFIX
cmpxchg8b (\reg)
.endm
ENTRY(atomic64_read_cx8)
CFI_STARTPROC
read64 %ecx
ret
CFI_ENDPROC
ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8)
CFI_STARTPROC
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */
cmpxchg8b (%esi)
jne 1b
ret
CFI_ENDPROC
ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8)
CFI_STARTPROC
movl %ebx, %eax
movl %ecx, %edx
1:
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
ret
CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC
SAVE ebp
SAVE ebx
SAVE esi
SAVE edi
movl %eax, %esi
movl %edx, %edi
movl %ecx, %ebp
read64 %ebp
1:
movl %eax, %ebx
movl %edx, %ecx
\ins\()l %esi, %ebx
\insc\()l %edi, %ecx
LOCK_PREFIX
cmpxchg8b (%ebp)
jne 1b
10:
movl %ebx, %eax
movl %ecx, %edx
RESTORE edi
RESTORE esi
RESTORE ebx
RESTORE ebp
ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8)
.endm
addsub_return add add adc
addsub_return sub sub sbb
.macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC
SAVE ebx
read64 %esi
1:
movl %eax, %ebx
movl %edx, %ecx
\ins\()l $1, %ebx
\insc\()l $0, %ecx
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
10:
movl %ebx, %eax
movl %ecx, %edx
RESTORE ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8)
.endm
incdec_return inc add adc
incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC
SAVE ebx
read64 %esi
1:
movl %eax, %ebx
movl %edx, %ecx
subl $1, %ebx
sbb $0, %ecx
js 2f
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
2:
movl %ebx, %eax
movl %ecx, %edx
RESTORE ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC
SAVE ebp
SAVE ebx
/* these just push these two parameters on the stack */
SAVE edi
SAVE esi
movl %ecx, %ebp
movl %eax, %esi
movl %edx, %edi
read64 %ebp
1:
cmpl %eax, 0(%esp)
je 4f
2:
movl %eax, %ebx
movl %edx, %ecx
addl %esi, %ebx
adcl %edi, %ecx
LOCK_PREFIX
cmpxchg8b (%ebp)
jne 1b
movl $1, %eax
3:
addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8
RESTORE ebx
RESTORE ebp
ret
4:
cmpl %edx, 4(%esp)
jne 2b
xorl %eax, %eax
jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC
SAVE ebx
read64 %esi
1:
testl %eax, %eax
je 4f
2:
movl %eax, %ebx
movl %edx, %ecx
addl $1, %ebx
adcl $0, %ecx
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
movl $1, %eax
3:
RESTORE ebx
ret
4:
testl %edx, %edx
jne 2b
jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8)
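All of these routines share one pattern: read the current 64-bit value, compute the new value in ebx:ecx, and retry the locked cmpxchg8b until no other CPU raced in between. A rough C rendering of that loop using the GCC __atomic builtins, offered as a reading aid rather than the kernel's implementation:

#include <stdint.h>

static int64_t atomic64_add_return_sketch(int64_t delta, int64_t *v)
{
	int64_t old, new;

	do {
		old = __atomic_load_n(v, __ATOMIC_RELAXED);
		new = old + delta;	/* the ins/insc step in the macro above */
	} while (!__atomic_compare_exchange_n(v, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new;
}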


@ -91,7 +91,7 @@ static int detect_memory_e801(void)
if (oreg.ax > 15*1024) {
return -1; /* Bogus! */
} else if (oreg.ax == 15*1024) {
boot_params.alt_mem_k = (oreg.dx << 6) + oreg.ax;
boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax;
} else {
/*
* This ignores memory above 16MB if we have a memory
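The fix matters because the two registers use different units: AX reports KiB of memory between 1 MiB and 16 MiB (capped at 15*1024), while BX reports 64 KiB blocks above 16 MiB, hence the shift by 6 (one BX unit is 1 << 6 KiB). A small sketch with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned int ax = 15 * 1024;	/* full 15 MiB below the 16 MiB line */
	unsigned int bx = 2048;		/* 2048 * 64 KiB = 128 MiB above it */
	unsigned int alt_mem_k = (bx << 6) + ax;

	printf("alt_mem_k = %u KiB\n", alt_mem_k);	/* 146432 */
	return 0;
}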


@ -150,7 +150,7 @@ void setup_IO_APIC_irq_extra(u32 gsi);
extern void ioapic_and_gsi_init(void);
extern void ioapic_insert_resources(void);
int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr);
int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);


@ -128,8 +128,8 @@ static int __init parse_noapic(char *str)
}
early_param("noapic", parse_noapic);
static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
struct io_apic_irq_attr *attr);
static int io_apic_setup_irq_pin(unsigned int irq, int node,
struct io_apic_irq_attr *attr);
/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
@ -3570,7 +3570,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
}
#endif /* CONFIG_HT_IRQ */
int
static int
io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
{
struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
@ -3585,8 +3585,8 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
return ret;
}
static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
struct io_apic_irq_attr *attr)
int io_apic_setup_irq_pin_once(unsigned int irq, int node,
struct io_apic_irq_attr *attr)
{
unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
int ret;


@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
*/
const int amd_erratum_400[] =
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf),
AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);


@ -613,8 +613,8 @@ static int x86_setup_perfctr(struct perf_event *event)
/*
* Branch tracing:
*/
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
!attr->freq && hwc->sample_period == 1) {
/* BTS is not supported by this architecture. */
if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
@ -1288,6 +1288,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This generic handler doesn't seem to have any issues where the
* unmasking occurs so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
@ -1374,8 +1384,6 @@ perf_event_nmi_handler(struct notifier_block *self,
return NOTIFY_DONE;
}
apic_write(APIC_LVTPC, APIC_DM_NMI);
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;


@ -25,7 +25,7 @@ struct intel_percore {
/*
* Intel PerfMon, used on Core and later.
*/
static const u64 intel_perfmon_event_map[] =
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
@ -933,6 +933,16 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This handler doesn't seem to have any issues with the unmasking
* so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
@ -998,6 +1008,9 @@ intel_bts_constraints(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;
if (event->attr.freq)
return NULL;
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
@ -1305,7 +1318,7 @@ static void intel_clovertown_quirks(void)
* AJ106 could possibly be worked around by not allowing LBR
* usage from PEBS, including the fixup.
* AJ68 could possibly be worked around by always programming
* a pebs_event_reset[0] value and coping with the lost events.
* a pebs_event_reset[0] value and coping with the lost events.
*
* But taken together it might just make sense to not enable PEBS on
* these chips.
@ -1409,6 +1422,18 @@ static __init int intel_pmu_init(void)
x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
if (ebx & 0x40) {
/*
* Erratum AAJ80 detected, we work it around by using
* the BR_MISP_EXEC.ANY event. This will over-count
* branch-misses, but it's still much better than the
* architectural event which is often completely bogus:
*/
intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
pr_cont("erratum AAJ80 worked around, ");
}
pr_cont("Nehalem events, ");
break;


@ -950,11 +950,20 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
x86_pmu_stop(event, 0);
}
if (handled) {
/* p4 quirk: unmask it again */
apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
if (handled)
inc_irq_stat(apic_perf_irqs);
}
/*
* When dealing with the unmasking of the LVTPC on P4 perf hw, it has
* been observed that the OVF bit flag has to be cleared first _before_
* the LVTPC can be unmasked.
*
* The reason is the NMI line will continue to be asserted while the OVF
* bit is set. This causes a second NMI to generate if the LVTPC is
* unmasked before the OVF bit is cleared, leading to unknown NMI
* messages.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
return handled;
}


@ -391,7 +391,7 @@ static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize,
set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity);
return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr);
return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr);
}
static void __init ioapic_add_ofnode(struct device_node *np)


@ -21,26 +21,26 @@ r_base = .
/* Get our own relocated address */
call 1f
1: popl %ebx
subl $1b, %ebx
subl $(1b - r_base), %ebx
/* Compute the equivalent real-mode segment */
movl %ebx, %ecx
shrl $4, %ecx
/* Patch post-real-mode segment jump */
movw dispatch_table(%ebx,%eax,2),%ax
movw %ax, 101f(%ebx)
movw %cx, 102f(%ebx)
movw (dispatch_table - r_base)(%ebx,%eax,2),%ax
movw %ax, (101f - r_base)(%ebx)
movw %cx, (102f - r_base)(%ebx)
/* Set up the IDT for real mode. */
lidtl machine_real_restart_idt(%ebx)
lidtl (machine_real_restart_idt - r_base)(%ebx)
/*
* Set up a GDT from which we can load segment descriptors for real
* mode. The GDT is not used in real mode; it is just needed here to
* prepare the descriptors.
*/
lgdtl machine_real_restart_gdt(%ebx)
lgdtl (machine_real_restart_gdt - r_base)(%ebx)
/*
* Load the data segment registers with 16-bit compatible values


@ -306,7 +306,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
bi->end = min(bi->end, high);
/* and there's no empty block */
if (bi->start == bi->end) {
if (bi->start >= bi->end) {
numa_remove_memblk_from(i--, mi);
continue;
}


@ -347,7 +347,7 @@ usb@d,0 {
"pciclass0c03";
reg = <0x16800 0x0 0x0 0x0 0x0>;
interrupts = <22 3>;
interrupts = <22 1>;
};
usb@d,1 {
@ -357,7 +357,7 @@ usb@d,1 {
"pciclass0c03";
reg = <0x16900 0x0 0x0 0x0 0x0>;
interrupts = <22 3>;
interrupts = <22 1>;
};
sata@e,0 {
@ -367,7 +367,7 @@ sata@e,0 {
"pciclass0106";
reg = <0x17000 0x0 0x0 0x0 0x0>;
interrupts = <23 3>;
interrupts = <23 1>;
};
flash@f,0 {


@ -1463,6 +1463,119 @@ static int xen_pgd_alloc(struct mm_struct *mm)
return ret;
}
#ifdef CONFIG_X86_64
static __initdata u64 __last_pgt_set_rw = 0;
static __initdata u64 __pgt_buf_start = 0;
static __initdata u64 __pgt_buf_end = 0;
static __initdata u64 __pgt_buf_top = 0;
/*
* As a consequence of the commit:
*
* commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
* Author: Yinghai Lu <yinghai@kernel.org>
* Date: Fri Dec 17 16:58:28 2010 -0800
*
* x86-64, mm: Put early page table high
*
* at some point init_memory_mapping is going to reach the pagetable pages
* area and map those pages too (mapping them as normal memory that falls
* in the range of addresses passed to init_memory_mapping as argument).
* Some of those pages are already pagetable pages (they are in the range
* pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
* everything is fine.
* Some of these pages are not pagetable pages yet (they fall in the range
* pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
* are going to be mapped RW. When these pages become pagetable pages and
* are hooked into the pagetable, xen will find that the guest has already
* a RW mapping of them somewhere and fail the operation.
* The reason Xen requires pagetables to be RO is that the hypervisor needs
* to verify that the pagetables are valid before using them. The validation
* operations are called "pinning".
*
* In order to fix the issue we mark all the pages in the entire range
* pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
* is completed only the range pgt_buf_start-pgt_buf_end is reserved by
* init_memory_mapping. Hence the kernel is going to crash as soon as one
* of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
* ranges are RO).
*
* For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
* the init_memory_mapping has completed (in a perfect world we would
* call this function from init_memory_mapping, but let's ignore that).
*
* Because we are called _after_ init_memory_mapping the pgt_buf_[start,
* end,top] have all changed to new values (b/c init_memory_mapping
* is called and setting up another new page-table). Hence, the first time
* we enter this function, we save away the pgt_buf_start value and update
* the pgt_buf_[end,top].
*
* When we detect that the "old" pgt_buf_start through pgt_buf_end
* PFNs have been reserved (so memblock_x86_reserve_range has been called),
* we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
*
* And then we update those "old" pgt_buf_[end|top] with the new ones
* so that we can redo this on the next pagetable.
*/
static __init void mark_rw_past_pgt(void) {
if (pgt_buf_end > pgt_buf_start) {
u64 addr, size;
/* Save it away. */
if (!__pgt_buf_start) {
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}
/* If we get the range that starts at __pgt_buf_end that means
* the range is reserved, and that in 'init_memory_mapping'
* the 'memblock_x86_reserve_range' has been called with the
* outdated __pgt_buf_start, __pgt_buf_end (the "new"
* pgt_buf_[start|end|top] now refer to a new pagetable).
* Note: we are called _after_ the pgt_buf_[..] have been
* updated.*/
addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
&size, PAGE_SIZE);
/* Still not reserved, meaning 'memblock_x86_reserve_range'
* hasn't been called yet. Update the _end and _top.*/
if (addr == PFN_PHYS(__pgt_buf_start)) {
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}
/* OK, the area is reserved, meaning it is time for us to
* set RW for the old end->top PFNs. */
/* ..unless we had already done this. */
if (__pgt_buf_end == __last_pgt_set_rw)
return;
addr = PFN_PHYS(__pgt_buf_end);
/* set as RW the rest */
printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
while (addr < PFN_PHYS(__pgt_buf_top)) {
make_lowmem_page_readwrite(__va(addr));
addr += PAGE_SIZE;
}
/* And update everything so that we are ready for the next
* pagetable (the one created for regions past 4GB) */
__last_pgt_set_rw = __pgt_buf_end;
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
}
return;
}
#else
static __init void mark_rw_past_pgt(void) { }
#endif
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
@ -1488,6 +1601,14 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
/*
* A bit of optimization. We do not need to call the workaround
* when xen_set_pte_init is called with a PTE with 0 as PFN.
* That is b/c the pagetables at that point are just being populated
* with empty values and we can save some cycles by not calling
* the 'memblock' code.*/
if (pfn)
mark_rw_past_pgt();
/*
* If the new pfn is within the range of the newly allocated
* kernel pagetable, and it isn't being mapped into an
@ -1495,7 +1616,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
* it is RO.
*/
if (((!is_early_ioremap_ptep(ptep) &&
pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
pte = pte_wrprotect(pte);
@ -1997,6 +2118,8 @@ __init void xen_ident_map_ISA(void)
static __init void xen_post_allocator_init(void)
{
mark_rw_past_pgt();
#ifdef CONFIG_XEN_DEBUG
pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
#endif

View File

@ -943,6 +943,10 @@ static int acpi_bus_get_flags(struct acpi_device *device)
if (ACPI_SUCCESS(status))
device->flags.lockable = 1;
/* Power resources cannot be power manageable. */
if (device->device_type == ACPI_BUS_TYPE_POWER)
return 0;
/* Presence of _PS0|_PR0 indicates 'power manageable' */
status = acpi_get_handle(device->handle, "_PS0", &temp);
if (ACPI_FAILURE(status))

View File

@ -63,6 +63,7 @@ void device_pm_init(struct device *dev)
dev->power.wakeup = NULL;
spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
INIT_LIST_HEAD(&dev->power.entry);
}
/**

View File

@ -258,7 +258,7 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
if (!!dev->power.can_wakeup == !!capable)
return;
if (device_is_registered(dev)) {
if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
if (capable) {
if (wakeup_sysfs_add(dev))
return;

View File

@ -903,6 +903,9 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
{ }
};

View File

@ -225,6 +225,14 @@
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);

View File

@ -1420,6 +1420,16 @@ static const struct intel_gtt_driver_description {
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};

View File

@ -32,10 +32,9 @@ static DEFINE_MUTEX(clocks_mutex);
* Then we take the most specific entry - with the following
* order of precedence: dev+con > dev only > con only.
*/
static struct clk *clk_find(const char *dev_id, const char *con_id)
static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
{
struct clk_lookup *p;
struct clk *clk = NULL;
struct clk_lookup *p, *cl = NULL;
int match, best = 0;
list_for_each_entry(p, &clocks, node) {
@ -52,27 +51,27 @@ static struct clk *clk_find(const char *dev_id, const char *con_id)
}
if (match > best) {
clk = p->clk;
cl = p;
if (match != 3)
best = match;
else
break;
}
}
return clk;
return cl;
}
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
struct clk *clk;
struct clk_lookup *cl;
mutex_lock(&clocks_mutex);
clk = clk_find(dev_id, con_id);
if (clk && !__clk_get(clk))
clk = NULL;
cl = clk_find(dev_id, con_id);
if (cl && !__clk_get(cl->clk))
cl = NULL;
mutex_unlock(&clocks_mutex);
return clk ? clk : ERR_PTR(-ENOENT);
return cl ? cl->clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(clk_get_sys);
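The precedence comment at the top of this hunk ("dev+con > dev only > con only") boils down to a small per-entry score. A minimal sketch of that scoring, assuming the usual dev_id/con_id fields of struct clk_lookup; the exact weights are inferred from the match/best logic visible above, not shown in this hunk:
/* Sketch: score a clk_lookup entry against the requested ids.
 * A dev_id match is worth 2 and a con_id match 1, so dev+con (3)
 * outranks dev-only (2), which outranks con-only (1). A negative
 * return means the entry cannot match at all. The weights are an
 * assumption inferred from the "if (match != 3) best = match;
 * else break;" logic in clk_find() above. */
static int clk_lookup_score(const struct clk_lookup *p,
                            const char *dev_id, const char *con_id)
{
        int match = 0;

        if (p->dev_id) {
                if (!dev_id || strcmp(p->dev_id, dev_id))
                        return -1;      /* entry demands a dev_id we lack */
                match += 2;
        }
        if (p->con_id) {
                if (!con_id || strcmp(p->con_id, con_id))
                        return -1;      /* entry demands a con_id we lack */
                match += 1;
        }
        return match;   /* 3 == exact dev+con match, search can stop */
}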

View File

@ -933,11 +933,34 @@ EXPORT_SYMBOL(drm_vblank_put);
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
e->event.sequence = seq;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, e->pipe);
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);

View File

@ -106,11 +106,12 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
static const char *agp_type_str(int type)
static const char *cache_level_str(int type)
{
switch (type) {
case 0: return " uncached";
case 1: return " snooped";
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped (LLC)";
case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
default: return "";
}
}
@ -127,7 +128,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.write_domain,
obj->last_rendering_seqno,
obj->last_fenced_seqno,
agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@ -714,7 +715,7 @@ static void print_error_buffers(struct seq_file *m,
dirty_flag(err->dirty),
purgeable_flag(err->purgeable),
ring_str(err->ring),
agp_type_str(err->agp_type));
cache_level_str(err->cache_level));
if (err->name)
seq_printf(m, " (name: %d)", err->name);
@ -852,6 +853,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (IS_GEN5(dev)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
@ -873,7 +875,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
int max_freq;
/* RPSTAT1 is in the GT power well */
__gen6_gt_force_wake_get(dev_priv);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
gen6_gt_force_wake_get(dev_priv);
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
@ -883,6 +889,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
@ -917,8 +926,6 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * 50);
__gen6_gt_force_wake_put(dev_priv);
} else {
seq_printf(m, "no P-state info available\n");
}
@ -1186,6 +1193,42 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
seq_printf(m, "power context ");
describe_obj(m, dev_priv->pwrctx);
seq_printf(m, "\n");
seq_printf(m, "render context ");
describe_obj(m, dev_priv->renderctx);
seq_printf(m, "\n");
mutex_unlock(&dev->mode_config.mutex);
return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
seq_printf(m, "forcewake count = %d\n",
atomic_read(&dev_priv->forcewake_count));
return 0;
}
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@ -1288,6 +1331,67 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!IS_GEN6(dev))
return 0;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
gen6_gt_force_wake_get(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN6(dev))
return 0;
/*
* It's bad that we can potentially hang userspace if struct_mutex gets
* forever stuck. However, if we cannot acquire this lock it means that
* almost certainly the driver has hung and is not unloadable. Therefore
* hanging here is probably a minor inconvenience that almost no user
* will ever see.
*/
mutex_lock(&dev->struct_mutex);
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static const struct file_operations i915_forcewake_fops = {
.owner = THIS_MODULE,
.open = i915_forcewake_open,
.release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
ent = debugfs_create_file("i915_forcewake_user",
S_IRUSR,
root, dev,
&i915_forcewake_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
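The new i915_forcewake_user file pins the GT awake for as long as userspace keeps it open: open() takes a forcewake reference, release() drops it, and the file is a no-op on pre-gen6 parts per the IS_GEN6 checks above. A hypothetical userspace caller, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0:
/* Hypothetical usage sketch: while the fd stays open the GT cannot
 * power down, so register reads through other debugfs files return
 * live values. Path and minor number are assumptions. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/dri/0/i915_forcewake_user",
                      O_RDONLY);
        if (fd < 0)
                return 1;
        /* ... inspect GT registers while the GT is held awake ... */
        close(fd);      /* i915_forcewake_release() drops the reference */
        return 0;
}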
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@ -1324,6 +1428,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@ -1335,6 +1441,10 @@ int i915_debugfs_init(struct drm_minor *minor)
if (ret)
return ret;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@ -1344,6 +1454,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1, minor);
}

View File

@ -571,7 +571,7 @@ static int i915_quiescent(struct drm_device *dev)
struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
return intel_wait_ring_buffer(ring, ring->size - 8);
return intel_wait_ring_idle(ring);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@ -1176,11 +1176,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
static int i915_load_modeset_init(struct drm_device *dev)
static int i915_load_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size, gtt_size, mappable_size;
int ret = 0;
int ret;
prealloc_size = dev_priv->mm.gtt->stolen_size;
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
@ -1204,7 +1204,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (ret)
goto out;
return ret;
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
@ -1222,6 +1222,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
return 0;
}
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = intel_parse_bios(dev);
if (ret)
@ -1236,7 +1243,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
if (ret && ret != -ENODEV)
goto cleanup_ringbuffer;
goto out;
intel_register_dsm_handler();
@ -1253,10 +1260,40 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_init(dev);
ret = drm_irq_install(dev);
ret = i915_load_gem_init(dev);
if (ret)
goto cleanup_vga_switcheroo;
intel_modeset_gem_init(dev);
if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
dev->driver->irq_preinstall = i915_driver_irq_preinstall;
dev->driver->irq_postinstall = i915_driver_irq_postinstall;
dev->driver->irq_uninstall = i915_driver_irq_uninstall;
dev->driver->irq_handler = i915_driver_irq_handler;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
@ -1274,14 +1311,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
@ -1982,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@ -2025,6 +2062,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;

View File

@ -188,6 +188,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_blt_ring = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
.is_ivybridge = 1, .gen = 7,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
@ -227,6 +242,11 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
{0, 0, 0}
};
@ -235,7 +255,9 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
void intel_detect_pch (struct drm_device *dev)
{
@ -254,16 +276,23 @@ void intel_detect_pch (struct drm_device *dev)
int id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
}
}
pci_dev_put(pch);
}
}
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@ -279,12 +308,38 @@ void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
__gen6_gt_force_wake_get(dev_priv);
}
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
__gen6_gt_force_wake_put(dev_priv);
}
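A minimal sketch of the bracketed pattern the comment above describes, assuming the caller already holds struct_mutex (as the WARN_ON requires); the registers read are illustrative only:
/* Keep the GT awake across a multi-register sequence instead of
 * paying one implicit forcewake cycle per read; the atomic refcount
 * makes nesting with the implicit get/put in the read macros safe. */
static u32 read_rps_counters(struct drm_i915_private *dev_priv, u32 *down)
{
        u32 up;

        gen6_gt_force_wake_get(dev_priv);
        up = I915_READ(GEN6_RP_CUR_UP_EI);      /* values stay coherent... */
        *down = I915_READ(GEN6_RP_CUR_DOWN);    /* ...until the put below */
        gen6_gt_force_wake_put(dev_priv);

        return up;
}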
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int loop = 500;

View File

@ -188,7 +188,7 @@ struct drm_i915_error_state {
u32 dirty:1;
u32 purgeable:1;
u32 ring:4;
u32 agp_type:1;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
struct intel_overlay_error_state *overlay;
@ -203,12 +203,19 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
/* clock gating init */
};
struct intel_device_info {
@ -223,6 +230,7 @@ struct intel_device_info {
u8 is_pineview : 1;
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 is_ivybridge : 1;
u8 has_fbc : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
@ -676,6 +684,10 @@ typedef struct drm_i915_private {
bool mchbar_need_disable;
struct work_struct rps_work;
spinlock_t rps_lock;
u32 pm_iir;
u8 cur_delay;
u8 min_delay;
u8 max_delay;
@ -703,8 +715,16 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
struct drm_property *broadcast_rgb_property;
atomic_t forcewake_count;
} drm_i915_private_t;
enum i915_cache_level {
I915_CACHE_NONE,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+ */
};
struct drm_i915_gem_object {
struct drm_gem_object base;
@ -791,6 +811,8 @@ struct drm_i915_gem_object {
unsigned int pending_fenced_gpu_access:1;
unsigned int fenced_gpu_access:1;
unsigned int cache_level:2;
struct page **pages;
/**
@ -827,8 +849,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
uint32_t agp_type;
/**
* If present, while GEM_DOMAIN_CPU is in the read domain this array
@ -915,13 +935,21 @@ enum intel_chip_family {
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
@ -948,8 +976,8 @@ enum intel_chip_family {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@ -1010,12 +1038,27 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
extern void i915_driver_irq_uninstall(struct drm_device * dev);
extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
extern void ironlake_irq_preinstall(struct drm_device *dev);
extern int ironlake_irq_postinstall(struct drm_device *dev);
extern void ironlake_irq_uninstall(struct drm_device *dev);
extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
extern void ivybridge_irq_preinstall(struct drm_device *dev);
extern int ivybridge_irq_postinstall(struct drm_device *dev);
extern void ivybridge_irq_uninstall(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
@ -1265,6 +1308,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
@ -1312,13 +1356,34 @@ extern void intel_display_print_error_state(struct seq_file *m,
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent the GT core from powering down and returning
* stale values.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = read##y(dev_priv->regs + reg); \
u##x val = 0; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
gen6_gt_force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
gen6_gt_force_wake_put(dev_priv); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
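Hand-expanding __i915_read(32, l) gives roughly the following; this is just the macro body spelled out for readability, not additional source:
/* What I915_READ() now does under the covers: a forcewake-bracketed
 * readl() for gen6+ GT registers, a plain readl() for everything else,
 * with the register trace emitted either way. The function name is
 * chosen here to avoid clashing with the real generated i915_read32. */
static inline u32 i915_read32_expanded(struct drm_i915_private *dev_priv,
                                       u32 reg)
{
        u32 val = 0;

        if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
                gen6_gt_force_wake_get(dev_priv);
                val = readl(dev_priv->regs + reg);
                gen6_gt_force_wake_put(dev_priv);
        } else {
                val = readl(dev_priv->regs + reg);
        }
        trace_i915_reg_rw(false, reg, val, sizeof(val));
        return val;
}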
@ -1328,6 +1393,9 @@ __i915_read(64, q)
#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
@ -1356,33 +1424,4 @@ __i915_write(64, q)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent the GT core from powering down and returning
* stale values.
*/
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
if (dev_priv->info->gen >= 6) {
__gen6_gt_force_wake_get(dev_priv);
val = I915_READ(reg);
__gen6_gt_force_wake_put(dev_priv);
} else
val = I915_READ(reg);
return val;
}
static inline void i915_gt_write(struct drm_i915_private *dev_priv,
u32 reg, u32 val)
{
if (dev_priv->info->gen >= 6)
__gen6_gt_wait_for_fifo(dev_priv);
I915_WRITE(reg, val);
}
#endif

View File

@ -2673,6 +2673,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
update:
obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
ret = sandybridge_write_fence_reg(obj, pipelined);
break;
@ -2706,6 +2707,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
uint32_t fence_reg = reg - dev_priv->fence_regs;
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
@ -2878,6 +2880,17 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
* flushed/invalidated. As we always have to emit invalidations
* and flushes when moving into and out of the RENDER domain, correct
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (obj->cache_level != I915_CACHE_NONE)
return;
trace_i915_gem_object_clflush(obj);
drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
@ -3569,7 +3582,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->agp_type = AGP_USER_MEMORY;
obj->cache_level = I915_CACHE_NONE;
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
@ -3845,25 +3858,10 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
case 2:
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
break;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);

View File

@ -29,6 +29,26 @@
#include "i915_trace.h"
#include "intel_drv.h"
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
{
switch (cache_level) {
case I915_CACHE_LLC_MLC:
if (INTEL_INFO(dev)->gen >= 6)
return AGP_USER_CACHED_MEMORY_LLC_MLC;
/* Older chipsets do not have this extra level of CPU
* caching, so fallthrough and request the PTE simply
* as cached.
*/
case I915_CACHE_LLC:
return AGP_USER_CACHED_MEMORY;
default:
case I915_CACHE_NONE:
return AGP_USER_MEMORY;
}
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -39,6 +59,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
unsigned int agp_type =
cache_level_to_agp_type(dev, obj->cache_level);
i915_gem_clflush_object(obj);
if (dev_priv->mm.gtt->needs_dmar) {
@ -46,15 +69,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start
>> PAGE_SHIFT,
obj->agp_type);
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start
>> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
obj->agp_type);
agp_type);
}
intel_gtt_chipset_flush();
@ -64,6 +86,7 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
int ret;
if (dev_priv->mm.gtt->needs_dmar) {
@ -77,12 +100,12 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start >> PAGE_SHIFT,
obj->agp_type);
agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
obj->agp_type);
agp_type);
return 0;
}

View File

@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (IS_GEN5(dev) || IS_GEN6(dev)) {
if (INTEL_INFO(dev)->gen >= 5) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/

View File

@ -367,22 +367,30 @@ static void notify_ring(struct drm_device *dev,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
static void gen6_pm_irq_handler(struct drm_device *dev)
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
u8 new_delay = dev_priv->cur_delay;
u32 pm_iir;
u32 pm_iir, pm_imr;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
dev_priv->pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
spin_unlock_irq(&dev_priv->rps_lock);
pm_iir = I915_READ(GEN6_PMIIR);
if (!pm_iir)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (dev_priv->cur_delay != dev_priv->max_delay)
new_delay = dev_priv->cur_delay + 1;
if (new_delay > dev_priv->max_delay)
new_delay = dev_priv->max_delay;
} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
gen6_gt_force_wake_get(dev_priv);
if (dev_priv->cur_delay != dev_priv->min_delay)
new_delay = dev_priv->cur_delay - 1;
if (new_delay < dev_priv->min_delay) {
@ -396,13 +404,19 @@ static void gen6_pm_irq_handler(struct drm_device *dev)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
}
gen6_gt_force_wake_put(dev_priv);
}
gen6_set_rps(dev, new_delay);
gen6_set_rps(dev_priv->dev, new_delay);
dev_priv->cur_delay = new_delay;
I915_WRITE(GEN6_PMIIR, pm_iir);
/*
* rps_lock not held here because clearing is non-destructive. There is
* an *extremely* unlikely race with gen6_rps_enable() that is prevented
* by holding struct_mutex for the duration of the write.
*/
I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void pch_irq_handler(struct drm_device *dev)
@ -448,8 +462,97 @@ static void pch_irq_handler(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
struct drm_i915_master_private *master_priv;
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR);
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
goto done;
ret = IRQ_HANDLED;
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
}
if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
}
if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip_plane(dev, 1);
}
if (de_iir & DE_PIPEA_VBLANK_IVB)
drm_handle_vblank(dev, 0);
if (de_iir & DE_PIPEB_VBLANK_IVB);
drm_handle_vblank(dev, 1);
/* check event from PCH */
if (de_iir & DE_PCH_EVENT_IVB) {
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
pch_irq_handler(dev);
}
if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}
/* should clear PCH hotplug event before clearing CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
return ret;
}
irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
@ -457,6 +560,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
if (IS_GEN6(dev))
bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
@ -526,13 +631,30 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
i915_handle_rps_change(dev);
}
if (IS_GEN6(dev))
gen6_pm_irq_handler(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* catches a case where we've unsafely cleared
* dev_priv->pm_iir. Although missing an interrupt of the same
* type is not a problem, it indicates a problem in the logic.
*
* The mask bit in IMR is cleared by rps_work.
*/
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}
/* should clear PCH hotplug event before clearing CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
done:
I915_WRITE(DEIER, de_ier);
@ -676,7 +798,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->ring = obj->ring ? obj->ring->id : 0;
err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
err->cache_level = obj->cache_level;
if (++i == count)
break;
@ -1103,9 +1225,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
if (HAS_PCH_SPLIT(dev))
return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
if (INTEL_INFO(dev)->gen >= 4)
@ -1344,10 +1463,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (INTEL_INFO(dev)->gen >= 4)
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@ -1362,6 +1478,38 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@ -1375,13 +1523,31 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
I915_WRITE(INSTPM,
INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@ -1562,10 +1728,15 @@ void i915_hangcheck_elapsed(unsigned long data)
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
atomic_set(&dev_priv->irq_received, 0);
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
I915_WRITE(HWSTAM, 0xeffe);
/* XXX hotplug from PCH */
@ -1585,7 +1756,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@ -1594,6 +1765,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 render_irqs;
u32 hotplug_mask;
DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
@ -1650,6 +1828,56 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB;
u32 render_irqs;
u32 hotplug_mask;
DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
DE_PIPEB_VBLANK_IVB);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
GT_BLT_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
return 0;
}
void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -1659,11 +1887,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_preinstall(dev);
return;
}
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@ -1688,17 +1912,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
if (HAS_PCH_SPLIT(dev))
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
@ -1767,9 +1982,15 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
static void ironlake_irq_uninstall(struct drm_device *dev)
void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (!dev_priv)
return;
dev_priv->vblank_pipe = 0;
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(DEIMR, 0xffffffff);
@ -1791,11 +2012,6 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_uninstall(dev);
return;
}
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

View File

@ -291,6 +291,9 @@
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
@ -2778,6 +2781,19 @@
#define DE_PIPEA_VSYNC (1 << 3)
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
/* More Ivybridge lolz */
#define DE_ERR_DEBUG_IVB (1<<30)
#define DE_GSE_IVB (1<<29)
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_PIPEA_VBLANK_IVB (1<<0)
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
@ -2809,6 +2825,7 @@
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define IVB_VRHUNIT_CLK_GATE (1<<28)
#define ILK_DPARB_CLK_GATE (1<<5)
#define ILK_DPFD_CLK_GATE (1<<7)
@ -3057,6 +3074,9 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
@ -3104,7 +3124,15 @@
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
/* Ivybridge has different bits for lolz */
#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
/* both Tx and Rx */
#define FDI_LINK_TRAIN_AUTO (1<<10)
#define FDI_SCRAMBLING_ENABLE (0<<7)
#define FDI_SCRAMBLING_DISABLE (1<<7)
@ -3114,6 +3142,8 @@
#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
/* train, dp width same as FDI_TX */
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
@ -3386,7 +3416,7 @@
#define GEN6_PMINTRMSK 0xA168
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024
#define GEN6_PMIMR 0x44024 /* rps_lock */
#define GEN6_PMIIR 0x44028
#define GEN6_PMIER 0x4402C
#define GEN6_PM_MBOX_EVENT (1<<25)
@ -3396,6 +3426,9 @@
#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)

View File

@ -863,8 +863,7 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IMR, dev_priv->saveIMR);
}
/* Clock gating state */
intel_enable_clock_gating(dev);
intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);

View File

@ -305,13 +305,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
intel_crt_load_detect(struct intel_crt *crt)
{
struct drm_encoder *encoder = &crt->base.base;
struct drm_device *dev = encoder->dev;
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pipe = intel_crtc->pipe;
uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
@ -432,7 +430,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
if (I915_HAS_HOTPLUG(dev)) {
@ -454,17 +451,18 @@ intel_crt_detect(struct drm_connector *connector, bool force)
/* for pre-945g platforms use load detect */
crtc = crt->base.base.crtc;
if (crtc && crtc->enabled) {
status = intel_crt_load_detect(crtc, crt);
status = intel_crt_load_detect(crt);
} else {
crtc = intel_get_load_detect_pipe(&crt->base, connector,
NULL, &dpms_mode);
if (crtc) {
struct intel_load_detect_pipe tmp;
if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
&tmp)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crtc, crt);
intel_release_load_detect_pipe(&crt->base,
connector, dpms_mode);
status = intel_crt_load_detect(crt);
intel_release_load_detect_pipe(&crt->base, connector,
&tmp);
} else
status = connector_status_unknown;
}

File diff suppressed because it is too large

View File

@ -1470,7 +1470,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
* corresponding HDMI output on transcoder A.
@ -1485,7 +1486,19 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
intel_wait_for_vblank(dev, intel_crtc->pipe);
if (crtc == NULL) {
/* We can arrive here never having been attached
* to a CRTC, for instance, due to inheriting
* random state from the BIOS.
*
* If the pipe is not running, play safe and
* wait for the clocks to stabilise before
* continuing.
*/
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);

View File

@ -140,7 +140,6 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
int type;
bool load_detect_temp;
bool needs_tv_clock;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
@ -291,13 +290,19 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
int dpms_mode;
};
extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
int dpms_mode);
struct intel_load_detect_pipe *old);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
@ -339,4 +344,6 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
extern void intel_init_clock_gating(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */

View File

@ -539,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector = dev_priv->int_lvds_connector;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.

View File

@ -236,7 +236,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
obj->agp_type = AGP_USER_CACHED_MEMORY;
obj->cache_level = I915_CACHE_LLC;
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
@ -286,7 +286,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen > 3) {
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
if (IS_GEN6(dev) || IS_GEN7(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
}
@ -551,10 +551,31 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 mmio = IS_GEN6(ring->dev) ?
RING_HWS_PGA_GEN6(ring->mmio_base) :
RING_HWS_PGA(ring->mmio_base);
u32 mmio = 0;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
case RING_RENDER:
mmio = RENDER_HWS_PGA_GEN7;
break;
case RING_BLT:
mmio = BLT_HWS_PGA_GEN7;
break;
case RING_BSD:
mmio = BSD_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
mmio = RING_HWS_PGA(ring->mmio_base);
}
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
}
@ -759,7 +780,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
obj->agp_type = AGP_USER_CACHED_MEMORY;
obj->cache_level = I915_CACHE_LLC;
ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
@ -800,6 +821,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
init_waitqueue_head(&ring->irq_queue);
spin_lock_init(&ring->irq_lock);
ring->irq_mask = ~0;
@ -872,7 +894,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_buffer(ring, ring->size - 8);
ret = intel_wait_ring_idle(ring);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
@ -1333,7 +1355,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
if (IS_GEN6(dev))
if (IS_GEN6(dev) || IS_GEN7(dev))
*ring = gen6_bsd_ring;
else
*ring = bsd_ring;

View File

@ -14,27 +14,24 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
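The I915_RING_READ/I915_RING_WRITE wrappers (and their i915_gt_read/i915_gt_write backends) can go away because the generic I915_READ path now takes the GT forcewake itself for registers that need it. A rough sketch of the companion i915_drv.h predicate, hedged from memory of this series:
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	(((dev_priv)->info->gen >= 6) && \
	 ((reg) < 0x40000) && \
	 ((reg) != FORCEWAKE))
With that check folded into I915_READ/I915_WRITE, keeping a second set of ring-register accessors would only invite callers to pick the wrong one.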
struct intel_ring_buffer {
const char *name;
@ -164,7 +161,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
#define I915_BREADCRUMB_INDEX 0x21
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
return intel_wait_ring_buffer(ring, ring->size - 8);
}
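The size - 8 argument is deliberate: the ring is never allowed to fill completely (8 bytes of slack keep head and tail distinguishable), so size - 8 free bytes is the most the ring can ever have, and waiting for it is the same as waiting for the GPU to drain the ring. It is exactly the open-coded form removed from intel_cleanup_ring_buffer() earlier in this diff:
	/* previously open-coded at the cleanup call site */
	ret = intel_wait_ring_buffer(ring, ring->size - 8);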
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,

View File

@ -1361,15 +1361,14 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv, connector);
} else if (force) {
struct drm_crtc *crtc;
int dpms_mode;
struct intel_load_detect_pipe tmp;
crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
if (intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &tmp)) {
type = intel_tv_detect_type(intel_tv, connector);
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
intel_release_load_detect_pipe(&intel_tv->base,
connector,
&tmp);
} else
return connector_status_unknown;
} else

View File

@ -862,9 +862,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
if (rdev->flags & RADEON_IS_IGP) {
WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
} else {
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@ -2923,11 +2929,6 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
/* XXX: ontario has problems blitting to gart at the moment */
if (rdev->family == CHIP_PALM) {
rdev->asic->copy = NULL;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
}
/* allocate wb buffer */
r = radeon_wb_init(rdev);

View File

@ -221,6 +221,11 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034

View File

@ -1599,9 +1599,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);
if (drm_edid_is_valid(edid))
if (drm_edid_is_valid(edid)) {
rdev->mode_info.bios_hardcoded_edid = edid;
else
rdev->mode_info.bios_hardcoded_edid_size = edid_size;
} else
kfree(edid);
}
}

View File

@ -234,6 +234,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
case RADEON_INFO_FUSION_GART_WORKING:
value = 1;
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
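The new request gives userspace a way to ask whether GART copies are safe on Fusion parts now that the blit workaround above is gone; old kernels simply fail the ioctl with -EINVAL, which callers should treat as "not working". A hedged userspace sketch using libdrm (include path and error handling simplified):
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>
/* returns nonzero if the kernel reports a working Fusion GART */
static int fusion_gart_working(int fd)
{
	struct drm_radeon_info info;
	uint32_t value = 0;
	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_FUSION_GART_WORKING;
	info.value = (uintptr_t)&value;	/* kernel writes the answer here */
	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return 0;	/* EINVAL from an old kernel: assume not working */
	return value == 1;
}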

View File

@ -110,8 +110,7 @@ config SENSORS_ADM1021
help
If you say yes here you get support for Analog Devices ADM1021
and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A,
Genesys Logic GL523SM, National Semiconductor LM84, TI THMC10,
and the XEON processor built-in sensor.
Genesys Logic GL523SM, National Semiconductor LM84 and TI THMC10.
This driver can also be built as a module. If so, the module
will be called adm1021.
@ -618,10 +617,10 @@ config SENSORS_LM90
depends on I2C
help
If you say yes here you get support for National Semiconductor LM90,
LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, Maxim
MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, and Winbond/Nuvoton
W83L771W/G/AWG/ASG sensor chips.
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.

View File

@ -1094,6 +1094,7 @@ static struct attribute *lm85_attributes_minctl[] = {
&sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
NULL
};
static const struct attribute_group lm85_group_minctl = {
@ -1104,6 +1105,7 @@ static struct attribute *lm85_attributes_temp_off[] = {
&sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
&sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
&sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
NULL
};
static const struct attribute_group lm85_group_temp_off = {
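The NULL entries added above are not cosmetic: sysfs_create_group() walks the attrs array until it finds a NULL sentinel, so an unterminated array reads past its end. Minimal sketch of the required shape (attribute name hypothetical):
static struct attribute *example_attrs[] = {
	&sensor_dev_attr_example.dev_attr.attr,
	NULL	/* sentinel: the sysfs core iterates until here */
};
static const struct attribute_group example_group = {
	.attrs = example_attrs,
};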
@ -1329,11 +1331,11 @@ static int lm85_probe(struct i2c_client *client,
if (data->type != emc6d103s) {
err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl);
if (err)
goto err_kfree;
goto err_remove_files;
err = sysfs_create_group(&client->dev.kobj,
&lm85_group_temp_off);
if (err)
goto err_kfree;
goto err_remove_files;
}
/* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used

View File

@ -49,10 +49,10 @@
* chips, but support three temperature sensors instead of two. MAX6695
* and MAX6696 only differ in the pinout so they can be treated identically.
*
* This driver also supports the ADT7461 chip from Analog Devices.
* It's supported in both compatibility and extended mode. It is mostly
* compatible with LM90 except for a data format difference for the
* temperature value registers.
* This driver also supports ADT7461 and ADT7461A from Analog Devices as well as
* NCT1008 from ON Semiconductor. The chips are supported in both compatibility
* and extended mode. They are mostly compatible with LM90 except for a data
* format difference for the temperature value registers.
*
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
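The "data format difference" the comment mentions is the extended-range encoding: in extended mode the ADT7461/ADT7461A/NCT1008 report temperatures offset by 64, so one unsigned register byte spans roughly -64 to +191 degrees C, where the classic LM90 format is plain signed 8-bit. A hedged sketch of the two conversions (helper names hypothetical; the driver's actual helpers may differ):
/* extended mode: offset-64 binary, result in millidegrees C */
static int temp_from_u8_ext(u8 val)
{
	return (val - 64) * 1000;
}
/* classic LM90 format: two's complement, result in millidegrees C */
static int temp_from_s8_std(s8 val)
{
	return val * 1000;
}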
@ -88,9 +88,10 @@
* Addresses to scan
* Address is fully defined internally and cannot be changed except for
* MAX6659, MAX6680 and MAX6681.
* LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6649, MAX6657,
* MAX6658 and W83L771 have address 0x4c.
* ADM1032-2, ADT7461-2, LM89-1, LM99-1 and MAX6646 have address 0x4d.
* LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, ADT7461A, MAX6649,
* MAX6657, MAX6658, NCT1008 and W83L771 have address 0x4c.
* ADM1032-2, ADT7461-2, ADT7461A-2, LM89-1, LM99-1, MAX6646, and NCT1008D
* have address 0x4d.
* MAX6647 has address 0x4e.
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
* MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
@ -174,6 +175,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
static const struct i2c_device_id lm90_id[] = {
{ "adm1032", adm1032 },
{ "adt7461", adt7461 },
{ "adt7461a", adt7461 },
{ "lm90", lm90 },
{ "lm86", lm86 },
{ "lm89", lm86 },
@ -188,6 +190,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6681", max6680 },
{ "max6695", max6696 },
{ "max6696", max6696 },
{ "nct1008", adt7461 },
{ "w83l771", w83l771 },
{ }
};
@ -1153,6 +1156,11 @@ static int lm90_detect(struct i2c_client *new_client,
&& (reg_config1 & 0x1B) == 0x00
&& reg_convrate <= 0x0A) {
name = "adt7461";
} else
if (chip_id == 0x57 /* ADT7461A, NCT1008 */
&& (reg_config1 & 0x1B) == 0x00
&& reg_convrate <= 0x0A) {
name = "adt7461a";
}
} else
if (man_id == 0x4D) { /* Maxim */
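For context, the values tested above are read over SMBus at the top of the detect callback; a hedged sketch using the LM90-compatible register map (the LM90_REG_R_* macros exist in lm90.c, addresses shown for clarity):
man_id = i2c_smbus_read_byte_data(new_client, LM90_REG_R_MAN_ID);	/* 0xFE */
chip_id = i2c_smbus_read_byte_data(new_client, LM90_REG_R_CHIP_ID);	/* 0xFF */
reg_config1 = i2c_smbus_read_byte_data(new_client, LM90_REG_R_CONFIG1);
reg_convrate = i2c_smbus_read_byte_data(new_client, LM90_REG_R_CONVRATE);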

Some files were not shown because too many files have changed in this diff.