mirror of https://gitee.com/openkylin/linux.git
Merge branch 'linus' into x86/fixmap
commit 8b7ef4ec5b
@@ -84,10 +84,9 @@
 runs an instance of gdb against the vmlinux file which contains
 the symbols (not boot image such as bzImage, zImage, uImage...).
 In gdb the developer specifies the connection parameters and
-connects to kgdb. Depending on which kgdb I/O modules exist in
-the kernel for a given architecture, it may be possible to debug
-the test machine's kernel with the development machine using a
-rs232 or ethernet connection.
+connects to kgdb. The type of connection a developer makes with
+gdb depends on the availability of kgdb I/O modules compiled as
+builtin's or kernel modules in the test machine's kernel.
 </para>
 </chapter>
 <chapter id="CompilingAKernel">
@@ -223,7 +222,7 @@
 </para>
 <para>
 IMPORTANT NOTE: Using this option with kgdb over the console
-(kgdboc) or kgdb over ethernet (kgdboe) is not supported.
+(kgdboc) is not supported.
 </para>
 </sect1>
 </chapter>
@@ -249,18 +248,11 @@
 (gdb) target remote /dev/ttyS0
 </programlisting>
 <para>
-Example (kgdb to a terminal server):
+Example (kgdb to a terminal server on tcp port 2012):
 </para>
 <programlisting>
 % gdb ./vmlinux
-(gdb) target remote udp:192.168.2.2:6443
-</programlisting>
-<para>
-Example (kgdb over ethernet):
-</para>
-<programlisting>
-% gdb ./vmlinux
-(gdb) target remote udp:192.168.2.2:6443
+(gdb) target remote 192.168.2.2:2012
 </programlisting>
 <para>
 Once connected, you can debug a kernel the way you would debug an
@@ -542,7 +542,7 @@ otherwise initial value -1 that indicates the cpuset has no request.
 2 : search cores in a package.
 3 : search cpus in a node [= system wide on non-NUMA system]
 ( 4 : search nodes in a chunk of node [on NUMA system] )
-( 5~ : search system wide [on NUMA system])
+( 5 : search system wide [on NUMA system] )

 This file is per-cpuset and affect the sched domain where the cpuset
 belongs to. Therefore if the flag 'sched_load_balance' of a cpuset
@@ -2,17 +2,12 @@ Naming and data format standards for sysfs files
 ------------------------------------------------

 The libsensors library offers an interface to the raw sensors data
-through the sysfs interface. See libsensors documentation and source for
-further information. As of writing this document, libsensors
-(from lm_sensors 2.8.3) is heavily chip-dependent. Adding or updating
-support for any given chip requires modifying the library's code.
-This is because libsensors was written for the procfs interface
-older kernel modules were using, which wasn't standardized enough.
-Recent versions of libsensors (from lm_sensors 2.8.2 and later) have
-support for the sysfs interface, though.
-
-The new sysfs interface was designed to be as chip-independent as
-possible.
+through the sysfs interface. Since lm-sensors 3.0.0, libsensors is
+completely chip-independent. It assumes that all the kernel drivers
+implement the standard sysfs interface described in this document.
+This makes adding or updating support for any given chip very easy, as
+libsensors, and applications using it, do not need to be modified.
+This is a major improvement compared to lm-sensors 2.

 Note that motherboards vary widely in the connections to sensor chips.
 There is no standard that ensures, for example, that the second
@@ -35,19 +30,17 @@ access this data in a simple and consistent way. That said, such programs
 will have to implement conversion, labeling and hiding of inputs. For
 this reason, it is still not recommended to bypass the library.

-If you are developing a userspace application please send us feedback on
-this standard.
-
-Note that this standard isn't completely established yet, so it is subject
-to changes. If you are writing a new hardware monitoring driver those
-features can't seem to fit in this interface, please contact us with your
-extension proposal. Keep in mind that backward compatibility must be
-preserved.
-
 Each chip gets its own directory in the sysfs /sys/devices tree. To
 find all sensor chips, it is easier to follow the device symlinks from
 /sys/class/hwmon/hwmon*.

+Up to lm-sensors 3.0.0, libsensors looks for hardware monitoring attributes
+in the "physical" device directory. Since lm-sensors 3.0.1, attributes found
+in the hwmon "class" device directory are also supported. Complex drivers
+(e.g. drivers for multifunction chips) may want to use this possibility to
+avoid namespace pollution. The only drawback will be that older versions of
+libsensors won't support the driver in question.
+
 All sysfs values are fixed point numbers.

 There is only one value per file, unlike the older /proc specification.
@@ -4431,10 +4431,10 @@ M: johnpol@2ka.mipt.ru
 S: Maintained

 W83791D HARDWARE MONITORING DRIVER
-P: Charles Spirakis
-M: bezaur@gmail.com
+P: Marc Hulsman
+M: m.hulsman@tudelft.nl
 L: lm-sensors@lm-sensors.org
-S: Odd Fixes
+S: Maintained

 W83793 HARDWARE MONITORING DRIVER
 P: Rudolf Marek
Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 26
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Rotary Wombat

 # *DOCUMENTATION*
@@ -13,6 +13,7 @@ NM := $(NM) -B
 LDFLAGS_vmlinux := -static -N #-relax
 CHECKFLAGS += -D__alpha__ -m64
 cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y += $(call cc-option, -fno-jump-tables)

 cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
 cpuflags-$(CONFIG_ALPHA_EV5) := -mcpu=ev5
@@ -74,6 +74,8 @@
 # define DBG(args)
 #endif

+DEFINE_SPINLOCK(t2_hae_lock);
+
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;

@@ -71,6 +71,23 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_i
 static void __init
 quirk_cypress(struct pci_dev *dev)
 {
         /* The Notorious Cy82C693 chip. */

+        /* The generic legacy mode IDE fixup in drivers/pci/probe.c
+           doesn't work correctly with the Cypress IDE controller as
+           it has non-standard register layout. Fix that. */
+        if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
+                dev->resource[2].start = dev->resource[3].start = 0;
+                dev->resource[2].end = dev->resource[3].end = 0;
+                dev->resource[2].flags = dev->resource[3].flags = 0;
+                if (PCI_FUNC(dev->devfn) == 2) {
+                        dev->resource[0].start = 0x170;
+                        dev->resource[0].end = 0x177;
+                        dev->resource[1].start = 0x376;
+                        dev->resource[1].end = 0x376;
+                }
+        }
+
         /* The Cypress bridge responds on the PCI bus in the address range
            0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no
            way to turn this off. The bridge also supports several extended
@@ -447,7 +447,7 @@ struct unaligned_stat {


 /* Macro for exception fixup code to access integer registers. */
-#define una_reg(r) (regs->regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
+#define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])


 asmlinkage void
@@ -456,6 +456,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 {
         long error, tmp1, tmp2, tmp3, tmp4;
         unsigned long pc = regs->pc - 4;
+        unsigned long *_regs = regs->regs;
         const struct exception_table_entry *fixup;

         unaligned[0].count++;
@@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
         if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
                 rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
                                     NR_PREALLOCATE_RTE_ENTRIES);
-                if (!rte)
-                        return NULL;
                 for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
                         list_add(&rte->rte_list, &free_rte_list);
         }
@@ -578,8 +578,6 @@ setup_arch (char **cmdline_p)
         cpu_init(); /* initialize the bootstrap CPU */
         mmu_context_init(); /* initialize context_id bitmap */

-        check_sal_cache_flush();
-
 #ifdef CONFIG_ACPI
         acpi_boot_init();
 #endif
@@ -607,6 +605,7 @@ setup_arch (char **cmdline_p)
         ia64_mca_init();

         platform_setup(cmdline_p);
+        check_sal_cache_flush();
         paging_init();
 }

|
@ -512,6 +512,8 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si
|
|||
int cpu;
|
||||
char optstr[64];
|
||||
|
||||
if (count == 0 || count > sizeof(optstr))
|
||||
return -EINVAL;
|
||||
if (copy_from_user(optstr, user, count))
|
||||
return -EFAULT;
|
||||
optstr[count - 1] = '\0';
|
||||
|
|
|
@@ -142,7 +142,7 @@ static void dump_one_vdso_page(struct page *pg, struct page *upg)
         printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
                page_count(pg),
                pg->flags);
-        if (upg/* && pg != upg*/) {
+        if (upg && !IS_ERR(upg) /* && pg != upg*/) {
                 printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg)
                                                        << PAGE_SHIFT),
                        page_count(upg),
@@ -383,6 +383,7 @@ config VMI
 config KVM_CLOCK
         bool "KVM paravirtualized clock"
         select PARAVIRT
+        select PARAVIRT_CLOCK
         depends on !(X86_VISWS || X86_VOYAGER)
         help
           Turning on this option will allow you to run a paravirtualized clock
@@ -410,6 +411,10 @@ config PARAVIRT
           over full virtualization. However, when run without a hypervisor
           the kernel is theoretically slower and slightly larger.

+config PARAVIRT_CLOCK
+        bool
+        default n
+
 endif

 config MEMTEST_BOOTPARAM
@@ -82,6 +82,7 @@ obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST) += kvm.o
 obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
 obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o

 obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o

@@ -166,6 +166,8 @@ int geode_has_vsa2(void)
         static int has_vsa2 = -1;

         if (has_vsa2 == -1) {
+                u16 val;
+
                 /*
                  * The VSA has virtual registers that we can query for a
                  * signature.
@@ -173,7 +175,8 @@ int geode_has_vsa2(void)
                 outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
                 outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);

-                has_vsa2 = (inw(VSA_VRC_DATA) == VSA_SIG);
+                val = inw(VSA_VRC_DATA);
+                has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
         }

         return has_vsa2;
@ -18,6 +18,7 @@
|
|||
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/kvm_para.h>
|
||||
#include <asm/pvclock.h>
|
||||
#include <asm/arch_hooks.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/apic.h>
|
||||
|
@ -36,18 +37,9 @@ static int parse_no_kvmclock(char *arg)
|
|||
early_param("no-kvmclock", parse_no_kvmclock);
|
||||
|
||||
/* The hypervisor will put information about time periodically here */
|
||||
static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
|
||||
#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field
|
||||
static DEFINE_PER_CPU_SHARED_ALIGNED(struct pvclock_vcpu_time_info, hv_clock);
|
||||
static struct pvclock_wall_clock wall_clock;
|
||||
|
||||
static inline u64 kvm_get_delta(u64 last_tsc)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
u64 delta = native_read_tsc() - last_tsc;
|
||||
return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
|
||||
}
|
||||
|
||||
static struct kvm_wall_clock wall_clock;
|
||||
static cycle_t kvm_clock_read(void);
|
||||
/*
|
||||
* The wallclock is the time of day when we booted. Since then, some time may
|
||||
* have elapsed since the hypervisor wrote the data. So we try to account for
|
||||
|
@ -55,64 +47,37 @@ static cycle_t kvm_clock_read(void);
|
|||
*/
|
||||
static unsigned long kvm_get_wallclock(void)
|
||||
{
|
||||
u32 wc_sec, wc_nsec;
|
||||
u64 delta;
|
||||
struct pvclock_vcpu_time_info *vcpu_time;
|
||||
struct timespec ts;
|
||||
int version, nsec;
|
||||
int low, high;
|
||||
|
||||
low = (int)__pa(&wall_clock);
|
||||
high = ((u64)__pa(&wall_clock) >> 32);
|
||||
|
||||
delta = kvm_clock_read();
|
||||
|
||||
native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
|
||||
do {
|
||||
version = wall_clock.wc_version;
|
||||
rmb();
|
||||
wc_sec = wall_clock.wc_sec;
|
||||
wc_nsec = wall_clock.wc_nsec;
|
||||
rmb();
|
||||
} while ((wall_clock.wc_version != version) || (version & 1));
|
||||
|
||||
delta = kvm_clock_read() - delta;
|
||||
delta += wc_nsec;
|
||||
nsec = do_div(delta, NSEC_PER_SEC);
|
||||
set_normalized_timespec(&ts, wc_sec + delta, nsec);
|
||||
/*
|
||||
* Of all mechanisms of time adjustment I've tested, this one
|
||||
* was the champion!
|
||||
*/
|
||||
return ts.tv_sec + 1;
|
||||
vcpu_time = &get_cpu_var(hv_clock);
|
||||
pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
|
||||
put_cpu_var(hv_clock);
|
||||
|
||||
return ts.tv_sec;
|
||||
}
|
||||
|
||||
static int kvm_set_wallclock(unsigned long now)
|
||||
{
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is our read_clock function. The host puts an tsc timestamp each time
|
||||
* it updates a new time. Without the tsc adjustment, we can have a situation
|
||||
* in which a vcpu starts to run earlier (smaller system_time), but probes
|
||||
* time later (compared to another vcpu), leading to backwards time
|
||||
*/
|
||||
static cycle_t kvm_clock_read(void)
|
||||
{
|
||||
u64 last_tsc, now;
|
||||
int cpu;
|
||||
struct pvclock_vcpu_time_info *src;
|
||||
cycle_t ret;
|
||||
|
||||
preempt_disable();
|
||||
cpu = smp_processor_id();
|
||||
|
||||
last_tsc = get_clock(cpu, tsc_timestamp);
|
||||
now = get_clock(cpu, system_time);
|
||||
|
||||
now += kvm_get_delta(last_tsc);
|
||||
preempt_enable();
|
||||
|
||||
return now;
|
||||
src = &get_cpu_var(hv_clock);
|
||||
ret = pvclock_clocksource_read(src);
|
||||
put_cpu_var(hv_clock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct clocksource kvm_clock = {
|
||||
.name = "kvm-clock",
|
||||
.read = kvm_clock_read,
|
||||
|
@ -123,13 +88,14 @@ static struct clocksource kvm_clock = {
|
|||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
static int kvm_register_clock(void)
|
||||
static int kvm_register_clock(char *txt)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
int low, high;
|
||||
low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
|
||||
high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
|
||||
|
||||
printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
|
||||
cpu, high, low, txt);
|
||||
return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
|
||||
}
|
||||
|
||||
|
@ -140,12 +106,20 @@ static void kvm_setup_secondary_clock(void)
|
|||
* Now that the first cpu already had this clocksource initialized,
|
||||
* we shouldn't fail.
|
||||
*/
|
||||
WARN_ON(kvm_register_clock());
|
||||
WARN_ON(kvm_register_clock("secondary cpu clock"));
|
||||
/* ok, done with our trickery, call native */
|
||||
setup_secondary_APIC_clock();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void __init kvm_smp_prepare_boot_cpu(void)
|
||||
{
|
||||
WARN_ON(kvm_register_clock("primary cpu clock"));
|
||||
native_smp_prepare_boot_cpu();
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* After the clock is registered, the host will keep writing to the
|
||||
* registered memory location. If the guest happens to shutdown, this memory
|
||||
|
@ -174,13 +148,16 @@ void __init kvmclock_init(void)
|
|||
return;
|
||||
|
||||
if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
|
||||
if (kvm_register_clock())
|
||||
if (kvm_register_clock("boot clock"))
|
||||
return;
|
||||
pv_time_ops.get_wallclock = kvm_get_wallclock;
|
||||
pv_time_ops.set_wallclock = kvm_set_wallclock;
|
||||
pv_time_ops.sched_clock = kvm_clock_read;
|
||||
#ifdef CONFIG_X86_LOCAL_APIC
|
||||
pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
|
||||
#endif
|
||||
#ifdef CONFIG_SMP
|
||||
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
|
||||
#endif
|
||||
machine_ops.shutdown = kvm_shutdown;
|
||||
#ifdef CONFIG_KEXEC
|
||||
|
|
|
@@ -333,6 +333,7 @@ void flush_thread(void)
         /*
          * Forget coprocessor state..
          */
+        tsk->fpu_counter = 0;
         clear_fpu(tsk);
         clear_used_math();
 }
@@ -294,6 +294,7 @@ void flush_thread(void)
         /*
          * Forget coprocessor state..
          */
+        tsk->fpu_counter = 0;
         clear_fpu(tsk);
         clear_used_math();
 }
@ -0,0 +1,141 @@
|
|||
/* paravirtual clock -- common code used by kvm/xen
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <asm/pvclock.h>
|
||||
|
||||
/*
|
||||
* These are perodically updated
|
||||
* xen: magic shared_info page
|
||||
* kvm: gpa registered via msr
|
||||
* and then copied here.
|
||||
*/
|
||||
struct pvclock_shadow_time {
|
||||
u64 tsc_timestamp; /* TSC at last update of time vals. */
|
||||
u64 system_timestamp; /* Time, in nanosecs, since boot. */
|
||||
u32 tsc_to_nsec_mul;
|
||||
int tsc_shift;
|
||||
u32 version;
|
||||
};
|
||||
|
||||
/*
|
||||
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
|
||||
* yielding a 64-bit result.
|
||||
*/
|
||||
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
|
||||
{
|
||||
u64 product;
|
||||
#ifdef __i386__
|
||||
u32 tmp1, tmp2;
|
||||
#endif
|
||||
|
||||
if (shift < 0)
|
||||
delta >>= -shift;
|
||||
else
|
||||
delta <<= shift;
|
||||
|
||||
#ifdef __i386__
|
||||
__asm__ (
|
||||
"mul %5 ; "
|
||||
"mov %4,%%eax ; "
|
||||
"mov %%edx,%4 ; "
|
||||
"mul %5 ; "
|
||||
"xor %5,%5 ; "
|
||||
"add %4,%%eax ; "
|
||||
"adc %5,%%edx ; "
|
||||
: "=A" (product), "=r" (tmp1), "=r" (tmp2)
|
||||
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
|
||||
#elif __x86_64__
|
||||
__asm__ (
|
||||
"mul %%rdx ; shrd $32,%%rdx,%%rax"
|
||||
: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
|
||||
#else
|
||||
#error implement me!
|
||||
#endif
|
||||
|
||||
return product;
|
||||
}
|
||||
|
||||
static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
|
||||
{
|
||||
u64 delta = native_read_tsc() - shadow->tsc_timestamp;
|
||||
return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reads a consistent set of time-base values from hypervisor,
|
||||
* into a shadow data area.
|
||||
*/
|
||||
static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
|
||||
struct pvclock_vcpu_time_info *src)
|
||||
{
|
||||
do {
|
||||
dst->version = src->version;
|
||||
rmb(); /* fetch version before data */
|
||||
dst->tsc_timestamp = src->tsc_timestamp;
|
||||
dst->system_timestamp = src->system_time;
|
||||
dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
|
||||
dst->tsc_shift = src->tsc_shift;
|
||||
rmb(); /* test version after fetching data */
|
||||
} while ((src->version & 1) || (dst->version != src->version));
|
||||
|
||||
return dst->version;
|
||||
}
|
||||
|
||||
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
|
||||
{
|
||||
struct pvclock_shadow_time shadow;
|
||||
unsigned version;
|
||||
cycle_t ret, offset;
|
||||
|
||||
do {
|
||||
version = pvclock_get_time_values(&shadow, src);
|
||||
barrier();
|
||||
offset = pvclock_get_nsec_offset(&shadow);
|
||||
ret = shadow.system_timestamp + offset;
|
||||
barrier();
|
||||
} while (version != src->version);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
|
||||
struct pvclock_vcpu_time_info *vcpu_time,
|
||||
struct timespec *ts)
|
||||
{
|
||||
u32 version;
|
||||
u64 delta;
|
||||
struct timespec now;
|
||||
|
||||
/* get wallclock at system boot */
|
||||
do {
|
||||
version = wall_clock->version;
|
||||
rmb(); /* fetch version before time */
|
||||
now.tv_sec = wall_clock->sec;
|
||||
now.tv_nsec = wall_clock->nsec;
|
||||
rmb(); /* fetch time before checking version */
|
||||
} while ((wall_clock->version & 1) || (version != wall_clock->version));
|
||||
|
||||
delta = pvclock_clocksource_read(vcpu_time); /* time since system boot */
|
||||
delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
|
||||
|
||||
now.tv_nsec = do_div(delta, NSEC_PER_SEC);
|
||||
now.tv_sec = delta;
|
||||
|
||||
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
|
||||
}
|
|
@ -532,10 +532,16 @@ static void __init reserve_crashkernel(void)
|
|||
(unsigned long)(crash_size >> 20),
|
||||
(unsigned long)(crash_base >> 20),
|
||||
(unsigned long)(total_mem >> 20));
|
||||
|
||||
if (reserve_bootmem(crash_base, crash_size,
|
||||
BOOTMEM_EXCLUSIVE) < 0) {
|
||||
printk(KERN_INFO "crashkernel reservation "
|
||||
"failed - memory is in use\n");
|
||||
return;
|
||||
}
|
||||
|
||||
crashk_res.start = crash_base;
|
||||
crashk_res.end = crash_base + crash_size - 1;
|
||||
reserve_bootmem(crash_base, crash_size,
|
||||
BOOTMEM_DEFAULT);
|
||||
} else
|
||||
printk(KERN_INFO "crashkernel reservation failed - "
|
||||
"you have to specify a base address\n");
|
||||
|
|
|
@ -14,7 +14,10 @@
|
|||
|
||||
#include "mach_timer.h"
|
||||
|
||||
static int tsc_disabled;
|
||||
/* native_sched_clock() is called before tsc_init(), so
|
||||
we must start with the TSC soft disabled to prevent
|
||||
erroneous rdtsc usage on !cpu_has_tsc processors */
|
||||
static int tsc_disabled = -1;
|
||||
|
||||
/*
|
||||
* On some systems the TSC frequency does not
|
||||
|
@ -402,25 +405,20 @@ void __init tsc_init(void)
|
|||
{
|
||||
int cpu;
|
||||
|
||||
if (!cpu_has_tsc || tsc_disabled) {
|
||||
/* Disable the TSC in case of !cpu_has_tsc */
|
||||
tsc_disabled = 1;
|
||||
if (!cpu_has_tsc || tsc_disabled > 0)
|
||||
return;
|
||||
}
|
||||
|
||||
cpu_khz = calculate_cpu_khz();
|
||||
tsc_khz = cpu_khz;
|
||||
|
||||
if (!cpu_khz) {
|
||||
mark_tsc_unstable("could not calculate TSC khz");
|
||||
/*
|
||||
* We need to disable the TSC completely in this case
|
||||
* to prevent sched_clock() from using it.
|
||||
*/
|
||||
tsc_disabled = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
/* now allow native_sched_clock() to use rdtsc */
|
||||
tsc_disabled = 0;
|
||||
|
||||
printk("Detected %lu.%03lu MHz processor.\n",
|
||||
(unsigned long)cpu_khz / 1000,
|
||||
(unsigned long)cpu_khz % 1000);
|
||||
|
|
|
@ -200,9 +200,12 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
|
|||
|
||||
atomic_inc(&pt->pending);
|
||||
smp_mb__after_atomic_inc();
|
||||
if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
|
||||
vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
||||
wake_up_interruptible(&vcpu0->wq);
|
||||
if (vcpu0) {
|
||||
set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
|
||||
if (waitqueue_active(&vcpu0->wq)) {
|
||||
vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
||||
wake_up_interruptible(&vcpu0->wq);
|
||||
}
|
||||
}
|
||||
|
||||
pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
|
||||
|
|
|
@ -940,6 +940,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
|
|||
wait_queue_head_t *q = &apic->vcpu->wq;
|
||||
|
||||
atomic_inc(&apic->timer.pending);
|
||||
set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
|
||||
if (waitqueue_active(q)) {
|
||||
apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
||||
wake_up_interruptible(q);
|
||||
|
|
|
@ -640,6 +640,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
|
|||
rmap_remove(kvm, spte);
|
||||
--kvm->stat.lpages;
|
||||
set_shadow_pte(spte, shadow_trap_nonpresent_pte);
|
||||
spte = NULL;
|
||||
write_protected = 1;
|
||||
}
|
||||
spte = rmap_next(kvm, rmapp, spte);
|
||||
|
@ -1082,10 +1083,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
|
|||
struct kvm_mmu_page *shadow;
|
||||
|
||||
spte |= PT_WRITABLE_MASK;
|
||||
if (user_fault) {
|
||||
mmu_unshadow(vcpu->kvm, gfn);
|
||||
goto unshadowed;
|
||||
}
|
||||
|
||||
shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
|
||||
if (shadow ||
|
||||
|
@ -1102,8 +1099,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
|
|||
}
|
||||
}
|
||||
|
||||
unshadowed:
|
||||
|
||||
if (pte_access & ACC_WRITE_MASK)
|
||||
mark_page_dirty(vcpu->kvm, gfn);
|
||||
|
||||
|
@ -1580,11 +1575,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
|
|||
u64 *spte,
|
||||
const void *new)
|
||||
{
|
||||
if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
|
||||
&& !vcpu->arch.update_pte.largepage) {
|
||||
++vcpu->kvm->stat.mmu_pde_zapped;
|
||||
return;
|
||||
}
|
||||
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
|
||||
if (!vcpu->arch.update_pte.largepage ||
|
||||
sp->role.glevels == PT32_ROOT_LEVEL) {
|
||||
++vcpu->kvm->stat.mmu_pde_zapped;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
++vcpu->kvm->stat.mmu_pte_updated;
|
||||
if (sp->role.glevels == PT32_ROOT_LEVEL)
|
||||
|
|
|
@ -566,7 +566,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
|
|||
load_transition_efer(vmx);
|
||||
}
|
||||
|
||||
static void vmx_load_host_state(struct vcpu_vmx *vmx)
|
||||
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -596,6 +596,13 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
|
|||
reload_host_efer(vmx);
|
||||
}
|
||||
|
||||
static void vmx_load_host_state(struct vcpu_vmx *vmx)
|
||||
{
|
||||
preempt_disable();
|
||||
__vmx_load_host_state(vmx);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/*
|
||||
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
|
||||
* vcpu mutex is already taken.
|
||||
|
@ -654,7 +661,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
|
||||
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vmx_load_host_state(to_vmx(vcpu));
|
||||
__vmx_load_host_state(to_vmx(vcpu));
|
||||
}
|
||||
|
||||
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
|
||||
|
@ -884,11 +891,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
|
|||
switch (msr_index) {
|
||||
#ifdef CONFIG_X86_64
|
||||
case MSR_EFER:
|
||||
vmx_load_host_state(vmx);
|
||||
ret = kvm_set_msr_common(vcpu, msr_index, data);
|
||||
if (vmx->host_state.loaded) {
|
||||
reload_host_efer(vmx);
|
||||
load_transition_efer(vmx);
|
||||
}
|
||||
break;
|
||||
case MSR_FS_BASE:
|
||||
vmcs_writel(GUEST_FS_BASE, data);
|
||||
|
@ -910,11 +914,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
|
|||
guest_write_tsc(data);
|
||||
break;
|
||||
default:
|
||||
vmx_load_host_state(vmx);
|
||||
msr = find_msr_entry(vmx, msr_index);
|
||||
if (msr) {
|
||||
msr->data = data;
|
||||
if (vmx->host_state.loaded)
|
||||
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
|
||||
break;
|
||||
}
|
||||
ret = kvm_set_msr_common(vcpu, msr_index, data);
|
||||
|
|
|
@ -492,8 +492,8 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
|
|||
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
|
||||
{
|
||||
static int version;
|
||||
struct kvm_wall_clock wc;
|
||||
struct timespec wc_ts;
|
||||
struct pvclock_wall_clock wc;
|
||||
struct timespec now, sys, boot;
|
||||
|
||||
if (!wall_clock)
|
||||
return;
|
||||
|
@ -502,10 +502,19 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
|
|||
|
||||
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
|
||||
|
||||
wc_ts = current_kernel_time();
|
||||
wc.wc_sec = wc_ts.tv_sec;
|
||||
wc.wc_nsec = wc_ts.tv_nsec;
|
||||
wc.wc_version = version;
|
||||
/*
|
||||
* The guest calculates current wall clock time by adding
|
||||
* system time (updated by kvm_write_guest_time below) to the
|
||||
* wall clock specified here. guest system time equals host
|
||||
* system time for us, thus we must fill in host boot time here.
|
||||
*/
|
||||
now = current_kernel_time();
|
||||
ktime_get_ts(&sys);
|
||||
boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
|
||||
|
||||
wc.sec = boot.tv_sec;
|
||||
wc.nsec = boot.tv_nsec;
|
||||
wc.version = version;
|
||||
|
||||
kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
|
||||
|
||||
|
@ -513,6 +522,45 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
|
|||
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
|
||||
}
|
||||
|
||||
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
|
||||
{
|
||||
uint32_t quotient, remainder;
|
||||
|
||||
/* Don't try to replace with do_div(), this one calculates
|
||||
* "(dividend << 32) / divisor" */
|
||||
__asm__ ( "divl %4"
|
||||
: "=a" (quotient), "=d" (remainder)
|
||||
: "0" (0), "1" (dividend), "r" (divisor) );
|
||||
return quotient;
|
||||
}
|
||||
|
||||
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
|
||||
{
|
||||
uint64_t nsecs = 1000000000LL;
|
||||
int32_t shift = 0;
|
||||
uint64_t tps64;
|
||||
uint32_t tps32;
|
||||
|
||||
tps64 = tsc_khz * 1000LL;
|
||||
while (tps64 > nsecs*2) {
|
||||
tps64 >>= 1;
|
||||
shift--;
|
||||
}
|
||||
|
||||
tps32 = (uint32_t)tps64;
|
||||
while (tps32 <= (uint32_t)nsecs) {
|
||||
tps32 <<= 1;
|
||||
shift++;
|
||||
}
|
||||
|
||||
hv_clock->tsc_shift = shift;
|
||||
hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
|
||||
|
||||
pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
|
||||
__FUNCTION__, tsc_khz, hv_clock->tsc_shift,
|
||||
hv_clock->tsc_to_system_mul);
|
||||
}
|
||||
|
||||
static void kvm_write_guest_time(struct kvm_vcpu *v)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
@ -523,6 +571,11 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
|
|||
if ((!vcpu->time_page))
|
||||
return;
|
||||
|
||||
if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
|
||||
kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
|
||||
vcpu->hv_clock_tsc_khz = tsc_khz;
|
||||
}
|
||||
|
||||
/* Keep irq disabled to prevent changes to the clock */
|
||||
local_irq_save(flags);
|
||||
kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
|
||||
|
@ -537,14 +590,14 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
|
|||
/*
|
||||
* The interface expects us to write an even number signaling that the
|
||||
* update is finished. Since the guest won't see the intermediate
|
||||
* state, we just write "2" at the end
|
||||
* state, we just increase by 2 at the end.
|
||||
*/
|
||||
vcpu->hv_clock.version = 2;
|
||||
vcpu->hv_clock.version += 2;
|
||||
|
||||
shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
|
||||
|
||||
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
|
||||
sizeof(vcpu->hv_clock));
|
||||
sizeof(vcpu->hv_clock));
|
||||
|
||||
kunmap_atomic(shared_kaddr, KM_USER0);
|
||||
|
||||
|
@ -599,10 +652,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
|
|||
/* ...but clean it before doing the actual write */
|
||||
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
|
||||
|
||||
vcpu->arch.hv_clock.tsc_to_system_mul =
|
||||
clocksource_khz2mult(tsc_khz, 22);
|
||||
vcpu->arch.hv_clock.tsc_shift = 22;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
vcpu->arch.time_page =
|
||||
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
|
||||
|
@ -2759,6 +2808,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
if (vcpu->requests) {
|
||||
if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
|
||||
__kvm_migrate_timers(vcpu);
|
||||
if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
|
||||
kvm_x86_ops->tlb_flush(vcpu);
|
||||
if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
|
||||
&vcpu->requests)) {
|
||||
kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
|
||||
|
@ -2772,6 +2823,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
}
|
||||
}
|
||||
|
||||
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
|
||||
kvm_inject_pending_timer_irqs(vcpu);
|
||||
|
||||
preempt_disable();
|
||||
|
@ -2781,21 +2833,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
|
||||
local_irq_disable();
|
||||
|
||||
if (need_resched()) {
|
||||
if (vcpu->requests || need_resched()) {
|
||||
local_irq_enable();
|
||||
preempt_enable();
|
||||
r = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (vcpu->requests)
|
||||
if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
|
||||
local_irq_enable();
|
||||
preempt_enable();
|
||||
r = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (signal_pending(current)) {
|
||||
local_irq_enable();
|
||||
preempt_enable();
|
||||
|
@ -2825,9 +2869,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
|
||||
kvm_guest_enter();
|
||||
|
||||
if (vcpu->requests)
|
||||
if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
|
||||
kvm_x86_ops->tlb_flush(vcpu);
|
||||
|
||||
KVMTRACE_0D(VMENTRY, vcpu, entryexit);
|
||||
kvm_x86_ops->run(vcpu, kvm_run);
|
||||
|
|
|
@@ -5,8 +5,9 @@
 config XEN
         bool "Xen guest support"
         select PARAVIRT
+        select PARAVIRT_CLOCK
         depends on X86_32
-        depends on X86_CMPXCHG && X86_TSC && !(X86_VISWS || X86_VOYAGER)
+        depends on X86_CMPXCHG && X86_TSC && X86_PAE && !(X86_VISWS || X86_VOYAGER)
         help
           This is the Linux Xen port. Enabling this will allow the
           kernel to boot in a paravirtualized environment under the
@ -785,38 +785,35 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
|
|||
static __init void xen_pagetable_setup_start(pgd_t *base)
|
||||
{
|
||||
pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
|
||||
int i;
|
||||
|
||||
/* special set_pte for pagetable initialization */
|
||||
pv_mmu_ops.set_pte = xen_set_pte_init;
|
||||
|
||||
init_mm.pgd = base;
|
||||
/*
|
||||
* copy top-level of Xen-supplied pagetable into place. For
|
||||
* !PAE we can use this as-is, but for PAE it is a stand-in
|
||||
* while we copy the pmd pages.
|
||||
* copy top-level of Xen-supplied pagetable into place. This
|
||||
* is a stand-in while we copy the pmd pages.
|
||||
*/
|
||||
memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
|
||||
|
||||
if (PTRS_PER_PMD > 1) {
|
||||
int i;
|
||||
/*
|
||||
* For PAE, need to allocate new pmds, rather than
|
||||
* share Xen's, since Xen doesn't like pmd's being
|
||||
* shared between address spaces.
|
||||
*/
|
||||
for (i = 0; i < PTRS_PER_PGD; i++) {
|
||||
if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
|
||||
pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
|
||||
/*
|
||||
* For PAE, need to allocate new pmds, rather than
|
||||
* share Xen's, since Xen doesn't like pmd's being
|
||||
* shared between address spaces.
|
||||
*/
|
||||
for (i = 0; i < PTRS_PER_PGD; i++) {
|
||||
if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
|
||||
pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
|
||||
|
||||
memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
|
||||
PAGE_SIZE);
|
||||
memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
|
||||
PAGE_SIZE);
|
||||
|
||||
make_lowmem_page_readonly(pmd);
|
||||
make_lowmem_page_readonly(pmd);
|
||||
|
||||
set_pgd(&base[i], __pgd(1 + __pa(pmd)));
|
||||
} else
|
||||
pgd_clear(&base[i]);
|
||||
}
|
||||
set_pgd(&base[i], __pgd(1 + __pa(pmd)));
|
||||
} else
|
||||
pgd_clear(&base[i]);
|
||||
}
|
||||
|
||||
/* make sure zero_page is mapped RO so we can use it in pagetables */
|
||||
|
@ -873,17 +870,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
|
|||
|
||||
/* Actually pin the pagetable down, but we can't set PG_pinned
|
||||
yet because the page structures don't exist yet. */
|
||||
{
|
||||
unsigned level;
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
level = MMUEXT_PIN_L3_TABLE;
|
||||
#else
|
||||
level = MMUEXT_PIN_L2_TABLE;
|
||||
#endif
|
||||
|
||||
pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
|
||||
}
|
||||
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
|
||||
}
|
||||
|
||||
/* This is called once we have the cpu_possible_map */
|
||||
|
@ -1120,7 +1107,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
|
|||
.make_pte = xen_make_pte,
|
||||
.make_pgd = xen_make_pgd,
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
.set_pte_atomic = xen_set_pte_atomic,
|
||||
.set_pte_present = xen_set_pte_at,
|
||||
.set_pud = xen_set_pud,
|
||||
|
@ -1129,7 +1115,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
|
|||
|
||||
.make_pmd = xen_make_pmd,
|
||||
.pmd_val = xen_pmd_val,
|
||||
#endif /* PAE */
|
||||
|
||||
.activate_mm = xen_activate_mm,
|
||||
.dup_mmap = xen_dup_mmap,
|
||||
|
@ -1257,6 +1242,11 @@ asmlinkage void __init xen_start_kernel(void)
|
|||
if (xen_feature(XENFEAT_supervisor_mode_kernel))
|
||||
pv_info.kernel_rpl = 0;
|
||||
|
||||
/* Prevent unwanted bits from being set in PTEs. */
|
||||
__supported_pte_mask &= ~_PAGE_GLOBAL;
|
||||
if (!is_initial_xendomain())
|
||||
__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
|
||||
|
||||
/* set the limit of our address space */
|
||||
xen_reserve_top();
|
||||
|
||||
|
|
|
@ -179,50 +179,56 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
|
|||
preempt_enable();
|
||||
}
|
||||
|
||||
/* Assume pteval_t is equivalent to all the other *val_t types. */
|
||||
static pteval_t pte_mfn_to_pfn(pteval_t val)
|
||||
{
|
||||
if (val & _PAGE_PRESENT) {
|
||||
unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
|
||||
pteval_t flags = val & ~PTE_MASK;
|
||||
val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static pteval_t pte_pfn_to_mfn(pteval_t val)
|
||||
{
|
||||
if (val & _PAGE_PRESENT) {
|
||||
unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
|
||||
pteval_t flags = val & ~PTE_MASK;
|
||||
val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
pteval_t xen_pte_val(pte_t pte)
|
||||
{
|
||||
pteval_t ret = pte.pte;
|
||||
|
||||
if (ret & _PAGE_PRESENT)
|
||||
ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
|
||||
|
||||
return ret;
|
||||
return pte_mfn_to_pfn(pte.pte);
|
||||
}
|
||||
|
||||
pgdval_t xen_pgd_val(pgd_t pgd)
|
||||
{
|
||||
pgdval_t ret = pgd.pgd;
|
||||
if (ret & _PAGE_PRESENT)
|
||||
ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
|
||||
return ret;
|
||||
return pte_mfn_to_pfn(pgd.pgd);
|
||||
}
|
||||
|
||||
pte_t xen_make_pte(pteval_t pte)
|
||||
{
|
||||
if (pte & _PAGE_PRESENT) {
|
||||
pte = phys_to_machine(XPADDR(pte)).maddr;
|
||||
pte &= ~(_PAGE_PCD | _PAGE_PWT);
|
||||
}
|
||||
|
||||
return (pte_t){ .pte = pte };
|
||||
pte = pte_pfn_to_mfn(pte);
|
||||
return native_make_pte(pte);
|
||||
}
|
||||
|
||||
pgd_t xen_make_pgd(pgdval_t pgd)
|
||||
{
|
||||
if (pgd & _PAGE_PRESENT)
|
||||
pgd = phys_to_machine(XPADDR(pgd)).maddr;
|
||||
|
||||
return (pgd_t){ pgd };
|
||||
pgd = pte_pfn_to_mfn(pgd);
|
||||
return native_make_pgd(pgd);
|
||||
}
|
||||
|
||||
pmdval_t xen_pmd_val(pmd_t pmd)
|
||||
{
|
||||
pmdval_t ret = native_pmd_val(pmd);
|
||||
if (ret & _PAGE_PRESENT)
|
||||
ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
|
||||
return ret;
|
||||
return pte_mfn_to_pfn(pmd.pmd);
|
||||
}
|
||||
#ifdef CONFIG_X86_PAE
|
||||
|
||||
void xen_set_pud(pud_t *ptr, pud_t val)
|
||||
{
|
||||
struct multicall_space mcs;
|
||||
|
@ -267,17 +273,9 @@ void xen_pmd_clear(pmd_t *pmdp)
|
|||
|
||||
pmd_t xen_make_pmd(pmdval_t pmd)
|
||||
{
|
||||
if (pmd & _PAGE_PRESENT)
|
||||
pmd = phys_to_machine(XPADDR(pmd)).maddr;
|
||||
|
||||
pmd = pte_pfn_to_mfn(pmd);
|
||||
return native_make_pmd(pmd);
|
||||
}
|
||||
#else /* !PAE */
|
||||
void xen_set_pte(pte_t *ptep, pte_t pte)
|
||||
{
|
||||
*ptep = pte;
|
||||
}
|
||||
#endif /* CONFIG_X86_PAE */
|
||||
|
||||
/*
|
||||
(Yet another) pagetable walker. This one is intended for pinning a
|
||||
|
@ -430,8 +428,6 @@ static int pin_page(struct page *page, enum pt_level level)
|
|||
read-only, and can be pinned. */
|
||||
void xen_pgd_pin(pgd_t *pgd)
|
||||
{
|
||||
unsigned level;
|
||||
|
||||
xen_mc_batch();
|
||||
|
||||
if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
|
||||
|
@ -441,14 +437,7 @@ void xen_pgd_pin(pgd_t *pgd)
|
|||
xen_mc_batch();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
level = MMUEXT_PIN_L3_TABLE;
|
||||
#else
|
||||
level = MMUEXT_PIN_L2_TABLE;
|
||||
#endif
|
||||
|
||||
xen_do_pin(level, PFN_DOWN(__pa(pgd)));
|
||||
|
||||
xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
|
||||
xen_mc_issue(0);
|
||||
}
|
||||
|
||||
|
|
|
@ -37,14 +37,13 @@ void xen_exit_mmap(struct mm_struct *mm);
|
|||
void xen_pgd_pin(pgd_t *pgd);
|
||||
//void xen_pgd_unpin(pgd_t *pgd);
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
unsigned long long xen_pte_val(pte_t);
|
||||
unsigned long long xen_pmd_val(pmd_t);
|
||||
unsigned long long xen_pgd_val(pgd_t);
|
||||
pteval_t xen_pte_val(pte_t);
|
||||
pmdval_t xen_pmd_val(pmd_t);
|
||||
pgdval_t xen_pgd_val(pgd_t);
|
||||
|
||||
pte_t xen_make_pte(unsigned long long);
|
||||
pmd_t xen_make_pmd(unsigned long long);
|
||||
pgd_t xen_make_pgd(unsigned long long);
|
||||
pte_t xen_make_pte(pteval_t);
|
||||
pmd_t xen_make_pmd(pmdval_t);
|
||||
pgd_t xen_make_pgd(pgdval_t);
|
||||
|
||||
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pteval);
|
||||
|
@ -53,15 +52,4 @@ void xen_set_pud(pud_t *ptr, pud_t val);
|
|||
void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
|
||||
void xen_pmd_clear(pmd_t *pmdp);
|
||||
|
||||
|
||||
#else
|
||||
unsigned long xen_pte_val(pte_t);
|
||||
unsigned long xen_pmd_val(pmd_t);
|
||||
unsigned long xen_pgd_val(pgd_t);
|
||||
|
||||
pte_t xen_make_pte(unsigned long);
|
||||
pmd_t xen_make_pmd(unsigned long);
|
||||
pgd_t xen_make_pgd(unsigned long);
|
||||
#endif
|
||||
|
||||
#endif /* _XEN_MMU_H */
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/kernel_stat.h>
|
||||
#include <linux/math64.h>
|
||||
|
||||
#include <asm/pvclock.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
#include <asm/xen/hypercall.h>
|
||||
|
||||
|
@ -31,17 +32,6 @@
|
|||
|
||||
static cycle_t xen_clocksource_read(void);
|
||||
|
||||
/* These are perodically updated in shared_info, and then copied here. */
|
||||
struct shadow_time_info {
|
||||
u64 tsc_timestamp; /* TSC at last update of time vals. */
|
||||
u64 system_timestamp; /* Time, in nanosecs, since boot. */
|
||||
u32 tsc_to_nsec_mul;
|
||||
int tsc_shift;
|
||||
u32 version;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
|
||||
|
||||
/* runstate info updated by Xen */
|
||||
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
|
||||
|
||||
|
@ -211,7 +201,7 @@ unsigned long long xen_sched_clock(void)
|
|||
unsigned long xen_cpu_khz(void)
|
||||
{
|
||||
u64 xen_khz = 1000000ULL << 32;
|
||||
const struct vcpu_time_info *info =
|
||||
const struct pvclock_vcpu_time_info *info =
|
||||
&HYPERVISOR_shared_info->vcpu_info[0].time;
|
||||
|
||||
do_div(xen_khz, info->tsc_to_system_mul);
|
||||
|
@ -223,121 +213,26 @@ unsigned long xen_cpu_khz(void)
|
|||
return xen_khz;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reads a consistent set of time-base values from Xen, into a shadow data
|
||||
* area.
|
||||
*/
|
||||
static unsigned get_time_values_from_xen(void)
|
||||
{
|
||||
struct vcpu_time_info *src;
|
||||
struct shadow_time_info *dst;
|
||||
|
||||
/* src is shared memory with the hypervisor, so we need to
|
||||
make sure we get a consistent snapshot, even in the face of
|
||||
being preempted. */
|
||||
src = &__get_cpu_var(xen_vcpu)->time;
|
||||
dst = &__get_cpu_var(shadow_time);
|
||||
|
||||
do {
|
||||
dst->version = src->version;
|
||||
rmb(); /* fetch version before data */
|
||||
dst->tsc_timestamp = src->tsc_timestamp;
|
||||
dst->system_timestamp = src->system_time;
|
||||
dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
|
||||
dst->tsc_shift = src->tsc_shift;
|
||||
rmb(); /* test version after fetching data */
|
||||
} while ((src->version & 1) | (dst->version ^ src->version));
|
||||
|
||||
return dst->version;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
|
||||
* yielding a 64-bit result.
|
||||
*/
|
||||
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
|
||||
{
|
||||
u64 product;
|
||||
#ifdef __i386__
|
||||
u32 tmp1, tmp2;
|
||||
#endif
|
||||
|
||||
if (shift < 0)
|
||||
delta >>= -shift;
|
||||
else
|
||||
delta <<= shift;
|
||||
|
||||
#ifdef __i386__
|
||||
__asm__ (
|
||||
"mul %5 ; "
|
||||
"mov %4,%%eax ; "
|
||||
"mov %%edx,%4 ; "
|
||||
"mul %5 ; "
|
||||
"xor %5,%5 ; "
|
||||
"add %4,%%eax ; "
|
||||
"adc %5,%%edx ; "
|
||||
: "=A" (product), "=r" (tmp1), "=r" (tmp2)
|
||||
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
|
||||
#elif __x86_64__
|
||||
__asm__ (
|
||||
"mul %%rdx ; shrd $32,%%rdx,%%rax"
|
||||
: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
|
||||
#else
|
||||
#error implement me!
|
||||
#endif
|
||||
|
||||
return product;
|
||||
}
|
||||
|
||||
static u64 get_nsec_offset(struct shadow_time_info *shadow)
|
||||
{
|
||||
u64 now, delta;
|
||||
now = native_read_tsc();
|
||||
delta = now - shadow->tsc_timestamp;
|
||||
return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
|
||||
}
|
||||
|
||||
static cycle_t xen_clocksource_read(void)
|
||||
{
|
||||
struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
|
||||
struct pvclock_vcpu_time_info *src;
|
||||
cycle_t ret;
|
||||
unsigned version;
|
||||
|
||||
do {
|
||||
version = get_time_values_from_xen();
|
||||
barrier();
|
||||
ret = shadow->system_timestamp + get_nsec_offset(shadow);
|
||||
barrier();
|
||||
} while (version != __get_cpu_var(xen_vcpu)->time.version);
|
||||
|
||||
put_cpu_var(shadow_time);
|
||||
|
||||
src = &get_cpu_var(xen_vcpu)->time;
|
||||
ret = pvclock_clocksource_read(src);
|
||||
put_cpu_var(xen_vcpu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void xen_read_wallclock(struct timespec *ts)
|
||||
{
|
||||
const struct shared_info *s = HYPERVISOR_shared_info;
|
||||
u32 version;
|
||||
u64 delta;
|
||||
struct timespec now;
|
||||
struct shared_info *s = HYPERVISOR_shared_info;
|
||||
struct pvclock_wall_clock *wall_clock = &(s->wc);
|
||||
struct pvclock_vcpu_time_info *vcpu_time;
|
||||
|
||||
/* get wallclock at system boot */
|
||||
do {
|
||||
version = s->wc_version;
|
||||
rmb(); /* fetch version before time */
|
||||
now.tv_sec = s->wc_sec;
|
||||
now.tv_nsec = s->wc_nsec;
|
||||
rmb(); /* fetch time before checking version */
|
||||
} while ((s->wc_version & 1) | (version ^ s->wc_version));
|
||||
|
||||
delta = xen_clocksource_read(); /* time since system boot */
|
||||
delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
|
||||
|
||||
now.tv_nsec = do_div(delta, NSEC_PER_SEC);
|
||||
now.tv_sec = delta;
|
||||
|
||||
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
|
||||
vcpu_time = &get_cpu_var(xen_vcpu)->time;
|
||||
pvclock_read_wallclock(wall_clock, vcpu_time, ts);
|
||||
put_cpu_var(xen_vcpu);
|
||||
}
|
||||
|
||||
unsigned long xen_get_wallclock(void)
|
||||
|
@ -345,7 +240,6 @@ unsigned long xen_get_wallclock(void)
|
|||
struct timespec ts;
|
||||
|
||||
xen_read_wallclock(&ts);
|
||||
|
||||
return ts.tv_sec;
|
||||
}
|
||||
|
||||
|
@ -569,8 +463,6 @@ __init void xen_time_init(void)
|
|||
{
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
get_time_values_from_xen();
|
||||
|
||||
clocksource_register(&xen_clocksource);
|
||||
|
||||
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
|
||||
|
|
|
@ -17,7 +17,7 @@ ENTRY(startup_xen)
|
|||
|
||||
__FINIT
|
||||
|
||||
.pushsection .bss.page_aligned
|
||||
.pushsection .text
|
||||
.align PAGE_SIZE_asm
|
||||
ENTRY(hypercall_page)
|
||||
.skip 0x1000
|
||||
|
@ -30,11 +30,7 @@ ENTRY(hypercall_page)
|
|||
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long startup_xen)
|
||||
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long hypercall_page)
|
||||
ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb")
|
||||
#ifdef CONFIG_X86_PAE
|
||||
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
|
||||
#else
|
||||
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "no")
|
||||
#endif
|
||||
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
|
||||
|
||||
#endif /*CONFIG_XEN */
|
||||
|
|
|
@ -233,6 +233,9 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
|
|||
|
||||
device = ac->device;
|
||||
switch (event) {
|
||||
default:
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Unsupported event [0x%x]\n", event));
|
||||
case ACPI_AC_NOTIFY_STATUS:
|
||||
case ACPI_NOTIFY_BUS_CHECK:
|
||||
case ACPI_NOTIFY_DEVICE_CHECK:
|
||||
|
@ -244,11 +247,6 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
|
|||
#ifdef CONFIG_ACPI_SYSFS_POWER
|
||||
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Unsupported event [0x%x]\n", event));
|
||||
break;
|
||||
}
|
||||
|
||||
return;
|
||||
|
|
|
@ -1713,7 +1713,8 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
|
|||
|
||||
status = acpi_video_bus_get_one_device(dev, video);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status, "Cant attach device"));
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
|
||||
"Cant attach device"));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -651,9 +651,17 @@ config PATA_WINBOND_VLB
|
|||
Support for the Winbond W83759A controller on Vesa Local Bus
|
||||
systems.
|
||||
|
||||
config HAVE_PATA_PLATFORM
|
||||
bool
|
||||
help
|
||||
This is an internal configuration node for any machine that
|
||||
uses pata-platform driver to enable the relevant driver in the
|
||||
configuration structure without having to submit endless patches
|
||||
to update the PATA_PLATFORM entry.
|
||||
|
||||
config PATA_PLATFORM
|
||||
tristate "Generic platform device PATA support"
|
||||
depends on EMBEDDED || ARCH_RPC || PPC
|
||||
depends on EMBEDDED || ARCH_RPC || PPC || HAVE_PATA_PLATFORM
|
||||
help
|
||||
This option enables support for generic directly connected ATA
|
||||
devices commonly found on embedded systems.
|
||||
|
|
|
@ -90,6 +90,7 @@ enum {
|
|||
board_ahci_mv = 4,
|
||||
board_ahci_sb700 = 5,
|
||||
board_ahci_mcp65 = 6,
|
||||
board_ahci_nopmp = 7,
|
||||
|
||||
/* global controller registers */
|
||||
HOST_CAP = 0x00, /* host capabilities */
|
||||
|
@ -401,6 +402,14 @@ static const struct ata_port_info ahci_port_info[] = {
|
|||
.udma_mask = ATA_UDMA6,
|
||||
.port_ops = &ahci_ops,
|
||||
},
|
||||
/* board_ahci_nopmp */
|
||||
{
|
||||
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
|
||||
.flags = AHCI_FLAG_COMMON,
|
||||
.pio_mask = 0x1f, /* pio0-4 */
|
||||
.udma_mask = ATA_UDMA6,
|
||||
.port_ops = &ahci_ops,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
|
@ -525,9 +534,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
|||
{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
|
||||
|
||||
/* SiS */
|
||||
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
|
||||
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
|
||||
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
|
||||
{ PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */
|
||||
{ PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */
|
||||
{ PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */
|
||||
|
||||
/* Marvell */
|
||||
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
|
||||
|
@ -653,6 +662,14 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
|
|||
cap &= ~HOST_CAP_PMP;
|
||||
}
|
||||
|
||||
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
|
||||
port_map != 1) {
|
||||
dev_printk(KERN_INFO, &pdev->dev,
|
||||
"JMB361 has only one port, port_map 0x%x -> 0x%x\n",
|
||||
port_map, 1);
|
||||
port_map = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Temporary Marvell 6145 hack: PATA port presence
|
||||
* is asserted through the standard AHCI port
|
||||
|
|
|
@@ -1042,6 +1042,13 @@ static int piix_broken_suspend(void)
                         DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
                 },
         },
+        {
+                .ident = "TECRA M4",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
+                },
+        },
         {
                 .ident = "TECRA M5",
                 .matches = {
@ -4297,7 +4297,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
|
|||
}
|
||||
|
||||
/**
|
||||
* ata_check_atapi_dma - Check whether ATAPI DMA can be supported
|
||||
* atapi_check_dma - Check whether ATAPI DMA can be supported
|
||||
* @qc: Metadata associated with taskfile to check
|
||||
*
|
||||
* Allow low-level driver to filter ATA PACKET commands, returning
|
||||
|
@ -4310,7 +4310,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
|
|||
* RETURNS: 0 when ATAPI DMA can be used
|
||||
* nonzero otherwise
|
||||
*/
|
||||
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
|
||||
int atapi_check_dma(struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
|
||||
|
|
|
@ -2343,8 +2343,8 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
|
|||
{
|
||||
struct scsi_cmnd *scmd = qc->scsicmd;
|
||||
struct ata_device *dev = qc->dev;
|
||||
int using_pio = (dev->flags & ATA_DFLAG_PIO);
|
||||
int nodata = (scmd->sc_data_direction == DMA_NONE);
|
||||
int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
|
||||
unsigned int nbytes;
|
||||
|
||||
memset(qc->cdb, 0, dev->cdb_len);
|
||||
|
@ -2362,7 +2362,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
|
|||
ata_qc_set_pc_nbytes(qc);
|
||||
|
||||
/* check whether ATAPI DMA is safe */
|
||||
if (!using_pio && ata_check_atapi_dma(qc))
|
||||
if (!nodata && !using_pio && atapi_check_dma(qc))
|
||||
using_pio = 1;
|
||||
|
||||
/* Some controller variants snoop this value for Packet
|
||||
|
@ -2402,13 +2402,11 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
|
|||
qc->tf.lbam = (nbytes & 0xFF);
|
||||
qc->tf.lbah = (nbytes >> 8);
|
||||
|
||||
if (using_pio || nodata) {
|
||||
/* no data, or PIO data xfer */
|
||||
if (nodata)
|
||||
qc->tf.protocol = ATAPI_PROT_NODATA;
|
||||
else
|
||||
qc->tf.protocol = ATAPI_PROT_PIO;
|
||||
} else {
|
||||
if (nodata)
|
||||
qc->tf.protocol = ATAPI_PROT_NODATA;
|
||||
else if (using_pio)
|
||||
qc->tf.protocol = ATAPI_PROT_PIO;
|
||||
else {
|
||||
/* DMA data xfer */
|
||||
qc->tf.protocol = ATAPI_PROT_DMA;
|
||||
qc->tf.feature |= ATAPI_PKT_DMA;
|
||||
|
|
|
@ -106,7 +106,7 @@ extern void ata_sg_clean(struct ata_queued_cmd *qc);
|
|||
extern void ata_qc_free(struct ata_queued_cmd *qc);
|
||||
extern void ata_qc_issue(struct ata_queued_cmd *qc);
|
||||
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
|
||||
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
|
||||
extern int atapi_check_dma(struct ata_queued_cmd *qc);
|
||||
extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
|
||||
extern void ata_dev_init(struct ata_device *dev);
|
||||
extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
|
||||
|
|
|
@@ -414,6 +414,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),

@@ -424,6 +425,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
@@ -1322,6 +1322,9 @@ static int mv_port_start(struct ata_port *ap)
goto out_port_free_dma_mem;
memset(pp->crpb, 0, MV_CRPB_Q_SZ);

/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
ap->flags |= ATA_FLAG_AN;
/*
* For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
* For later hardware, we need one unique sg_tbl per NCQ tag.

@@ -1592,6 +1595,24 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)

if ((qc->tf.protocol != ATA_PROT_DMA) &&
(qc->tf.protocol != ATA_PROT_NCQ)) {
static int limit_warnings = 10;
/*
* Errata SATA#16, SATA#24: warn if multiple DRQs expected.
*
* Someday, we might implement special polling workarounds
* for these, but it all seems rather unnecessary since we
* normally use only DMA for commands which transfer more
* than a single block of data.
*
* Much of the time, this could just work regardless.
* So for now, just log the incident, and allow the attempt.
*/
if (limit_warnings && (qc->nbytes / qc->sect_size) > 1) {
--limit_warnings;
ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
/*
* We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
@@ -948,7 +948,7 @@ static void intel_i9xx_setup_flush(void)
intel_private.ifp_resource.flags = IORESOURCE_MEM;

/* Setup chipset flush for 915 */
if (IS_I965 || IS_G33) {
if (IS_I965 || IS_G33 || IS_G4X) {
intel_i965_g33_setup_chipset_flush();
} else {
intel_i915_setup_chipset_flush();
@@ -76,7 +76,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
pci_unmap_single(dev->pdev, entry->busaddr[i],
pci_unmap_page(dev->pdev, entry->busaddr[i],
PAGE_SIZE, PCI_DMA_TODEVICE);
}

@@ -137,10 +137,8 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga

for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_single(dev->pdev,
page_address(entry->
pagelist[i]),
PAGE_SIZE, PCI_DMA_TODEVICE);
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_TODEVICE);
if (entry->busaddr[i] == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
@@ -628,7 +628,7 @@ struct drm_set_version {
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
@@ -470,17 +470,18 @@ int drm_ioctl(struct inode *inode, struct file *filp,
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
(nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
ioctl = &drm_ioctls[nr];
else
cmd = ioctl->cmd;
} else
goto err_i1;

/* Do not trust userspace, use our own definition */
func = ioctl->func;
/* is there a local override? */
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
func = dev->driver->dma_ioctl;


if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
@ -103,20 +103,18 @@
|
|||
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
|
||||
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
|
||||
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
|
||||
{0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
|
||||
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
|
||||
{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
|
||||
|
@ -411,4 +409,7 @@
|
|||
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0, 0, 0}
|
||||
|
|
|
@ -389,6 +389,7 @@ static int i915_resume(struct drm_device *dev)
|
|||
pci_restore_state(dev->pdev);
|
||||
if (pci_enable_device(dev->pdev))
|
||||
return -1;
|
||||
pci_set_master(dev->pdev);
|
||||
|
||||
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
|
||||
|
||||
|
|
|
@ -1112,12 +1112,19 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
(dev)->pci_device == 0x29A2 || \
|
||||
(dev)->pci_device == 0x2A02 || \
|
||||
(dev)->pci_device == 0x2A12 || \
|
||||
(dev)->pci_device == 0x2A42)
|
||||
(dev)->pci_device == 0x2A42 || \
|
||||
(dev)->pci_device == 0x2E02 || \
|
||||
(dev)->pci_device == 0x2E12 || \
|
||||
(dev)->pci_device == 0x2E22)
|
||||
|
||||
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
|
||||
|
||||
#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
|
||||
|
||||
#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
|
||||
(dev)->pci_device == 0x2E12 || \
|
||||
(dev)->pci_device == 0x2E22)
|
||||
|
||||
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
|
||||
(dev)->pci_device == 0x29B2 || \
|
||||
(dev)->pci_device == 0x29D2)
|
||||
|
@ -1128,7 +1135,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
|
||||
IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
|
||||
|
||||
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev))
|
||||
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev))
|
||||
|
||||
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
|
||||
|
||||
|
|
|
@ -189,18 +189,12 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(R300_RE_CULL_CNTL, 1);
|
||||
ADD_RANGE(0x42C0, 2);
|
||||
ADD_RANGE(R300_RS_CNTL_0, 2);
|
||||
ADD_RANGE(R300_RS_INTERP_0, 8);
|
||||
ADD_RANGE(R300_RS_ROUTE_0, 8);
|
||||
ADD_RANGE(0x43A4, 2);
|
||||
|
||||
ADD_RANGE(R300_SC_HYPERZ, 2);
|
||||
ADD_RANGE(0x43E8, 1);
|
||||
ADD_RANGE(R300_PFS_CNTL_0, 3);
|
||||
ADD_RANGE(R300_PFS_NODE_0, 4);
|
||||
ADD_RANGE(R300_PFS_TEXI_0, 64);
|
||||
|
||||
ADD_RANGE(0x46A4, 5);
|
||||
ADD_RANGE(R300_PFS_INSTR0_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR1_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR2_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR3_0, 64);
|
||||
|
||||
ADD_RANGE(R300_RE_FOG_STATE, 1);
|
||||
ADD_RANGE(R300_FOG_COLOR_R, 3);
|
||||
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
|
||||
|
@ -215,14 +209,12 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(0x4E50, 9);
|
||||
ADD_RANGE(0x4E88, 1);
|
||||
ADD_RANGE(0x4EA0, 2);
|
||||
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
|
||||
ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
|
||||
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
|
||||
ADD_RANGE(0x4F28, 1);
|
||||
ADD_RANGE(0x4F30, 2);
|
||||
ADD_RANGE(0x4F44, 1);
|
||||
ADD_RANGE(0x4F54, 1);
|
||||
ADD_RANGE(R300_ZB_CNTL, 3);
|
||||
ADD_RANGE(R300_ZB_FORMAT, 4);
|
||||
ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
|
||||
ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
|
||||
ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
|
||||
|
||||
ADD_RANGE(R300_TX_FILTER_0, 16);
|
||||
ADD_RANGE(R300_TX_FILTER1_0, 16);
|
||||
|
@ -235,13 +227,32 @@ void r300_init_reg_flags(struct drm_device *dev)
|
|||
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
|
||||
|
||||
/* Sporadic registers used as primitives are emitted */
|
||||
ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1);
|
||||
ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
|
||||
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
|
||||
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
|
||||
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
|
||||
ADD_RANGE(0x4074, 16);
|
||||
ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
|
||||
ADD_RANGE(R500_US_CONFIG, 2);
|
||||
ADD_RANGE(R500_US_CODE_ADDR, 3);
|
||||
ADD_RANGE(R500_US_FC_CTRL, 1);
|
||||
ADD_RANGE(R500_RS_IP_0, 16);
|
||||
ADD_RANGE(R500_RS_INST_0, 16);
|
||||
ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
|
||||
ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
|
||||
ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
|
||||
} else {
|
||||
ADD_RANGE(R300_PFS_CNTL_0, 3);
|
||||
ADD_RANGE(R300_PFS_NODE_0, 4);
|
||||
ADD_RANGE(R300_PFS_TEXI_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR0_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR1_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR2_0, 64);
|
||||
ADD_RANGE(R300_PFS_INSTR3_0, 64);
|
||||
ADD_RANGE(R300_RS_INTERP_0, 8);
|
||||
ADD_RANGE(R300_RS_ROUTE_0, 8);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -707,8 +718,9 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
|
|||
BEGIN_RING(6);
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
|
||||
OUT_RING(R300_RB3D_ZCACHE_UNKNOWN_03);
|
||||
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
|
||||
OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE|
|
||||
R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
|
||||
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
|
||||
OUT_RING(0x0);
|
||||
ADVANCE_RING();
|
||||
|
@ -828,6 +840,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Uploads user-supplied vertex program instructions or parameters onto
|
||||
* the graphics card.
|
||||
* Called by r300_do_cp_cmdbuf.
|
||||
*/
|
||||
static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
drm_r300_cmd_header_t header)
|
||||
{
|
||||
int sz;
|
||||
int addr;
|
||||
int type;
|
||||
int clamp;
|
||||
int stride;
|
||||
RING_LOCALS;
|
||||
|
||||
sz = header.r500fp.count;
|
||||
/* address is 9 bits 0 - 8, bit 1 of flags is part of address */
|
||||
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
|
||||
|
||||
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
|
||||
clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
|
||||
|
||||
addr |= (type << 16);
|
||||
addr |= (clamp << 17);
|
||||
|
||||
stride = type ? 4 : 6;
|
||||
|
||||
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
|
||||
if (!sz)
|
||||
return 0;
|
||||
if (sz * stride * 4 > cmdbuf->bufsz)
|
||||
return -EINVAL;
|
||||
|
||||
BEGIN_RING(3 + sz * stride);
|
||||
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
|
||||
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
|
||||
OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
|
||||
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += sz * stride * 4;
|
||||
cmdbuf->bufsz -= sz * stride * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Parses and validates a user-supplied command buffer and emits appropriate
|
||||
* commands on the DMA ring buffer.
|
||||
|
@ -963,6 +1023,19 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
|
|||
}
|
||||
break;
|
||||
|
||||
case R300_CMD_R500FP:
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
|
||||
DRM_ERROR("Calling r500 command on r300 card\n");
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
DRM_DEBUG("R300_CMD_R500FP\n");
|
||||
ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
|
||||
if (ret) {
|
||||
DRM_ERROR("r300_emit_r500fp failed\n");
|
||||
goto cleanup;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("bad cmd_type %i at %p\n",
|
||||
header.header.cmd_type,
|
||||
|
|
|
@ -702,6 +702,27 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
|
||||
/* END: Rasterization / Interpolators - many guesses */
|
||||
|
||||
/* Hierarchical Z Enable */
|
||||
#define R300_SC_HYPERZ 0x43a4
|
||||
# define R300_SC_HYPERZ_DISABLE (0 << 0)
|
||||
# define R300_SC_HYPERZ_ENABLE (1 << 0)
|
||||
# define R300_SC_HYPERZ_MIN (0 << 1)
|
||||
# define R300_SC_HYPERZ_MAX (1 << 1)
|
||||
# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
|
||||
# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
|
||||
# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
|
||||
# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
|
||||
# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
|
||||
# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)
|
||||
|
||||
#define R300_SC_EDGERULE 0x43a8
|
||||
|
||||
/* BEGIN: Scissors and cliprects */
|
||||
|
||||
/* There are four clipping rectangles. Their corner coordinates are inclusive.
|
||||
|
@ -1346,7 +1367,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
/* Guess by Vladimir.
|
||||
* Set to 0A before 3D operations, set to 02 afterwards.
|
||||
*/
|
||||
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
|
||||
/*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/
|
||||
# define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002
|
||||
# define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A
|
||||
|
||||
|
@ -1355,19 +1376,14 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
* for this.
|
||||
* Bit (1<<8) is the "test" bit. so plain write is 6 - vd
|
||||
*/
|
||||
#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
|
||||
# define R300_RB3D_Z_DISABLED_1 0x00000010
|
||||
# define R300_RB3D_Z_DISABLED_2 0x00000014
|
||||
# define R300_RB3D_Z_TEST 0x00000012
|
||||
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
|
||||
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
|
||||
#define R300_ZB_CNTL 0x4F00
|
||||
# define R300_STENCIL_ENABLE (1 << 0)
|
||||
# define R300_Z_ENABLE (1 << 1)
|
||||
# define R300_Z_WRITE_ENABLE (1 << 2)
|
||||
# define R300_Z_SIGNED_COMPARE (1 << 3)
|
||||
# define R300_STENCIL_FRONT_BACK (1 << 4)
|
||||
|
||||
# define R300_RB3D_Z_TEST 0x00000012
|
||||
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
|
||||
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
|
||||
# define R300_RB3D_STENCIL_ENABLE 0x00000001
|
||||
|
||||
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
|
||||
#define R300_ZB_ZSTENCILCNTL 0x4f04
|
||||
/* functions */
|
||||
# define R300_ZS_NEVER 0
|
||||
# define R300_ZS_LESS 1
|
||||
|
@ -1387,52 +1403,166 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
# define R300_ZS_INVERT 5
|
||||
# define R300_ZS_INCR_WRAP 6
|
||||
# define R300_ZS_DECR_WRAP 7
|
||||
# define R300_Z_FUNC_SHIFT 0
|
||||
/* front and back refer to operations done for front
|
||||
and back faces, i.e. separate stencil function support */
|
||||
# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
|
||||
# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
|
||||
# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
|
||||
# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
|
||||
# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
|
||||
# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
|
||||
# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
|
||||
# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
|
||||
# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
|
||||
# define R300_S_FRONT_FUNC_SHIFT 3
|
||||
# define R300_S_FRONT_SFAIL_OP_SHIFT 6
|
||||
# define R300_S_FRONT_ZPASS_OP_SHIFT 9
|
||||
# define R300_S_FRONT_ZFAIL_OP_SHIFT 12
|
||||
# define R300_S_BACK_FUNC_SHIFT 15
|
||||
# define R300_S_BACK_SFAIL_OP_SHIFT 18
|
||||
# define R300_S_BACK_ZPASS_OP_SHIFT 21
|
||||
# define R300_S_BACK_ZFAIL_OP_SHIFT 24
|
||||
|
||||
#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
|
||||
# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
|
||||
# define R300_RB3D_ZS2_STENCIL_MASK 0xFF
|
||||
# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
|
||||
# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
|
||||
#define R300_ZB_STENCILREFMASK 0x4f08
|
||||
# define R300_STENCILREF_SHIFT 0
|
||||
# define R300_STENCILREF_MASK 0x000000ff
|
||||
# define R300_STENCILMASK_SHIFT 8
|
||||
# define R300_STENCILMASK_MASK 0x0000ff00
|
||||
# define R300_STENCILWRITEMASK_SHIFT 16
|
||||
# define R300_STENCILWRITEMASK_MASK 0x00ff0000
|
||||
|
||||
/* gap */
|
||||
|
||||
#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
|
||||
# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
|
||||
# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
|
||||
/* 16 bit format or some aditional bit ? */
|
||||
# define R300_DEPTH_FORMAT_UNK32 (32 << 0)
|
||||
#define R300_ZB_FORMAT 0x4f10
|
||||
# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0)
|
||||
# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0)
|
||||
# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0)
|
||||
/* reserved up to (15 << 0) */
|
||||
# define R300_INVERT_13E3_LEADING_ONES (0 << 4)
|
||||
# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
|
||||
|
||||
#define R300_RB3D_EARLY_Z 0x4F14
|
||||
# define R300_EARLY_Z_DISABLE (0 << 0)
|
||||
# define R300_EARLY_Z_ENABLE (1 << 0)
|
||||
#define R300_ZB_ZTOP 0x4F14
|
||||
# define R300_ZTOP_DISABLE (0 << 0)
|
||||
# define R300_ZTOP_ENABLE (1 << 0)
|
||||
|
||||
/* gap */
|
||||
|
||||
#define R300_RB3D_ZCACHE_CTLSTAT 0x4F18 /* GUESS */
|
||||
# define R300_RB3D_ZCACHE_UNKNOWN_01 0x1
|
||||
# define R300_RB3D_ZCACHE_UNKNOWN_03 0x3
|
||||
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
|
||||
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0)
|
||||
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
|
||||
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1)
|
||||
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1)
|
||||
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31)
|
||||
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31)
|
||||
|
||||
#define R300_ZB_BW_CNTL 0x4f1c
|
||||
# define R300_HIZ_DISABLE (0 << 0)
|
||||
# define R300_HIZ_ENABLE (1 << 0)
|
||||
# define R300_HIZ_MIN (0 << 1)
|
||||
# define R300_HIZ_MAX (1 << 1)
|
||||
# define R300_FAST_FILL_DISABLE (0 << 2)
|
||||
# define R300_FAST_FILL_ENABLE (1 << 2)
|
||||
# define R300_RD_COMP_DISABLE (0 << 3)
|
||||
# define R300_RD_COMP_ENABLE (1 << 3)
|
||||
# define R300_WR_COMP_DISABLE (0 << 4)
|
||||
# define R300_WR_COMP_ENABLE (1 << 4)
|
||||
# define R300_ZB_CB_CLEAR_RMW (0 << 5)
|
||||
# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5)
|
||||
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6)
|
||||
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6)
|
||||
|
||||
# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7)
|
||||
# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7)
|
||||
# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8)
|
||||
# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8)
|
||||
|
||||
# define R500_BMASK_ENABLE (0 << 10)
|
||||
# define R500_BMASK_DISABLE (1 << 10)
|
||||
# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11)
|
||||
# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11)
|
||||
# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12)
|
||||
# define R500_HIZ_FP_EXP_BITS_1 (1 << 12)
|
||||
# define R500_HIZ_FP_EXP_BITS_2 (2 << 12)
|
||||
# define R500_HIZ_FP_EXP_BITS_3 (3 << 12)
|
||||
# define R500_HIZ_FP_EXP_BITS_4 (4 << 12)
|
||||
# define R500_HIZ_FP_EXP_BITS_5 (5 << 12)
|
||||
# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15)
|
||||
# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15)
|
||||
# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16)
|
||||
# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16)
|
||||
# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17)
|
||||
# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17)
|
||||
# define R500_PEQ_PACKING_DISABLE (0 << 18)
|
||||
# define R500_PEQ_PACKING_ENABLE (1 << 18)
|
||||
# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18)
|
||||
# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18)
|
||||
|
||||
|
||||
/* gap */
|
||||
|
||||
#define R300_RB3D_DEPTHOFFSET 0x4F20
|
||||
#define R300_RB3D_DEPTHPITCH 0x4F24
|
||||
# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
|
||||
# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
|
||||
# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
|
||||
# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
|
||||
# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
|
||||
# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
|
||||
/* Z Buffer Address Offset.
|
||||
* Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
|
||||
*/
|
||||
#define R300_ZB_DEPTHOFFSET 0x4f20
|
||||
|
||||
/* Z Buffer Pitch and Endian Control */
|
||||
#define R300_ZB_DEPTHPITCH 0x4f24
|
||||
# define R300_DEPTHPITCH_MASK 0x00003FFC
|
||||
# define R300_DEPTHMACROTILE_DISABLE (0 << 16)
|
||||
# define R300_DEPTHMACROTILE_ENABLE (1 << 16)
|
||||
# define R300_DEPTHMICROTILE_LINEAR (0 << 17)
|
||||
# define R300_DEPTHMICROTILE_TILED (1 << 17)
|
||||
# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
|
||||
# define R300_DEPTHENDIAN_NO_SWAP (0 << 18)
|
||||
# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18)
|
||||
# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18)
|
||||
# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
|
||||
|
||||
/* Z Buffer Clear Value */
|
||||
#define R300_ZB_DEPTHCLEARVALUE 0x4f28
|
||||
|
||||
#define R300_ZB_ZMASK_OFFSET 0x4f30
|
||||
#define R300_ZB_ZMASK_PITCH 0x4f34
|
||||
#define R300_ZB_ZMASK_WRINDEX 0x4f38
|
||||
#define R300_ZB_ZMASK_DWORD 0x4f3c
|
||||
#define R300_ZB_ZMASK_RDINDEX 0x4f40
|
||||
|
||||
/* Hierarchical Z Memory Offset */
|
||||
#define R300_ZB_HIZ_OFFSET 0x4f44
|
||||
|
||||
/* Hierarchical Z Write Index */
|
||||
#define R300_ZB_HIZ_WRINDEX 0x4f48
|
||||
|
||||
/* Hierarchical Z Data */
|
||||
#define R300_ZB_HIZ_DWORD 0x4f4c
|
||||
|
||||
/* Hierarchical Z Read Index */
|
||||
#define R300_ZB_HIZ_RDINDEX 0x4f50
|
||||
|
||||
/* Hierarchical Z Pitch */
|
||||
#define R300_ZB_HIZ_PITCH 0x4f54
|
||||
|
||||
/* Z Buffer Z Pass Counter Data */
|
||||
#define R300_ZB_ZPASS_DATA 0x4f58
|
||||
|
||||
/* Z Buffer Z Pass Counter Address */
|
||||
#define R300_ZB_ZPASS_ADDR 0x4f5c
|
||||
|
||||
/* Depth buffer X and Y coordinate offset */
|
||||
#define R300_ZB_DEPTHXY_OFFSET 0x4f60
|
||||
# define R300_DEPTHX_OFFSET_SHIFT 1
|
||||
# define R300_DEPTHX_OFFSET_MASK 0x000007FE
|
||||
# define R300_DEPTHY_OFFSET_SHIFT 17
|
||||
# define R300_DEPTHY_OFFSET_MASK 0x07FE0000
|
||||
|
||||
/* Sets the fifo sizes */
|
||||
#define R500_ZB_FIFO_SIZE 0x4fd0
|
||||
# define R500_OP_FIFO_SIZE_FULL (0 << 0)
|
||||
# define R500_OP_FIFO_SIZE_HALF (1 << 0)
|
||||
# define R500_OP_FIFO_SIZE_QUATER (2 << 0)
|
||||
# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
|
||||
|
||||
/* Stencil Reference Value and Mask for backfacing quads */
|
||||
/* R300_ZB_STENCILREFMASK handles front face */
|
||||
#define R500_ZB_STENCILREFMASK_BF 0x4fd4
|
||||
# define R500_STENCILREF_SHIFT 0
|
||||
# define R500_STENCILREF_MASK 0x000000ff
|
||||
# define R500_STENCILMASK_SHIFT 8
|
||||
# define R500_STENCILMASK_MASK 0x0000ff00
|
||||
# define R500_STENCILWRITEMASK_SHIFT 16
|
||||
# define R500_STENCILWRITEMASK_MASK 0x00ff0000
|
||||
|
||||
/* BEGIN: Vertex program instruction set */
|
||||
|
||||
|
@ -1623,4 +1753,20 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
*/
|
||||
#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
|
||||
|
||||
#define R500_VAP_INDEX_OFFSET 0x208c
|
||||
|
||||
#define R500_GA_US_VECTOR_INDEX 0x4250
|
||||
#define R500_GA_US_VECTOR_DATA 0x4254
|
||||
|
||||
#define R500_RS_IP_0 0x4074
|
||||
#define R500_RS_INST_0 0x4320
|
||||
|
||||
#define R500_US_CONFIG 0x4600
|
||||
|
||||
#define R500_US_FC_CTRL 0x4624
|
||||
#define R500_US_CODE_ADDR 0x4630
|
||||
|
||||
#define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0
|
||||
#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8
|
||||
|
||||
#endif /* _R300_REG_H */
|
||||
|
|
|
@ -240,6 +240,7 @@ typedef union {
|
|||
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
|
||||
|
||||
#define R300_CMD_SCRATCH 8
|
||||
#define R300_CMD_R500FP 9
|
||||
|
||||
typedef union {
|
||||
unsigned int u;
|
||||
|
@ -268,6 +269,9 @@ typedef union {
|
|||
struct {
|
||||
unsigned char cmd_type, reg, n_bufs, flags;
|
||||
} scratch;
|
||||
struct {
|
||||
unsigned char cmd_type, count, adrlo, adrhi_flags;
|
||||
} r500fp;
|
||||
} drm_r300_cmd_header_t;
|
||||
|
||||
#define RADEON_FRONT 0x1
|
||||
|
@ -278,6 +282,9 @@ typedef union {
|
|||
#define RADEON_USE_HIERZ 0x40000000
|
||||
#define RADEON_USE_COMP_ZBUF 0x20000000
|
||||
|
||||
#define R500FP_CONSTANT_TYPE (1 << 1)
|
||||
#define R500FP_CONSTANT_CLAMP (1 << 2)
|
||||
|
||||
/* Primitive types
|
||||
*/
|
||||
#define RADEON_POINTS 0x1
|
||||
|
@ -669,6 +676,7 @@ typedef struct drm_radeon_indirect {
|
|||
#define RADEON_PARAM_CARD_TYPE 12
|
||||
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
|
||||
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
|
||||
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
|
||||
|
||||
typedef struct drm_radeon_getparam {
|
||||
int param;
|
||||
|
|
|
@@ -38,7 +38,7 @@

#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20060524"
#define DRIVER_DATE "20080528"

/* Interface history:
*

@@ -98,9 +98,10 @@
* 1.26- Add support for variable size PCI(E) gart aperture
* 1.27- Add support for IGP GART
* 1.28- Add support for VBL on CRTC2
* 1.29- R500 3D cmd buffer support
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 28
#define DRIVER_MINOR 29
#define DRIVER_PATCHLEVEL 0

/*
@@ -122,7 +123,7 @@ enum radeon_family {
CHIP_RV380,
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
CHIP_RS480,
CHIP_RS690,
CHIP_RV515,
CHIP_R520,
@ -294,6 +295,7 @@ typedef struct drm_radeon_private {
|
|||
int vblank_crtc;
|
||||
uint32_t irq_enable_reg;
|
||||
int irq_enabled;
|
||||
uint32_t r500_disp_irq_reg;
|
||||
|
||||
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
|
||||
struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
|
||||
|
@ -307,6 +309,8 @@ typedef struct drm_radeon_private {
|
|||
/* starting from here on, data is preserved accross an open */
|
||||
uint32_t flags; /* see radeon_chip_flags */
|
||||
unsigned long fb_aper_offset;
|
||||
|
||||
int num_gb_pipes;
|
||||
} drm_radeon_private_t;
|
||||
|
||||
typedef struct drm_radeon_buf_priv {
|
||||
|
@ -382,6 +386,7 @@ extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
|
|||
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
|
||||
extern void radeon_driver_irq_postinstall(struct drm_device * dev);
|
||||
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
|
||||
extern void radeon_enable_interrupt(struct drm_device *dev);
|
||||
extern int radeon_vblank_crtc_get(struct drm_device *dev);
|
||||
extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
|
||||
|
||||
|
@ -444,13 +449,13 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
#define RADEON_PCIE_DATA 0x0034
|
||||
#define RADEON_PCIE_TX_GART_CNTL 0x10
|
||||
# define RADEON_PCIE_TX_GART_EN (1 << 0)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
|
||||
# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0<<3)
|
||||
# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1<<3)
|
||||
# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1<<5)
|
||||
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
|
||||
# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
|
||||
# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
|
||||
# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
|
||||
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
|
||||
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
|
||||
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
|
||||
#define RADEON_PCIE_TX_GART_BASE 0x13
|
||||
|
@ -459,14 +464,9 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
#define RADEON_PCIE_TX_GART_END_LO 0x16
|
||||
#define RADEON_PCIE_TX_GART_END_HI 0x17
|
||||
|
||||
#define RADEON_IGPGART_INDEX 0x168
|
||||
#define RADEON_IGPGART_DATA 0x16c
|
||||
#define RADEON_IGPGART_UNK_18 0x18
|
||||
#define RADEON_IGPGART_CTRL 0x2b
|
||||
#define RADEON_IGPGART_BASE_ADDR 0x2c
|
||||
#define RADEON_IGPGART_FLUSH 0x2e
|
||||
#define RADEON_IGPGART_ENABLE 0x38
|
||||
#define RADEON_IGPGART_UNK_39 0x39
|
||||
#define RS480_NB_MC_INDEX 0x168
|
||||
# define RS480_NB_MC_IND_WR_EN (1 << 8)
|
||||
#define RS480_NB_MC_DATA 0x16c
|
||||
|
||||
#define RS690_MC_INDEX 0x78
|
||||
# define RS690_MC_INDEX_MASK 0x1ff
|
||||
|
@ -474,45 +474,91 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
# define RS690_MC_INDEX_WR_ACK 0x7f
|
||||
#define RS690_MC_DATA 0x7c
|
||||
|
||||
#define RS690_MC_MISC_CNTL 0x18
|
||||
#define RS690_MC_GART_FEATURE_ID 0x2b
|
||||
#define RS690_MC_GART_BASE 0x2c
|
||||
#define RS690_MC_GART_CACHE_CNTL 0x2e
|
||||
# define RS690_MC_GART_CC_NO_CHANGE 0x0
|
||||
# define RS690_MC_GART_CC_CLEAR 0x1
|
||||
# define RS690_MC_GART_CLEAR_STATUS (1 << 1)
|
||||
# define RS690_MC_GART_CLEAR_DONE (0 << 1)
|
||||
# define RS690_MC_GART_CLEAR_PENDING (1 << 1)
|
||||
#define RS690_MC_AGP_SIZE 0x38
|
||||
# define RS690_MC_GART_DIS 0x0
|
||||
# define RS690_MC_GART_EN 0x1
|
||||
# define RS690_MC_AGP_SIZE_32MB (0 << 1)
|
||||
# define RS690_MC_AGP_SIZE_64MB (1 << 1)
|
||||
# define RS690_MC_AGP_SIZE_128MB (2 << 1)
|
||||
# define RS690_MC_AGP_SIZE_256MB (3 << 1)
|
||||
# define RS690_MC_AGP_SIZE_512MB (4 << 1)
|
||||
# define RS690_MC_AGP_SIZE_1GB (5 << 1)
|
||||
# define RS690_MC_AGP_SIZE_2GB (6 << 1)
|
||||
#define RS690_MC_AGP_MODE_CONTROL 0x39
|
||||
/* MC indirect registers */
|
||||
#define RS480_MC_MISC_CNTL 0x18
|
||||
# define RS480_DISABLE_GTW (1 << 1)
|
||||
/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
|
||||
# define RS480_GART_INDEX_REG_EN (1 << 12)
|
||||
# define RS690_BLOCK_GFX_D3_EN (1 << 14)
|
||||
#define RS480_K8_FB_LOCATION 0x1e
|
||||
#define RS480_GART_FEATURE_ID 0x2b
|
||||
# define RS480_HANG_EN (1 << 11)
|
||||
# define RS480_TLB_ENABLE (1 << 18)
|
||||
# define RS480_P2P_ENABLE (1 << 19)
|
||||
# define RS480_GTW_LAC_EN (1 << 25)
|
||||
# define RS480_2LEVEL_GART (0 << 30)
|
||||
# define RS480_1LEVEL_GART (1 << 30)
|
||||
# define RS480_PDC_EN (1 << 31)
|
||||
#define RS480_GART_BASE 0x2c
|
||||
#define RS480_GART_CACHE_CNTRL 0x2e
|
||||
# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
|
||||
#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
|
||||
# define RS480_GART_EN (1 << 0)
|
||||
# define RS480_VA_SIZE_32MB (0 << 1)
|
||||
# define RS480_VA_SIZE_64MB (1 << 1)
|
||||
# define RS480_VA_SIZE_128MB (2 << 1)
|
||||
# define RS480_VA_SIZE_256MB (3 << 1)
|
||||
# define RS480_VA_SIZE_512MB (4 << 1)
|
||||
# define RS480_VA_SIZE_1GB (5 << 1)
|
||||
# define RS480_VA_SIZE_2GB (6 << 1)
|
||||
#define RS480_AGP_MODE_CNTL 0x39
|
||||
# define RS480_POST_GART_Q_SIZE (1 << 18)
|
||||
# define RS480_NONGART_SNOOP (1 << 19)
|
||||
# define RS480_AGP_RD_BUF_SIZE (1 << 20)
|
||||
# define RS480_REQ_TYPE_SNOOP_SHIFT 22
|
||||
# define RS480_REQ_TYPE_SNOOP_MASK 0x3
|
||||
# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
|
||||
#define RS480_MC_MISC_UMA_CNTL 0x5f
|
||||
#define RS480_MC_MCLK_CNTL 0x7a
|
||||
#define RS480_MC_UMA_DUALCH_CNTL 0x86
|
||||
|
||||
#define RS690_MC_FB_LOCATION 0x100
|
||||
#define RS690_MC_AGP_LOCATION 0x101
|
||||
#define RS690_MC_AGP_BASE 0x102
|
||||
#define RS690_MC_AGP_BASE_2 0x103
|
||||
|
||||
#define R520_MC_IND_INDEX 0x70
|
||||
#define R520_MC_IND_WR_EN (1<<24)
|
||||
#define R520_MC_IND_WR_EN (1 << 24)
|
||||
#define R520_MC_IND_DATA 0x74
|
||||
|
||||
#define RV515_MC_FB_LOCATION 0x01
|
||||
#define RV515_MC_AGP_LOCATION 0x02
|
||||
#define RV515_MC_AGP_BASE 0x03
|
||||
#define RV515_MC_AGP_BASE_2 0x04
|
||||
|
||||
#define R520_MC_FB_LOCATION 0x04
|
||||
#define R520_MC_AGP_LOCATION 0x05
|
||||
#define R520_MC_AGP_BASE 0x06
|
||||
#define R520_MC_AGP_BASE_2 0x07
|
||||
|
||||
#define RADEON_MPP_TB_CONFIG 0x01c0
|
||||
#define RADEON_MEM_CNTL 0x0140
|
||||
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
|
||||
#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
|
||||
#define RS480_AGP_BASE_2 0x0164
|
||||
#define RADEON_AGP_BASE 0x0170
|
||||
|
||||
/* pipe config regs */
|
||||
#define R400_GB_PIPE_SELECT 0x402c
|
||||
#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
|
||||
#define R500_SU_REG_DEST 0x42c8
|
||||
#define R300_GB_TILE_CONFIG 0x4018
|
||||
# define R300_ENABLE_TILING (1 << 0)
|
||||
# define R300_PIPE_COUNT_RV350 (0 << 1)
|
||||
# define R300_PIPE_COUNT_R300 (3 << 1)
|
||||
# define R300_PIPE_COUNT_R420_3P (6 << 1)
|
||||
# define R300_PIPE_COUNT_R420 (7 << 1)
|
||||
# define R300_TILE_SIZE_8 (0 << 4)
|
||||
# define R300_TILE_SIZE_16 (1 << 4)
|
||||
# define R300_TILE_SIZE_32 (2 << 4)
|
||||
# define R300_SUBPIXEL_1_12 (0 << 16)
|
||||
# define R300_SUBPIXEL_1_16 (1 << 16)
|
||||
#define R300_DST_PIPE_CONFIG 0x170c
|
||||
# define R300_PIPE_AUTO_CONFIG (1 << 31)
|
||||
#define R300_RB2D_DSTCACHE_MODE 0x3428
|
||||
# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
|
||||
# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
|
||||
|
||||
#define RADEON_RB3D_COLOROFFSET 0x1c40
|
||||
#define RADEON_RB3D_COLORPITCH 0x1c48
|
||||
|
||||
|
@ -616,11 +662,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
#define RADEON_PP_TXFILTER_1 0x1c6c
|
||||
#define RADEON_PP_TXFILTER_2 0x1c84
|
||||
|
||||
#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
|
||||
# define RADEON_RB2D_DC_FLUSH (3 << 0)
|
||||
# define RADEON_RB2D_DC_FREE (3 << 2)
|
||||
# define RADEON_RB2D_DC_FLUSH_ALL 0xf
|
||||
# define RADEON_RB2D_DC_BUSY (1 << 31)
|
||||
#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */
|
||||
#define R300_DSTCACHE_CTLSTAT 0x1714
|
||||
# define R300_RB2D_DC_FLUSH (3 << 0)
|
||||
# define R300_RB2D_DC_FREE (3 << 2)
|
||||
# define R300_RB2D_DC_FLUSH_ALL 0xf
|
||||
# define R300_RB2D_DC_BUSY (1 << 31)
|
||||
#define RADEON_RB3D_CNTL 0x1c3c
|
||||
# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
|
||||
# define RADEON_PLANE_MASK_ENABLE (1 << 1)
|
||||
|
@ -643,11 +690,18 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
# define RADEON_RB3D_ZC_FREE (1 << 2)
|
||||
# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
|
||||
# define RADEON_RB3D_ZC_BUSY (1 << 31)
|
||||
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
|
||||
# define R300_ZC_FLUSH (1 << 0)
|
||||
# define R300_ZC_FREE (1 << 1)
|
||||
# define R300_ZC_FLUSH_ALL 0x3
|
||||
# define R300_ZC_BUSY (1 << 31)
|
||||
#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
|
||||
# define RADEON_RB3D_DC_FLUSH (3 << 0)
|
||||
# define RADEON_RB3D_DC_FREE (3 << 2)
|
||||
# define RADEON_RB3D_DC_FLUSH_ALL 0xf
|
||||
# define RADEON_RB3D_DC_BUSY (1 << 31)
|
||||
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
|
||||
# define R300_RB3D_DC_FINISH (1 << 4)
|
||||
#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
|
||||
# define RADEON_Z_TEST_MASK (7 << 4)
|
||||
# define RADEON_Z_TEST_ALWAYS (7 << 4)
|
||||
|
@ -1057,6 +1111,31 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
|
||||
#define R200_VAP_PVS_CNTL_1 0x22D0
|
||||
|
||||
#define R500_D1CRTC_STATUS 0x609c
|
||||
#define R500_D2CRTC_STATUS 0x689c
|
||||
#define R500_CRTC_V_BLANK (1<<0)
|
||||
|
||||
#define R500_D1CRTC_FRAME_COUNT 0x60a4
|
||||
#define R500_D2CRTC_FRAME_COUNT 0x68a4
|
||||
|
||||
#define R500_D1MODE_V_COUNTER 0x6530
|
||||
#define R500_D2MODE_V_COUNTER 0x6d30
|
||||
|
||||
#define R500_D1MODE_VBLANK_STATUS 0x6534
|
||||
#define R500_D2MODE_VBLANK_STATUS 0x6d34
|
||||
#define R500_VBLANK_OCCURED (1<<0)
|
||||
#define R500_VBLANK_ACK (1<<4)
|
||||
#define R500_VBLANK_STAT (1<<12)
|
||||
#define R500_VBLANK_INT (1<<16)
|
||||
|
||||
#define R500_DxMODE_INT_MASK 0x6540
|
||||
#define R500_D1MODE_INT_MASK (1<<0)
|
||||
#define R500_D2MODE_INT_MASK (1<<8)
|
||||
|
||||
#define R500_DISP_INTERRUPT_STATUS 0x7edc
|
||||
#define R500_D1_VBLANK_INTERRUPT (1 << 4)
|
||||
#define R500_D2_VBLANK_INTERRUPT (1 << 5)
|
||||
|
||||
/* Constants */
|
||||
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
|
||||
|
||||
|
@ -1078,42 +1157,50 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
|
||||
#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
|
||||
|
||||
#define RADEON_WRITE_PLL( addr, val ) \
|
||||
#define RADEON_WRITE_PLL(addr, val) \
|
||||
do { \
|
||||
RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX, \
|
||||
RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \
|
||||
((addr) & 0x1f) | RADEON_PLL_WR_EN ); \
|
||||
RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \
|
||||
RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \
|
||||
} while (0)
|
||||
|
||||
#define RADEON_WRITE_IGPGART( addr, val ) \
|
||||
#define RADEON_WRITE_PCIE(addr, val) \
|
||||
do { \
|
||||
RADEON_WRITE( RADEON_IGPGART_INDEX, \
|
||||
((addr) & 0x7f) | (1 << 8)); \
|
||||
RADEON_WRITE( RADEON_IGPGART_DATA, (val) ); \
|
||||
RADEON_WRITE( RADEON_IGPGART_INDEX, 0x7f ); \
|
||||
} while (0)
|
||||
|
||||
#define RADEON_WRITE_PCIE( addr, val ) \
|
||||
do { \
|
||||
RADEON_WRITE8( RADEON_PCIE_INDEX, \
|
||||
RADEON_WRITE8(RADEON_PCIE_INDEX, \
|
||||
((addr) & 0xff)); \
|
||||
RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
|
||||
RADEON_WRITE(RADEON_PCIE_DATA, (val)); \
|
||||
} while (0)
|
||||
|
||||
#define RADEON_WRITE_MCIND( addr, val ) \
|
||||
do { \
|
||||
RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
|
||||
RADEON_WRITE(R520_MC_IND_DATA, (val)); \
|
||||
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
|
||||
} while (0)
|
||||
#define R500_WRITE_MCIND(addr, val) \
|
||||
do { \
|
||||
RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
|
||||
RADEON_WRITE(R520_MC_IND_DATA, (val)); \
|
||||
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
|
||||
} while (0)
|
||||
|
||||
#define RS690_WRITE_MCIND( addr, val ) \
|
||||
#define RS480_WRITE_MCIND(addr, val) \
|
||||
do { \
|
||||
RADEON_WRITE(RS480_NB_MC_INDEX, \
|
||||
((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \
|
||||
RADEON_WRITE(RS480_NB_MC_DATA, (val)); \
|
||||
RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); \
|
||||
} while (0)
|
||||
|
||||
#define RS690_WRITE_MCIND(addr, val) \
|
||||
do { \
|
||||
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
|
||||
RADEON_WRITE(RS690_MC_DATA, val); \
|
||||
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
|
||||
} while (0)
|
||||
|
||||
#define IGP_WRITE_MCIND(addr, val) \
|
||||
do { \
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
|
||||
RS690_WRITE_MCIND(addr, val); \
|
||||
else \
|
||||
RS480_WRITE_MCIND(addr, val); \
|
||||
} while (0)
|
||||
|
||||
#define CP_PACKET0( reg, n ) \
|
||||
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
|
||||
#define CP_PACKET0_TABLE( reg, n ) \
|
||||
|
@ -1154,23 +1241,43 @@ do { \
|
|||
} while (0)
|
||||
|
||||
#define RADEON_FLUSH_CACHE() do { \
|
||||
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
|
||||
OUT_RING( RADEON_RB3D_DC_FLUSH ); \
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(RADEON_RB3D_DC_FLUSH); \
|
||||
} else { \
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(RADEON_RB3D_DC_FLUSH); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define RADEON_PURGE_CACHE() do { \
|
||||
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
|
||||
OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
|
||||
} else { \
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define RADEON_FLUSH_ZCACHE() do { \
|
||||
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
|
||||
OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(RADEON_RB3D_ZC_FLUSH); \
|
||||
} else { \
|
||||
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(R300_ZC_FLUSH); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define RADEON_PURGE_ZCACHE() do { \
|
||||
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
|
||||
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL); \
|
||||
} else { \
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||
OUT_RING(R300_ZC_FLUSH_ALL); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/* ================================================================
|
||||
|
|
|
@@ -234,7 +234,7 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
return radeon_wait_irq(dev, irqwait->irq_seq);
}

static void radeon_enable_interrupt(struct drm_device *dev)
void radeon_enable_interrupt(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
|
@ -1662,7 +1662,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
|
|||
u32 height;
|
||||
int i;
|
||||
u32 texpitch, microtile;
|
||||
u32 offset;
|
||||
u32 offset, byte_offset;
|
||||
RING_LOCALS;
|
||||
|
||||
if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
|
||||
|
@ -1727,6 +1727,13 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
|
|||
} else
|
||||
microtile = 0;
|
||||
|
||||
/* this might fail for zero-sized uploads - are those illegal? */
|
||||
if (!radeon_check_offset(dev_priv, tex->offset + image->height *
|
||||
blit_width - 1)) {
|
||||
DRM_ERROR("Invalid final destination offset\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
|
||||
|
||||
do {
|
||||
|
@ -1840,6 +1847,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
|
|||
}
|
||||
|
||||
#undef RADEON_COPY_MT
|
||||
byte_offset = (image->y & ~2047) * blit_width;
|
||||
buf->file_priv = file_priv;
|
||||
buf->used = size;
|
||||
offset = dev_priv->gart_buffers_offset + buf->offset;
|
||||
|
@ -1854,9 +1862,9 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
|
|||
RADEON_DP_SRC_SOURCE_MEMORY |
|
||||
RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
|
||||
OUT_RING((spitch << 22) | (offset >> 10));
|
||||
OUT_RING((texpitch << 22) | (tex->offset >> 10));
|
||||
OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
|
||||
OUT_RING(0);
|
||||
OUT_RING((image->x << 16) | image->y);
|
||||
OUT_RING((image->x << 16) | (image->y % 2048));
|
||||
OUT_RING((image->width << 16) | height);
|
||||
RADEON_WAIT_UNTIL_2D_IDLE();
|
||||
ADVANCE_RING();
|
||||
|
@ -3037,6 +3045,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
|
|||
case RADEON_PARAM_FB_LOCATION:
|
||||
value = radeon_read_fb_location(dev_priv);
|
||||
break;
|
||||
case RADEON_PARAM_NUM_GB_PIPES:
|
||||
value = dev_priv->num_gb_pipes;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG("Invalid parameter %d\n", param->param);
|
||||
return -EINVAL;
|
||||
|
|
|
@ -981,16 +981,9 @@ EXPORT_SYMBOL_GPL(tty_perform_flush);
|
|||
int n_tty_ioctl(struct tty_struct *tty, struct file *file,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct tty_struct *real_tty;
|
||||
unsigned long flags;
|
||||
int retval;
|
||||
|
||||
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
|
||||
tty->driver->subtype == PTY_TYPE_MASTER)
|
||||
real_tty = tty->link;
|
||||
else
|
||||
real_tty = tty;
|
||||
|
||||
switch (cmd) {
|
||||
case TCXONC:
|
||||
retval = tty_check_change(tty);
|
||||
|
|
|
@ -1,28 +1,26 @@
|
|||
comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
|
||||
comment "A new alternative FireWire stack is available with EXPERIMENTAL=y"
|
||||
depends on EXPERIMENTAL=n
|
||||
|
||||
comment "Enable only one of the two stacks, unless you know what you are doing"
|
||||
depends on EXPERIMENTAL
|
||||
|
||||
config FIREWIRE
|
||||
tristate "IEEE 1394 (FireWire) support - alternative stack, EXPERIMENTAL"
|
||||
tristate "New FireWire stack, EXPERIMENTAL"
|
||||
depends on EXPERIMENTAL
|
||||
select CRC_ITU_T
|
||||
help
|
||||
This is the "Juju" FireWire stack, a new alternative implementation
|
||||
designed for robustness and simplicity. You can build either this
|
||||
stack, or the classic stack (the ieee1394 driver, ohci1394 etc.)
|
||||
or both. Please read http://wiki.linux1394.org/JujuMigration before
|
||||
you enable the new stack.
|
||||
stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both.
|
||||
Please read http://wiki.linux1394.org/JujuMigration before you
|
||||
enable the new stack.
|
||||
|
||||
To compile this driver as a module, say M here: the module will be
|
||||
called firewire-core. It functionally replaces ieee1394, raw1394,
|
||||
and video1394.
|
||||
|
||||
NOTE:
|
||||
|
||||
You should only build ONE of the stacks, unless you REALLY know what
|
||||
you are doing.
|
||||
|
||||
config FIREWIRE_OHCI
|
||||
tristate "Support for OHCI FireWire host controllers"
|
||||
tristate "OHCI-1394 controllers"
|
||||
depends on PCI && FIREWIRE
|
||||
help
|
||||
Enable this driver if you have a FireWire controller based
|
||||
|
@ -33,12 +31,12 @@ config FIREWIRE_OHCI
|
|||
called firewire-ohci. It replaces ohci1394 of the classic IEEE 1394
|
||||
stack.
|
||||
|
||||
NOTE:
|
||||
NOTE:
|
||||
|
||||
You should only build ohci1394 or firewire-ohci, but not both.
|
||||
If you nevertheless want to install both, you should configure them
|
||||
only as modules and blacklist the driver(s) which you don't want to
|
||||
have auto-loaded. Add either
|
||||
You should only build either firewire-ohci or the old ohci1394 driver,
|
||||
but not both. If you nevertheless want to install both, you should
|
||||
configure them only as modules and blacklist the driver(s) which you
|
||||
don't want to have auto-loaded. Add either
|
||||
|
||||
blacklist firewire-ohci
|
||||
or
|
||||
|
@ -60,7 +58,7 @@ config FIREWIRE_OHCI_DEBUG
|
|||
default y
|
||||
|
||||
config FIREWIRE_SBP2
|
||||
tristate "Support for storage devices (SBP-2 protocol driver)"
|
||||
tristate "Storage devices (SBP-2 protocol)"
|
||||
depends on FIREWIRE && SCSI
|
||||
help
|
||||
This option enables you to use SBP-2 devices connected to a
|
||||
|
|
|
@ -205,6 +205,7 @@ fw_device_op_read(struct file *file,
|
|||
return dequeue_event(client, buffer, count);
|
||||
}
|
||||
|
||||
/* caller must hold card->lock so that node pointers can be dereferenced here */
|
||||
static void
|
||||
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
|
||||
struct client *client)
|
||||
|
@ -214,7 +215,6 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
|
|||
event->closure = client->bus_reset_closure;
|
||||
event->type = FW_CDEV_EVENT_BUS_RESET;
|
||||
event->generation = client->device->generation;
|
||||
smp_rmb(); /* node_id must not be older than generation */
|
||||
event->node_id = client->device->node_id;
|
||||
event->local_node_id = card->local_node->node_id;
|
||||
event->bm_node_id = 0; /* FIXME: We don't track the BM. */
|
||||
|
@ -274,6 +274,7 @@ static int ioctl_get_info(struct client *client, void *buffer)
|
|||
{
|
||||
struct fw_cdev_get_info *get_info = buffer;
|
||||
struct fw_cdev_event_bus_reset bus_reset;
|
||||
struct fw_card *card = client->device->card;
|
||||
unsigned long ret = 0;
|
||||
|
||||
client->version = get_info->version;
|
||||
|
@ -299,13 +300,17 @@ static int ioctl_get_info(struct client *client, void *buffer)
|
|||
client->bus_reset_closure = get_info->bus_reset_closure;
|
||||
if (get_info->bus_reset != 0) {
|
||||
void __user *uptr = u64_to_uptr(get_info->bus_reset);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&card->lock, flags);
|
||||
fill_bus_reset_event(&bus_reset, client);
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
|
||||
if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
get_info->card = client->device->card->index;
|
||||
get_info->card = card->index;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -265,27 +265,25 @@ static void log_irqs(u32 evt)
|
|||
!(evt & OHCI1394_busReset))
|
||||
return;
|
||||
|
||||
printk(KERN_DEBUG KBUILD_MODNAME ": IRQ "
|
||||
"%08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
|
||||
evt,
|
||||
evt & OHCI1394_selfIDComplete ? " selfID" : "",
|
||||
evt & OHCI1394_RQPkt ? " AR_req" : "",
|
||||
evt & OHCI1394_RSPkt ? " AR_resp" : "",
|
||||
evt & OHCI1394_reqTxComplete ? " AT_req" : "",
|
||||
evt & OHCI1394_respTxComplete ? " AT_resp" : "",
|
||||
evt & OHCI1394_isochRx ? " IR" : "",
|
||||
evt & OHCI1394_isochTx ? " IT" : "",
|
||||
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
|
||||
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
|
||||
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
|
||||
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
|
||||
evt & OHCI1394_busReset ? " busReset" : "",
|
||||
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
|
||||
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
|
||||
OHCI1394_respTxComplete | OHCI1394_isochRx |
|
||||
OHCI1394_isochTx | OHCI1394_postedWriteErr |
|
||||
OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
|
||||
OHCI1394_regAccessFail | OHCI1394_busReset)
|
||||
fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
|
||||
evt & OHCI1394_selfIDComplete ? " selfID" : "",
|
||||
evt & OHCI1394_RQPkt ? " AR_req" : "",
|
||||
evt & OHCI1394_RSPkt ? " AR_resp" : "",
|
||||
evt & OHCI1394_reqTxComplete ? " AT_req" : "",
|
||||
evt & OHCI1394_respTxComplete ? " AT_resp" : "",
|
||||
evt & OHCI1394_isochRx ? " IR" : "",
|
||||
evt & OHCI1394_isochTx ? " IT" : "",
|
||||
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
|
||||
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
|
||||
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
|
||||
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
|
||||
evt & OHCI1394_busReset ? " busReset" : "",
|
||||
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
|
||||
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
|
||||
OHCI1394_respTxComplete | OHCI1394_isochRx |
|
||||
OHCI1394_isochTx | OHCI1394_postedWriteErr |
|
||||
OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
|
||||
OHCI1394_regAccessFail | OHCI1394_busReset)
|
||||
? " ?" : "");
|
||||
}
|
||||
|
||||
|
@ -308,23 +306,22 @@ static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;

printk(KERN_DEBUG KBUILD_MODNAME ": %d selfIDs, generation %d, "
"local node ID %04x\n", self_id_count, generation, node_id);
fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
self_id_count, generation, node_id);

for (; self_id_count--; ++s)
if ((*s & 1 << 23) == 0)
printk(KERN_DEBUG "selfID 0: %08x, phy %d [%c%c%c] "
"%s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
"%s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
else
printk(KERN_DEBUG "selfID n: %08x, phy %d "
"[%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
_p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
_p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}

static const char *evts[] = {
@ -373,15 +370,14 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
evt = 0x1f;

if (evt == OHCI1394_evt_bus_reset) {
printk(KERN_DEBUG "A%c evt_bus_reset, generation %d\n",
dir, (header[2] >> 16) & 0xff);
fw_notify("A%c evt_bus_reset, generation %d\n",
dir, (header[2] >> 16) & 0xff);
return;
}

if (header[0] == ~header[1]) {
printk(KERN_DEBUG "A%c %s, %s, %08x\n",
dir, evts[evt], phys[header[0] >> 30 & 0x3],
header[0]);
fw_notify("A%c %s, %s, %08x\n",
dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
return;
}

@ -400,24 +396,23 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)

switch (tcode) {
case 0xe: case 0xa:
printk(KERN_DEBUG "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
break;
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
printk(KERN_DEBUG "A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
"%s, %04x%08x%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], header[1] & 0xffff, header[2], specific);
fw_notify("A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
"%s, %04x%08x%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], header[1] & 0xffff, header[2], specific);
break;
default:
printk(KERN_DEBUG "A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
"%s%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], specific);
fw_notify("A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
"%s%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], specific);
}
}
@ -548,6 +543,11 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.header_length = 12;
p.payload_length = 0;
break;

default:
/* FIXME: Stop context, discard everything, and restart? */
p.header_length = 0;
p.payload_length = 0;
}

p.payload = (void *) buffer + p.header_length;
@ -1468,6 +1468,9 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);

reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
reg_write(ohci, OHCI1394_LinkControlClear,
OHCI1394_LinkControl_rcvPhyPkt);
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_rcvSelfID |
OHCI1394_LinkControl_cycleTimerEnable |

@ -1481,7 +1484,6 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
ar_context_run(&ohci->ar_request_ctx);
ar_context_run(&ohci->ar_response_ctx);

reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
reg_write(ohci, OHCI1394_IntEventClear, ~0);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);

@ -20,6 +20,7 @@

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>

@ -297,37 +298,55 @@ EXPORT_SYMBOL(fw_send_request);
struct fw_phy_packet {
struct fw_packet packet;
struct completion done;
struct kref kref;
};

static void
transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
static void phy_packet_release(struct kref *kref)
{
struct fw_phy_packet *p =
container_of(kref, struct fw_phy_packet, kref);
kfree(p);
}

static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct fw_phy_packet *p =
container_of(packet, struct fw_phy_packet, packet);

complete(&p->done);
kref_put(&p->kref, phy_packet_release);
}

void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
struct fw_phy_packet p;
struct fw_phy_packet *p;
long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
PHY_CONFIG_ROOT_ID(node_id) |
PHY_CONFIG_GAP_COUNT(gap_count);

p.packet.header[0] = data;
p.packet.header[1] = ~data;
p.packet.header_length = 8;
p.packet.payload_length = 0;
p.packet.speed = SCODE_100;
p.packet.generation = generation;
p.packet.callback = transmit_phy_packet_callback;
init_completion(&p.done);
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return;

card->driver->send_request(card, &p.packet);
wait_for_completion(&p.done);
p->packet.header[0] = data;
p->packet.header[1] = ~data;
p->packet.header_length = 8;
p->packet.payload_length = 0;
p->packet.speed = SCODE_100;
p->packet.generation = generation;
p->packet.callback = transmit_phy_packet_callback;
init_completion(&p->done);
kref_set(&p->kref, 2);

card->driver->send_request(card, &p->packet);
timeout = wait_for_completion_timeout(&p->done, timeout);
kref_put(&p->kref, phy_packet_release);

/* will leak p if the callback is never executed */
WARN_ON(timeout == 0);
}

void fw_flush_transactions(struct fw_card *card)

@ -572,7 +591,8 @@ allocate_request(struct fw_packet *p)
break;

default:
BUG();
fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
p->header[0], p->header[1], p->header[2]);
return NULL;
}

@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/dmi.h>
#include <asm/io.h>

/* uGuru3 bank addresses */

@ -323,7 +324,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX1 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
{ 0x0013, "unknown", {
{ 0x0013, "Abit AW8D", {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },

@ -349,6 +350,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX2 Fan", 36, 2, 60, 1, 0 },
{ "AUX3 Fan", 37, 2, 60, 1, 0 },
{ "AUX4 Fan", 38, 2, 60, 1, 0 },
{ "AUX5 Fan", 39, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
{ 0x0014, "Abit AB9 Pro", {

@ -1111,11 +1113,12 @@ static int __init abituguru3_detect(void)
{
/* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or
0x08 at DATA and 0xAC at CMD. Sometimes the uGuru3 will hold 0x05
at CMD instead, why is unknown. So we test for 0x05 too. */
or 0x55 at CMD instead, why is unknown. */
u8 data_val = inb_p(ABIT_UGURU3_BASE + ABIT_UGURU3_DATA);
u8 cmd_val = inb_p(ABIT_UGURU3_BASE + ABIT_UGURU3_CMD);
if (((data_val == 0x00) || (data_val == 0x08)) &&
((cmd_val == 0xAC) || (cmd_val == 0x05)))
((cmd_val == 0xAC) || (cmd_val == 0x05) ||
(cmd_val == 0x55)))
return ABIT_UGURU3_BASE;

ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = "

@ -1138,6 +1141,15 @@ static int __init abituguru3_init(void)
int address, err;
struct resource res = { .flags = IORESOURCE_IO };

#ifdef CONFIG_DMI
const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);

/* safety check, refuse to load on non Abit motherboards */
if (!force && (!board_vendor ||
strcmp(board_vendor, "http://www.abit.com.tw/")))
return -ENODEV;
#endif

address = abituguru3_detect();
if (address < 0)
return address;

@ -309,6 +309,9 @@ static struct adt7473_data *adt7473_update_device(struct device *dev)
ADT7473_REG_PWM_BHVR(i));
}

i = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG4);
data->max_duty_at_overheat = !!(i & ADT7473_CFG4_MAX_DUTY_AT_OVT);

data->limits_last_updated = local_jiffies;
data->limits_valid = 1;

@ -251,10 +251,13 @@ static int lm75_detach_client(struct i2c_client *client)
the SMBus standard. */
static int lm75_read_value(struct i2c_client *client, u8 reg)
{
int value;

if (reg == LM75_REG_CONF)
return i2c_smbus_read_byte_data(client, reg);
else
return swab16(i2c_smbus_read_word_data(client, reg));

value = i2c_smbus_read_word_data(client, reg);
return (value < 0) ? value : swab16(value);
}

static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)

@ -287,9 +290,16 @@ static struct lm75_data *lm75_update_device(struct device *dev)
int i;
dev_dbg(&client->dev, "Starting lm75 update\n");

for (i = 0; i < ARRAY_SIZE(data->temp); i++)
data->temp[i] = lm75_read_value(client,
LM75_REG_TEMP[i]);
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
int status;

status = lm75_read_value(client, LM75_REG_TEMP[i]);
if (status < 0)
dev_dbg(&client->dev, "reg %d, err %d\n",
LM75_REG_TEMP[i], status);
else
data->temp[i] = status;
}
data->last_updated = jiffies;
data->valid = 1;
}

@ -192,23 +192,20 @@ static int RANGE_TO_REG( int range )
{
int i;

if ( range < lm85_range_map[0] ) {
return 0 ;
} else if ( range > lm85_range_map[15] ) {
if (range >= lm85_range_map[15])
return 15 ;
} else { /* find closest match */
for ( i = 14 ; i >= 0 ; --i ) {
if ( range > lm85_range_map[i] ) { /* range bracketed */
if ((lm85_range_map[i+1] - range) <
(range - lm85_range_map[i])) {
i++;
break;
}
break;
}

/* Find the closest match */
for (i = 14; i >= 0; --i) {
if (range >= lm85_range_map[i]) {
if ((lm85_range_map[i + 1] - range) <
(range - lm85_range_map[i]))
return i + 1;
return i;
}
}
return( i & 0x0f );

return 0;
}
#define RANGE_FROM_REG(val) (lm85_range_map[(val)&0x0f])

@ -823,13 +823,6 @@ config BLK_DEV_IDE_RAPIDE
|
|||
Say Y here if you want to support the Yellowstone RapIDE controller
|
||||
manufactured for use with Acorn computers.
|
||||
|
||||
config BLK_DEV_IDE_BAST
|
||||
tristate "Simtec BAST / Thorcom VR1000 IDE support"
|
||||
depends on ARM && (ARCH_BAST || MACH_VR1000)
|
||||
help
|
||||
Say Y here if you want to support the onboard IDE channels on the
|
||||
Simtec BAST or the Thorcom VR1000
|
||||
|
||||
config IDE_H8300
|
||||
tristate "H8300 IDE support"
|
||||
depends on H8300
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
|
||||
obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
|
||||
obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
|
||||
obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o
|
||||
obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
|
||||
|
||||
ifeq ($(CONFIG_IDE_ARM), m)
|
||||
|
|
|
@ -1,90 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2003-2004 Simtec Electronics
|
||||
* Ben Dooks <ben@simtec.co.uk>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ide.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/arch/map.h>
|
||||
#include <asm/arch/bast-map.h>
|
||||
#include <asm/arch/bast-irq.h>
|
||||
|
||||
#define DRV_NAME "bast-ide"
|
||||
|
||||
static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
|
||||
{
|
||||
ide_hwif_t *hwif;
|
||||
hw_regs_t hw;
|
||||
int i;
|
||||
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
|
||||
|
||||
memset(&hw, 0, sizeof(hw));
|
||||
|
||||
base += BAST_IDE_CS;
|
||||
aux += BAST_IDE_CS;
|
||||
|
||||
for (i = 0; i <= 7; i++) {
|
||||
hw.io_ports_array[i] = (unsigned long)base;
|
||||
base += 0x20;
|
||||
}
|
||||
|
||||
hw.io_ports.ctl_addr = aux + (6 * 0x20);
|
||||
hw.irq = irq;
|
||||
hw.chipset = ide_generic;
|
||||
|
||||
hwif = ide_find_port();
|
||||
if (hwif == NULL)
|
||||
goto out;
|
||||
|
||||
i = hwif->index;
|
||||
|
||||
ide_init_port_data(hwif, i);
|
||||
ide_init_port_hw(hwif, &hw);
|
||||
hwif->port_ops = NULL;
|
||||
|
||||
idx[0] = i;
|
||||
|
||||
ide_device_add(idx, NULL);
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init bastide_init(void)
|
||||
{
|
||||
unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS;
|
||||
|
||||
/* we can treat the VR1000 and the BAST the same */
|
||||
|
||||
if (!(machine_is_bast() || machine_is_vr1000()))
|
||||
return 0;
|
||||
|
||||
printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
|
||||
|
||||
if (!request_mem_region(base, 0x400000, DRV_NAME)) {
|
||||
printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
|
||||
bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
module_init(bastide_init);
|
||||
|
||||
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Simtec BAST / Thorcom VR1000 IDE driver");
|
|
@ -353,8 +353,8 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
|
|||
struct clk *clkp;
|
||||
struct resource *mem, *irq;
|
||||
ide_hwif_t *hwif;
|
||||
void __iomem *base;
|
||||
int pribase, i;
|
||||
unsigned long base;
|
||||
int i;
|
||||
hw_regs_t hw;
|
||||
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
|
||||
|
||||
|
@ -374,22 +374,27 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
|
|||
printk(KERN_ERR "failed to get memory region resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (irq == NULL) {
|
||||
printk(KERN_ERR "failed to get IRQ resource\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
base = (void *)mem->start;
|
||||
if (request_mem_region(mem->start, mem->end - mem->start + 1,
|
||||
"palm_bk3710") == NULL) {
|
||||
printk(KERN_ERR "failed to request memory region\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
base = IO_ADDRESS(mem->start);
|
||||
|
||||
/* Configure the Palm Chip controller */
|
||||
palm_bk3710_chipinit(base);
|
||||
palm_bk3710_chipinit((void __iomem *)base);
|
||||
|
||||
pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
|
||||
for (i = 0; i < IDE_NR_PORTS - 2; i++)
|
||||
hw.io_ports_array[i] = pribase + i;
|
||||
hw.io_ports.ctl_addr = mem->start +
|
||||
IDE_PALM_ATA_PRI_CTL_OFFSET;
|
||||
hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i;
|
||||
hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET;
|
||||
hw.irq = irq->start;
|
||||
hw.chipset = ide_palm3710;
|
||||
|
||||
|
@ -434,4 +439,3 @@ static int __init palm_bk3710_init(void)
|
|||
|
||||
module_init(palm_bk3710_init);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
|
|
@ -225,10 +225,10 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
|
|||
u8 stat;
|
||||
|
||||
/*
|
||||
* Last sector was transfered, wait until drive is ready.
|
||||
* This can take up to 10 usec, but we will wait max 1 ms.
|
||||
* Last sector was transfered, wait until device is ready. This can
|
||||
* take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
|
||||
*/
|
||||
for (retries = 0; retries < 100; retries++) {
|
||||
for (retries = 0; retries < 1000; retries++) {
|
||||
stat = ide_read_status(drive);
|
||||
|
||||
if (stat & BUSY_STAT)
|
||||
|
|
|
@ -410,6 +410,7 @@ static struct pcmcia_device_id ide_ids[] = {
|
|||
PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
|
||||
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
|
||||
PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
|
||||
PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */
|
||||
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
|
||||
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
|
||||
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
|
||||
|
@ -439,6 +440,7 @@ static struct pcmcia_device_id ide_ids[] = {
|
|||
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
|
||||
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
|
||||
PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
|
||||
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
|
||||
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
|
||||
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
|
||||
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
|
||||
|
@ -449,6 +451,7 @@ static struct pcmcia_device_id ide_ids[] = {
|
|||
PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
|
||||
PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
|
||||
PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
|
||||
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
|
||||
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
|
||||
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
|
||||
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
|
||||
|
|
|
@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
source "drivers/firewire/Kconfig"

config IEEE1394
tristate "IEEE 1394 (FireWire) support"
tristate "Stable FireWire stack"
depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also

@ -19,30 +19,45 @@ config IEEE1394
To compile this driver as a module, say M here: the
module will be called ieee1394.

comment "Subsystem Options"
depends on IEEE1394

config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
depends on IEEE1394
config IEEE1394_OHCI1394
tristate "OHCI-1394 controllers"
depends on PCI && IEEE1394
help
If you say Y here, you will get very verbose debugging logs from
the subsystem which includes a dump of the header of every sent
and received packet. This can amount to a high amount of data
collected in a very short time which is usually also saved to
disk by the system logging daemons.
Enable this driver if you have an IEEE 1394 controller based on the
OHCI-1394 specification. The current driver is only tested with OHCI
chipsets made by Texas Instruments and NEC. Most third-party vendors
use one of these chipsets. It should work with any OHCI-1394
compliant card, however.

Say Y if you really want or need the debugging output, everyone
else says N.
To compile this driver as a module, say M here: the
module will be called ohci1394.

comment "Controllers"
depends on IEEE1394
NOTE:

comment "Texas Instruments PCILynx requires I2C"
You should only build either ohci1394 or the new firewire-ohci driver,
but not both. If you nevertheless want to install both, you should
configure them only as modules and blacklist the driver(s) which you
don't want to have auto-loaded. Add either

blacklist firewire-ohci
or
blacklist ohci1394
blacklist video1394
blacklist dv1394

to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
depending on your distribution. The latter two modules should be
blacklisted together with ohci1394 because they depend on ohci1394.

If you have an old modprobe which doesn't implement the blacklist
directive, use "install modulename /bin/true" for the modules to be
blacklisted.

comment "PCILynx controller requires I2C"
depends on IEEE1394 && I2C=n

config IEEE1394_PCILYNX
tristate "Texas Instruments PCILynx support"
tristate "PCILynx controller"
depends on PCI && IEEE1394 && I2C
select I2C_ALGOBIT
help

@ -57,35 +72,11 @@ config IEEE1394_PCILYNX
PowerMacs G3 B&W contain the PCILynx controller. Therefore
almost everybody can say N here.

config IEEE1394_OHCI1394
tristate "OHCI-1394 support"
depends on PCI && IEEE1394
help
Enable this driver if you have an IEEE 1394 controller based on the
OHCI-1394 specification. The current driver is only tested with OHCI
chipsets made by Texas Instruments and NEC. Most third-party vendors
use one of these chipsets. It should work with any OHCI-1394
compliant card, however.

To compile this driver as a module, say M here: the
module will be called ohci1394.

comment "Protocols"
depends on IEEE1394

config IEEE1394_VIDEO1394
tristate "OHCI-1394 Video support"
depends on IEEE1394 && IEEE1394_OHCI1394
help
This option enables video device usage for OHCI-1394 cards. Enable
this option only if you have an IEEE 1394 video device connected to
an OHCI-1394 card.

comment "SBP-2 support (for storage devices) requires SCSI"
depends on IEEE1394 && SCSI=n

config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
tristate "Storage devices (SBP-2 protocol)"
depends on IEEE1394 && SCSI
help
This option enables you to use SBP-2 devices connected to an IEEE

@ -127,24 +118,47 @@ config IEEE1394_ETH1394

The module is called eth1394 although it does not emulate Ethernet.

config IEEE1394_RAWIO
tristate "raw1394 userspace interface"
depends on IEEE1394
help
This option adds support for the raw1394 device file which enables
direct communication of user programs with IEEE 1394 devices
(isochronous and asynchronous). Almost all application programs
which access FireWire require this option.

To compile this driver as a module, say M here: the module will be
called raw1394.

config IEEE1394_VIDEO1394
tristate "video1394 userspace interface"
depends on IEEE1394 && IEEE1394_OHCI1394
help
This option adds support for the video1394 device files which enable
isochronous communication of user programs with IEEE 1394 devices,
especially video capture or export. This interface is used by all
libdc1394 based programs and by several other programs, in addition to
the raw1394 interface. It is generally not required for DV capture.

To compile this driver as a module, say M here: the module will be
called video1394.

config IEEE1394_DV1394
tristate "OHCI-DV I/O support (deprecated)"
tristate "dv1394 userspace interface (deprecated)"
depends on IEEE1394 && IEEE1394_OHCI1394
help
The dv1394 driver is unsupported and may be removed from Linux in a
future release. Its functionality is now provided by raw1394 together
with libraries such as libiec61883.

config IEEE1394_RAWIO
tristate "Raw IEEE1394 I/O support"
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
depends on IEEE1394
help
This option adds support for the raw1394 device file which enables
direct communication of user programs with the IEEE 1394 bus and thus
with the attached peripherals. Almost all application programs which
access FireWire require this option.
If you say Y here, you will get very verbose debugging logs from the
ieee1394 drivers, including sent and received packet headers. This
will quickly result in large amounts of data sent to the system log.

To compile this driver as a module, say M here: the module will be
called raw1394.
Say Y if you really need the debugging output. Everyone else says N.

endmenu

@ -109,7 +109,11 @@ static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_m
{
struct page *page;

page = alloc_pages(gfp_mask, order);
/*
* Use __GFP_ZERO because buggy firmware assumes ICM pages are
* cleared, and subtle failures are seen if they aren't.
*/
page = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (!page)
return -ENOMEM;

@ -176,7 +176,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
* we set it now, so we can trap and pass that trap to the Guest if it
* uses the FPU. */
if (cpu->ts)
lguest_set_ts();
unlazy_fpu(current);

/* SYSENTER is an optimized way of doing system calls. We can't allow
* it because it always jumps to privilege level 0. A normal Guest

@ -196,6 +196,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
* trap made the switcher code come back, and an error code which some
* traps set. */

/* Restore SYSENTER if it's supposed to be on. */
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);

/* If the Guest page faulted, then the cr2 register will tell us the
* bad virtual address. We have to grab this now, because once we
* re-enable interrupts an interrupt could fault and thus overwrite

@ -203,13 +207,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
if (cpu->regs->trapnum == 14)
cpu->arch.last_pagefault = read_cr2();
/* Similarly, if we took a trap because the Guest used the FPU,
* we have to restore the FPU it expects to see. */
* we have to restore the FPU it expects to see.
* math_state_restore() may sleep and we may even move off to
* a different CPU. So all the critical stuff should be done
* before this. */
else if (cpu->regs->trapnum == 7)
math_state_restore();

/* Restore SYSENTER if it's supposed to be on. */
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
}

/*H:130 Now we've examined the hypercall code; our Guest can make requests.

@ -942,7 +942,7 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
m->msg_namelen = 0;

if (skb) {
total_len = min(total_len, skb->len);
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
if (error == 0)
error = total_len;

@ -49,6 +49,7 @@
|
|||
#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
|
||||
|
||||
#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
|
||||
#define CTS_CHECK_JIFFIES (HZ / 50)
|
||||
|
||||
#ifdef CONFIG_SERIAL_BFIN_DMA
|
||||
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
|
||||
|
@ -290,11 +291,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
|
|||
{
|
||||
struct circ_buf *xmit = &uart->port.info->xmit;
|
||||
|
||||
if (uart->port.x_char) {
|
||||
UART_PUT_CHAR(uart, uart->port.x_char);
|
||||
uart->port.icount.tx++;
|
||||
uart->port.x_char = 0;
|
||||
}
|
||||
/*
|
||||
* Check the modem control lines before
|
||||
* transmitting anything.
|
||||
|
@ -306,6 +302,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
|
|||
return;
|
||||
}
|
||||
|
||||
if (uart->port.x_char) {
|
||||
UART_PUT_CHAR(uart, uart->port.x_char);
|
||||
uart->port.icount.tx++;
|
||||
uart->port.x_char = 0;
|
||||
}
|
||||
|
||||
while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
|
||||
UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
|
||||
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
|
||||
|
@ -345,15 +347,6 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
static void bfin_serial_do_work(struct work_struct *work)
|
||||
{
|
||||
struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue);
|
||||
|
||||
bfin_serial_mctrl_check(uart);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SERIAL_BFIN_DMA
|
||||
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
|
||||
{
|
||||
|
@ -361,6 +354,12 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
|
|||
|
||||
uart->tx_done = 0;
|
||||
|
||||
/*
|
||||
* Check the modem control lines before
|
||||
* transmitting anything.
|
||||
*/
|
||||
bfin_serial_mctrl_check(uart);
|
||||
|
||||
if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
|
||||
uart->tx_count = 0;
|
||||
uart->tx_done = 1;
|
||||
|
@ -373,12 +372,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
|
|||
uart->port.x_char = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check the modem control lines before
|
||||
* transmitting anything.
|
||||
*/
|
||||
bfin_serial_mctrl_check(uart);
|
||||
|
||||
uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
|
||||
if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
|
||||
uart->tx_count = UART_XMIT_SIZE - xmit->tail;
|
||||
|
@ -565,7 +558,10 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart)
|
|||
uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
|
||||
if (!(status & TIOCM_CTS)) {
|
||||
tty->hw_stopped = 1;
|
||||
schedule_work(&uart->cts_workqueue);
|
||||
uart->cts_timer.data = (unsigned long)(uart);
|
||||
uart->cts_timer.function = (void *)bfin_serial_mctrl_check;
|
||||
uart->cts_timer.expires = jiffies + CTS_CHECK_JIFFIES;
|
||||
add_timer(&(uart->cts_timer));
|
||||
} else {
|
||||
tty->hw_stopped = 0;
|
||||
}
|
||||
|
@ -885,7 +881,7 @@ static void __init bfin_serial_init_ports(void)
|
|||
init_timer(&(bfin_serial_ports[i].rx_dma_timer));
|
||||
#endif
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work);
|
||||
init_timer(&(bfin_serial_ports[i].cts_timer));
|
||||
bfin_serial_ports[i].cts_pin =
|
||||
bfin_serial_resource[i].uart_cts_pin;
|
||||
bfin_serial_ports[i].rts_pin =
|
||||
|
|
|
@ -68,7 +68,6 @@ obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
|
|||
obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
|
||||
obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o iTCO_vendor_support.o
|
||||
obj-$(CONFIG_IT8712F_WDT) += it8712f_wdt.o
|
||||
CFLAGS_hpwdt.o += -O
|
||||
obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o
|
||||
obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
|
||||
obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
|
||||
|
|
|
@ -140,49 +140,53 @@ static struct pci_device_id hpwdt_devices[] = {
|
|||
};
|
||||
MODULE_DEVICE_TABLE(pci, hpwdt_devices);
|
||||
|
||||
extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs, unsigned long *pRomEntry);
|
||||
|
||||
#ifndef CONFIG_X86_64
|
||||
/* --32 Bit Bios------------------------------------------------------------ */
|
||||
|
||||
#define HPWDT_ARCH 32
|
||||
|
||||
asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
|
||||
unsigned long *pRomEntry)
|
||||
{
|
||||
asm("pushl %ebp \n\t"
|
||||
"movl %esp, %ebp \n\t"
|
||||
"pusha \n\t"
|
||||
"pushf \n\t"
|
||||
"push %es \n\t"
|
||||
"push %ds \n\t"
|
||||
"pop %es \n\t"
|
||||
"movl 8(%ebp),%eax \n\t"
|
||||
"movl 4(%eax),%ebx \n\t"
|
||||
"movl 8(%eax),%ecx \n\t"
|
||||
"movl 12(%eax),%edx \n\t"
|
||||
"movl 16(%eax),%esi \n\t"
|
||||
"movl 20(%eax),%edi \n\t"
|
||||
"movl (%eax),%eax \n\t"
|
||||
"push %cs \n\t"
|
||||
"call *12(%ebp) \n\t"
|
||||
"pushf \n\t"
|
||||
"pushl %eax \n\t"
|
||||
"movl 8(%ebp),%eax \n\t"
|
||||
"movl %ebx,4(%eax) \n\t"
|
||||
"movl %ecx,8(%eax) \n\t"
|
||||
"movl %edx,12(%eax) \n\t"
|
||||
"movl %esi,16(%eax) \n\t"
|
||||
"movl %edi,20(%eax) \n\t"
|
||||
"movw %ds,24(%eax) \n\t"
|
||||
"movw %es,26(%eax) \n\t"
|
||||
"popl %ebx \n\t"
|
||||
"movl %ebx,(%eax) \n\t"
|
||||
"popl %ebx \n\t"
|
||||
"movl %ebx,28(%eax) \n\t"
|
||||
"pop %es \n\t"
|
||||
"popf \n\t"
|
||||
"popa \n\t"
|
||||
"leave \n\t" "ret");
|
||||
}
|
||||
asm(".text \n\t"
|
||||
".align 4 \n"
|
||||
"asminline_call: \n\t"
|
||||
"pushl %ebp \n\t"
|
||||
"movl %esp, %ebp \n\t"
|
||||
"pusha \n\t"
|
||||
"pushf \n\t"
|
||||
"push %es \n\t"
|
||||
"push %ds \n\t"
|
||||
"pop %es \n\t"
|
||||
"movl 8(%ebp),%eax \n\t"
|
||||
"movl 4(%eax),%ebx \n\t"
|
||||
"movl 8(%eax),%ecx \n\t"
|
||||
"movl 12(%eax),%edx \n\t"
|
||||
"movl 16(%eax),%esi \n\t"
|
||||
"movl 20(%eax),%edi \n\t"
|
||||
"movl (%eax),%eax \n\t"
|
||||
"push %cs \n\t"
|
||||
"call *12(%ebp) \n\t"
|
||||
"pushf \n\t"
|
||||
"pushl %eax \n\t"
|
||||
"movl 8(%ebp),%eax \n\t"
|
||||
"movl %ebx,4(%eax) \n\t"
|
||||
"movl %ecx,8(%eax) \n\t"
|
||||
"movl %edx,12(%eax) \n\t"
|
||||
"movl %esi,16(%eax) \n\t"
|
||||
"movl %edi,20(%eax) \n\t"
|
||||
"movw %ds,24(%eax) \n\t"
|
||||
"movw %es,26(%eax) \n\t"
|
||||
"popl %ebx \n\t"
|
||||
"movl %ebx,(%eax) \n\t"
|
||||
"popl %ebx \n\t"
|
||||
"movl %ebx,28(%eax) \n\t"
|
||||
"pop %es \n\t"
|
||||
"popf \n\t"
|
||||
"popa \n\t"
|
||||
"leave \n\t"
|
||||
"ret \n\t"
|
||||
".previous");
|
||||
|
||||
|
||||
/*
|
||||
* cru_detect
|
||||
|
@ -333,43 +337,44 @@ static int __devinit detect_cru_service(void)
|
|||
|
||||
#define HPWDT_ARCH 64
|
||||
|
||||
asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
|
||||
unsigned long *pRomEntry)
|
||||
{
|
||||
asm("pushq %rbp \n\t"
|
||||
"movq %rsp, %rbp \n\t"
|
||||
"pushq %rax \n\t"
|
||||
"pushq %rbx \n\t"
|
||||
"pushq %rdx \n\t"
|
||||
"pushq %r12 \n\t"
|
||||
"pushq %r9 \n\t"
|
||||
"movq %rsi, %r12 \n\t"
|
||||
"movq %rdi, %r9 \n\t"
|
||||
"movl 4(%r9),%ebx \n\t"
|
||||
"movl 8(%r9),%ecx \n\t"
|
||||
"movl 12(%r9),%edx \n\t"
|
||||
"movl 16(%r9),%esi \n\t"
|
||||
"movl 20(%r9),%edi \n\t"
|
||||
"movl (%r9),%eax \n\t"
|
||||
"call *%r12 \n\t"
|
||||
"pushfq \n\t"
|
||||
"popq %r12 \n\t"
|
||||
"popfq \n\t"
|
||||
"movl %eax, (%r9) \n\t"
|
||||
"movl %ebx, 4(%r9) \n\t"
|
||||
"movl %ecx, 8(%r9) \n\t"
|
||||
"movl %edx, 12(%r9) \n\t"
|
||||
"movl %esi, 16(%r9) \n\t"
|
||||
"movl %edi, 20(%r9) \n\t"
|
||||
"movq %r12, %rax \n\t"
|
||||
"movl %eax, 28(%r9) \n\t"
|
||||
"popq %r9 \n\t"
|
||||
"popq %r12 \n\t"
|
||||
"popq %rdx \n\t"
|
||||
"popq %rbx \n\t"
|
||||
"popq %rax \n\t"
|
||||
"leave \n\t" "ret");
|
||||
}
|
||||
asm(".text \n\t"
|
||||
".align 4 \n"
|
||||
"asminline_call: \n\t"
|
||||
"pushq %rbp \n\t"
|
||||
"movq %rsp, %rbp \n\t"
|
||||
"pushq %rax \n\t"
|
||||
"pushq %rbx \n\t"
|
||||
"pushq %rdx \n\t"
|
||||
"pushq %r12 \n\t"
|
||||
"pushq %r9 \n\t"
|
||||
"movq %rsi, %r12 \n\t"
|
||||
"movq %rdi, %r9 \n\t"
|
||||
"movl 4(%r9),%ebx \n\t"
|
||||
"movl 8(%r9),%ecx \n\t"
|
||||
"movl 12(%r9),%edx \n\t"
|
||||
"movl 16(%r9),%esi \n\t"
|
||||
"movl 20(%r9),%edi \n\t"
|
||||
"movl (%r9),%eax \n\t"
|
||||
"call *%r12 \n\t"
|
||||
"pushfq \n\t"
|
||||
"popq %r12 \n\t"
|
||||
"popfq \n\t"
|
||||
"movl %eax, (%r9) \n\t"
|
||||
"movl %ebx, 4(%r9) \n\t"
|
||||
"movl %ecx, 8(%r9) \n\t"
|
||||
"movl %edx, 12(%r9) \n\t"
|
||||
"movl %esi, 16(%r9) \n\t"
|
||||
"movl %edi, 20(%r9) \n\t"
|
||||
"movq %r12, %rax \n\t"
|
||||
"movl %eax, 28(%r9) \n\t"
|
||||
"popq %r9 \n\t"
|
||||
"popq %r12 \n\t"
|
||||
"popq %rdx \n\t"
|
||||
"popq %rbx \n\t"
|
||||
"popq %rax \n\t"
|
||||
"leave \n\t"
|
||||
"ret \n\t"
|
||||
".previous");
|
||||
|
||||
/*
|
||||
* dmi_find_cru
|
||||
|
|
|
@ -529,7 +529,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
/* Clear master flag /before/ clearing selector flag. */
rmb();
wmb();
#endif
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
while (pending_words != 0) {

@ -855,7 +855,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
*/

/* Update group descriptor block for new group */
gdp = (struct ext4_group_desc *)primary->b_data + gdb_off;
gdp = (struct ext4_group_desc *)((char *)primary->b_data +
gdb_off * EXT4_DESC_SIZE(sb));

ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */

@ -246,15 +246,11 @@ static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
|
|||
|
||||
}
|
||||
|
||||
static inline unsigned int zero_metapath_length(const struct metapath *mp,
|
||||
unsigned height)
|
||||
static inline unsigned int metapath_branch_start(const struct metapath *mp)
|
||||
{
|
||||
unsigned int i;
|
||||
for (i = 0; i < height - 1; i++) {
|
||||
if (mp->mp_list[i] != 0)
|
||||
return i;
|
||||
}
|
||||
return height;
|
||||
if (mp->mp_list[0] == 0)
|
||||
return 2;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -436,7 +432,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
|
|||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
struct buffer_head *dibh = mp->mp_bh[0];
|
||||
u64 bn, dblock = 0;
|
||||
unsigned n, i, blks, alloced = 0, iblks = 0, zmpl = 0;
|
||||
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
|
||||
unsigned dblks = 0;
|
||||
unsigned ptrs_per_blk;
|
||||
const unsigned end_of_metadata = height - 1;
|
||||
|
@ -471,9 +467,8 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
|
|||
/* Building up tree height */
|
||||
state = ALLOC_GROW_HEIGHT;
|
||||
iblks = height - ip->i_height;
|
||||
zmpl = zero_metapath_length(mp, height);
|
||||
iblks -= zmpl;
|
||||
iblks += height;
|
||||
branch_start = metapath_branch_start(mp);
|
||||
iblks += (height - branch_start);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -509,13 +504,13 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
|
|||
sizeof(struct gfs2_meta_header));
|
||||
*ptr = zero_bn;
|
||||
state = ALLOC_GROW_DEPTH;
|
||||
for(i = zmpl; i < height; i++) {
|
||||
for(i = branch_start; i < height; i++) {
|
||||
if (mp->mp_bh[i] == NULL)
|
||||
break;
|
||||
brelse(mp->mp_bh[i]);
|
||||
mp->mp_bh[i] = NULL;
|
||||
}
|
||||
i = zmpl;
|
||||
i = branch_start;
|
||||
}
|
||||
if (n == 0)
|
||||
break;
|
||||
|
|
|
@ -195,7 +195,7 @@ static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
|
|||
depending on architecture. I've experimented with several ways
|
||||
of writing this section such as using an else before the goto
|
||||
but this one seems to be the fastest. */
|
||||
while ((unsigned char *)plong < end - 1) {
|
||||
while ((unsigned char *)plong < end - sizeof(unsigned long)) {
|
||||
prefetch(plong + 1);
|
||||
if (((*plong) & LBITMASK) != lskipval)
|
||||
break;
|
||||
|
|
|
@ -130,10 +130,11 @@ static int xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p,
|
|||
struct mnt_fhstatus *res)
|
||||
{
|
||||
struct nfs_fh *fh = res->fh;
|
||||
unsigned size;
|
||||
|
||||
if ((res->status = ntohl(*p++)) == 0) {
|
||||
int size = ntohl(*p++);
|
||||
if (size <= NFS3_FHSIZE) {
|
||||
size = ntohl(*p++);
|
||||
if (size <= NFS3_FHSIZE && size != 0) {
|
||||
fh->size = size;
|
||||
memcpy(fh->data, p, size);
|
||||
} else
|
||||
|
|
|
@ -1216,8 +1216,6 @@ static int nfs_validate_mount_data(void *options,
|
|||
{
|
||||
struct nfs_mount_data *data = (struct nfs_mount_data *)options;
|
||||
|
||||
memset(args, 0, sizeof(*args));
|
||||
|
||||
if (data == NULL)
|
||||
goto out_no_data;
|
||||
|
||||
|
@ -1251,13 +1249,13 @@ static int nfs_validate_mount_data(void *options,
|
|||
case 5:
|
||||
memset(data->context, 0, sizeof(data->context));
|
||||
case 6:
|
||||
if (data->flags & NFS_MOUNT_VER3)
|
||||
if (data->flags & NFS_MOUNT_VER3) {
|
||||
if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
|
||||
goto out_invalid_fh;
|
||||
mntfh->size = data->root.size;
|
||||
else
|
||||
} else
|
||||
mntfh->size = NFS2_FHSIZE;
|
||||
|
||||
if (mntfh->size > sizeof(mntfh->data))
|
||||
goto out_invalid_fh;
|
||||
|
||||
memcpy(mntfh->data, data->root.data, mntfh->size);
|
||||
if (mntfh->size < sizeof(mntfh->data))
|
||||
|
@ -1585,24 +1583,29 @@ static int nfs_get_sb(struct file_system_type *fs_type,
|
|||
{
|
||||
struct nfs_server *server = NULL;
|
||||
struct super_block *s;
|
||||
struct nfs_fh mntfh;
|
||||
struct nfs_parsed_mount_data data;
|
||||
struct nfs_parsed_mount_data *data;
|
||||
struct nfs_fh *mntfh;
|
||||
struct dentry *mntroot;
|
||||
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
|
||||
struct nfs_sb_mountdata sb_mntdata = {
|
||||
.mntflags = flags,
|
||||
};
|
||||
int error;
|
||||
int error = -ENOMEM;
|
||||
|
||||
security_init_mnt_opts(&data.lsm_opts);
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
|
||||
if (data == NULL || mntfh == NULL)
|
||||
goto out_free_fh;
|
||||
|
||||
security_init_mnt_opts(&data->lsm_opts);
|
||||
|
||||
/* Validate the mount data */
|
||||
error = nfs_validate_mount_data(raw_data, &data, &mntfh, dev_name);
|
||||
error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
|
||||
if (error < 0)
|
||||
goto out;
|
||||
|
||||
/* Get a volume representation */
|
||||
server = nfs_create_server(&data, &mntfh);
|
||||
server = nfs_create_server(data, mntfh);
|
||||
if (IS_ERR(server)) {
|
||||
error = PTR_ERR(server);
|
||||
goto out;
|
||||
|
@ -1630,16 +1633,16 @@ static int nfs_get_sb(struct file_system_type *fs_type,
|
|||
|
||||
if (!s->s_root) {
|
||||
/* initial superblock/root creation */
|
||||
nfs_fill_super(s, &data);
|
||||
nfs_fill_super(s, data);
|
||||
}
|
||||
|
||||
mntroot = nfs_get_root(s, &mntfh);
|
||||
mntroot = nfs_get_root(s, mntfh);
|
||||
if (IS_ERR(mntroot)) {
|
||||
error = PTR_ERR(mntroot);
|
||||
goto error_splat_super;
|
||||
}
|
||||
|
||||
error = security_sb_set_mnt_opts(s, &data.lsm_opts);
|
||||
error = security_sb_set_mnt_opts(s, &data->lsm_opts);
|
||||
if (error)
|
||||
goto error_splat_root;
|
||||
|
||||
|
@ -1649,9 +1652,12 @@ static int nfs_get_sb(struct file_system_type *fs_type,
|
|||
error = 0;
|
||||
|
||||
out:
|
||||
kfree(data.nfs_server.hostname);
|
||||
kfree(data.mount_server.hostname);
|
||||
security_free_mnt_opts(&data.lsm_opts);
|
||||
kfree(data->nfs_server.hostname);
|
||||
kfree(data->mount_server.hostname);
|
||||
security_free_mnt_opts(&data->lsm_opts);
|
||||
out_free_fh:
|
||||
kfree(mntfh);
|
||||
kfree(data);
|
||||
return error;
|
||||
|
||||
out_err_nosb:
|
||||
|
@ -1800,8 +1806,6 @@ static int nfs4_validate_mount_data(void *options,
|
|||
struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
|
||||
char *c;
|
||||
|
||||
memset(args, 0, sizeof(*args));
|
||||
|
||||
if (data == NULL)
|
||||
goto out_no_data;
|
||||
|
||||
|
@ -1959,26 +1963,31 @@ static int nfs4_validate_mount_data(void *options,
|
|||
static int nfs4_get_sb(struct file_system_type *fs_type,
|
||||
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
|
||||
{
|
||||
struct nfs_parsed_mount_data data;
|
||||
struct nfs_parsed_mount_data *data;
|
||||
struct super_block *s;
|
||||
struct nfs_server *server;
|
||||
struct nfs_fh mntfh;
|
||||
struct nfs_fh *mntfh;
|
||||
struct dentry *mntroot;
|
||||
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
|
||||
struct nfs_sb_mountdata sb_mntdata = {
|
||||
.mntflags = flags,
|
||||
};
|
||||
int error;
|
||||
int error = -ENOMEM;
|
||||
|
||||
security_init_mnt_opts(&data.lsm_opts);
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
|
||||
if (data == NULL || mntfh == NULL)
|
||||
goto out_free_fh;
|
||||
|
||||
security_init_mnt_opts(&data->lsm_opts);
|
||||
|
||||
/* Validate the mount data */
|
||||
error = nfs4_validate_mount_data(raw_data, &data, dev_name);
|
||||
error = nfs4_validate_mount_data(raw_data, data, dev_name);
|
||||
if (error < 0)
|
||||
goto out;
|
||||
|
||||
/* Get a volume representation */
|
||||
server = nfs4_create_server(&data, &mntfh);
|
||||
server = nfs4_create_server(data, mntfh);
|
||||
if (IS_ERR(server)) {
|
||||
error = PTR_ERR(server);
|
||||
goto out;
|
||||
|
@ -2009,13 +2018,13 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
|
|||
nfs4_fill_super(s);
|
||||
}
|
||||
|
||||
mntroot = nfs4_get_root(s, &mntfh);
|
||||
mntroot = nfs4_get_root(s, mntfh);
|
||||
if (IS_ERR(mntroot)) {
|
||||
error = PTR_ERR(mntroot);
|
||||
goto error_splat_super;
|
||||
}
|
||||
|
||||
error = security_sb_set_mnt_opts(s, &data.lsm_opts);
|
||||
error = security_sb_set_mnt_opts(s, &data->lsm_opts);
|
||||
if (error)
|
||||
goto error_splat_root;
|
||||
|
||||
|
@ -2025,10 +2034,13 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
|
|||
error = 0;
|
||||
|
||||
out:
|
||||
kfree(data.client_address);
|
||||
kfree(data.nfs_server.export_path);
|
||||
kfree(data.nfs_server.hostname);
|
||||
security_free_mnt_opts(&data.lsm_opts);
|
||||
kfree(data->client_address);
|
||||
kfree(data->nfs_server.export_path);
|
||||
kfree(data->nfs_server.hostname);
|
||||
security_free_mnt_opts(&data->lsm_opts);
|
||||
out_free_fh:
|
||||
kfree(mntfh);
|
||||
kfree(data);
|
||||
return error;
|
||||
|
||||
out_free:
|
||||
|
|
|
@ -739,12 +739,13 @@ int nfs_updatepage(struct file *file, struct page *page,
|
|||
}
|
||||
|
||||
status = nfs_writepage_setup(ctx, page, offset, count);
|
||||
__set_page_dirty_nobuffers(page);
|
||||
if (status < 0)
|
||||
nfs_set_pageerror(page);
|
||||
else
|
||||
__set_page_dirty_nobuffers(page);
|
||||
|
||||
dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
|
||||
status, (long long)i_size_read(inode));
|
||||
if (status < 0)
|
||||
nfs_set_pageerror(page);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
|
|
@ -249,7 +249,6 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
|
|||
retval++;
|
||||
}
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
if (res_in)
|
||||
*rinp = res_in;
|
||||
|
@ -257,6 +256,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
|
|||
*routp = res_out;
|
||||
if (res_ex)
|
||||
*rexp = res_ex;
|
||||
cond_resched();
|
||||
}
|
||||
wait = NULL;
|
||||
if (retval || !*timeout || signal_pending(current))
|
||||
|
|
|
@ -261,7 +261,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline int __mcpcia_is_mmio(unsigned long addr)
|
||||
extern inline int __mcpcia_is_mmio(unsigned long addr)
|
||||
{
|
||||
return (addr & 0x80000000UL) == 0;
|
||||
}
|
||||
|
|
|
@ -356,13 +356,13 @@ struct el_t2_frame_corrected {
|
|||
#define vip volatile int *
|
||||
#define vuip volatile unsigned int *
|
||||
|
||||
static inline u8 t2_inb(unsigned long addr)
|
||||
extern inline u8 t2_inb(unsigned long addr)
|
||||
{
|
||||
long result = *(vip) ((addr << 5) + T2_IO + 0x00);
|
||||
return __kernel_extbl(result, addr & 3);
|
||||
}
|
||||
|
||||
static inline void t2_outb(u8 b, unsigned long addr)
|
||||
extern inline void t2_outb(u8 b, unsigned long addr)
|
||||
{
|
||||
unsigned long w;
|
||||
|
||||
|
@ -371,13 +371,13 @@ static inline void t2_outb(u8 b, unsigned long addr)
|
|||
mb();
|
||||
}
|
||||
|
||||
static inline u16 t2_inw(unsigned long addr)
|
||||
extern inline u16 t2_inw(unsigned long addr)
|
||||
{
|
||||
long result = *(vip) ((addr << 5) + T2_IO + 0x08);
|
||||
return __kernel_extwl(result, addr & 3);
|
||||
}
|
||||
|
||||
static inline void t2_outw(u16 b, unsigned long addr)
|
||||
extern inline void t2_outw(u16 b, unsigned long addr)
|
||||
{
|
||||
unsigned long w;
|
||||
|
||||
|
@ -386,12 +386,12 @@ static inline void t2_outw(u16 b, unsigned long addr)
|
|||
mb();
|
||||
}
|
||||
|
||||
static inline u32 t2_inl(unsigned long addr)
|
||||
extern inline u32 t2_inl(unsigned long addr)
|
||||
{
|
||||
return *(vuip) ((addr << 5) + T2_IO + 0x18);
|
||||
}
|
||||
|
||||
static inline void t2_outl(u32 b, unsigned long addr)
|
||||
extern inline void t2_outl(u32 b, unsigned long addr)
|
||||
{
|
||||
*(vuip) ((addr << 5) + T2_IO + 0x18) = b;
|
||||
mb();
|
||||
|
@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
|
|||
set_hae(msb); \
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(t2_hae_lock);
|
||||
extern spinlock_t t2_hae_lock;
|
||||
|
||||
/*
|
||||
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
|
||||
|
|
|
@ -35,7 +35,7 @@
|
|||
* register not being up-to-date with respect to the hardware
|
||||
* value.
|
||||
*/
|
||||
static inline void __set_hae(unsigned long new_hae)
|
||||
extern inline void __set_hae(unsigned long new_hae)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_irq_save(flags);
|
||||
|
@ -49,7 +49,7 @@ static inline void __set_hae(unsigned long new_hae)
|
|||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static inline void set_hae(unsigned long new_hae)
|
||||
extern inline void set_hae(unsigned long new_hae)
|
||||
{
|
||||
if (new_hae != alpha_mv.hae_cache)
|
||||
__set_hae(new_hae);
|
||||
|
@ -176,7 +176,7 @@ REMAP2(u64, writeq, volatile)
|
|||
#undef REMAP1
|
||||
#undef REMAP2
|
||||
|
||||
static inline void __iomem *generic_ioportmap(unsigned long a)
|
||||
extern inline void __iomem *generic_ioportmap(unsigned long a)
|
||||
{
|
||||
return alpha_mv.mv_ioportmap(a);
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
#endif
|
||||
|
||||
|
||||
extern inline unsigned long
|
||||
static inline unsigned long
|
||||
__reload_thread(struct pcb_struct *pcb)
|
||||
{
|
||||
register unsigned long a0 __asm__("$16");
|
||||
|
@ -114,7 +114,7 @@ extern unsigned long last_asn;
|
|||
#define __MMU_EXTERN_INLINE
|
||||
#endif
|
||||
|
||||
static inline unsigned long
|
||||
extern inline unsigned long
|
||||
__get_new_mm_context(struct mm_struct *mm, long cpu)
|
||||
{
|
||||
unsigned long asn = cpu_last_asn(cpu);
|
||||
|
@ -226,7 +226,7 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
|
|||
# endif
|
||||
#endif
|
||||
|
||||
extern inline int
|
||||
static inline int
|
||||
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
int i;
|
||||
|
|
|
@ -1,6 +1,78 @@
|
|||
#ifndef __ALPHA_PERCPU_H
|
||||
#define __ALPHA_PERCPU_H
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/threads.h>
|
||||
|
||||
#include <asm-generic/percpu.h>
|
||||
/*
|
||||
* Determine the real variable name from the name visible in the
|
||||
* kernel sources.
|
||||
*/
|
||||
#define per_cpu_var(var) per_cpu__##var
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/*
|
||||
* per_cpu_offset() is the offset that has to be added to a
|
||||
* percpu variable to get to the instance for a certain processor.
|
||||
*/
|
||||
extern unsigned long __per_cpu_offset[NR_CPUS];
|
||||
|
||||
#define per_cpu_offset(x) (__per_cpu_offset[x])
|
||||
|
||||
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
|
||||
#ifdef CONFIG_DEBUG_PREEMPT
|
||||
#define my_cpu_offset per_cpu_offset(smp_processor_id())
|
||||
#else
|
||||
#define my_cpu_offset __my_cpu_offset
|
||||
#endif
|
||||
|
||||
#ifndef MODULE
|
||||
#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
|
||||
#define PER_CPU_ATTRIBUTES
|
||||
#else
|
||||
/*
|
||||
* To calculate addresses of locally defined variables, GCC uses 32-bit
|
||||
* displacement from the GP. Which doesn't work for per cpu variables in
|
||||
* modules, as an offset to the kernel per cpu area is way above 4G.
|
||||
*
|
||||
* This forces allocation of a GOT entry for per cpu variable using
|
||||
* ldq instruction with a 'literal' relocation.
|
||||
*/
|
||||
#define SHIFT_PERCPU_PTR(var, offset) ({ \
|
||||
extern int simple_identifier_##var(void); \
|
||||
unsigned long __ptr, tmp_gp; \
|
||||
asm ( "br %1, 1f \n\
|
||||
1: ldgp %1, 0(%1) \n\
|
||||
ldq %0, per_cpu__" #var"(%1)\t!literal" \
|
||||
: "=&r"(__ptr), "=&r"(tmp_gp)); \
|
||||
(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
|
||||
|
||||
#define PER_CPU_ATTRIBUTES __used
|
||||
|
||||
#endif /* MODULE */
|
||||
|
||||
/*
|
||||
* A percpu variable may point to a discarded regions. The following are
|
||||
* established ways to produce a usable pointer from the percpu variable
|
||||
* offset.
|
||||
*/
|
||||
#define per_cpu(var, cpu) \
|
||||
(*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
|
||||
#define __get_cpu_var(var) \
|
||||
(*SHIFT_PERCPU_PTR(var, my_cpu_offset))
|
||||
#define __raw_get_cpu_var(var) \
|
||||
(*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
|
||||
|
||||
#else /* ! SMP */
|
||||
|
||||
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
|
||||
#define __get_cpu_var(var) per_cpu_var(var)
|
||||
#define __raw_get_cpu_var(var) per_cpu_var(var)
|
||||
|
||||
#define PER_CPU_ATTRIBUTES
|
||||
|
||||
#endif /* SMP */
|
||||
|
||||
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
|
||||
|
||||
#endif /* __ALPHA_PERCPU_H */
|
||||
|
|
|
@ -184,7 +184,7 @@ enum amask_enum {
|
|||
__amask; })
|
||||
|
||||
#define __CALL_PAL_R0(NAME, TYPE) \
|
||||
static inline TYPE NAME(void) \
|
||||
extern inline TYPE NAME(void) \
|
||||
{ \
|
||||
register TYPE __r0 __asm__("$0"); \
|
||||
__asm__ __volatile__( \
|
||||
|
@ -196,7 +196,7 @@ static inline TYPE NAME(void) \
|
|||
}
|
||||
|
||||
#define __CALL_PAL_W1(NAME, TYPE0) \
|
||||
static inline void NAME(TYPE0 arg0) \
|
||||
extern inline void NAME(TYPE0 arg0) \
|
||||
{ \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
__asm__ __volatile__( \
|
||||
|
@ -207,7 +207,7 @@ static inline void NAME(TYPE0 arg0) \
|
|||
}
|
||||
|
||||
#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
|
||||
static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
{ \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
register TYPE1 __r17 __asm__("$17") = arg1; \
|
||||
|
@ -219,7 +219,7 @@ static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
|
|||
}
|
||||
|
||||
#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
|
||||
static inline RTYPE NAME(TYPE0 arg0) \
|
||||
extern inline RTYPE NAME(TYPE0 arg0) \
|
||||
{ \
|
||||
register RTYPE __r0 __asm__("$0"); \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
|
@ -232,7 +232,7 @@ static inline RTYPE NAME(TYPE0 arg0) \
|
|||
}
|
||||
|
||||
#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
|
||||
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
{ \
|
||||
register RTYPE __r0 __asm__("$0"); \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#define VT_BUF_HAVE_MEMSETW
|
||||
#define VT_BUF_HAVE_MEMCPYW
|
||||
|
||||
extern inline void scr_writew(u16 val, volatile u16 *addr)
|
||||
static inline void scr_writew(u16 val, volatile u16 *addr)
|
||||
{
|
||||
if (__is_ioaddr(addr))
|
||||
__raw_writew(val, (volatile u16 __iomem *) addr);
|
||||
|
@ -21,7 +21,7 @@ extern inline void scr_writew(u16 val, volatile u16 *addr)
|
|||
*addr = val;
|
||||
}
|
||||
|
||||
extern inline u16 scr_readw(volatile const u16 *addr)
|
||||
static inline u16 scr_readw(volatile const u16 *addr)
|
||||
{
|
||||
if (__is_ioaddr(addr))
|
||||
return __raw_readw((volatile const u16 __iomem *) addr);
|
||||
|
@ -29,7 +29,7 @@ extern inline u16 scr_readw(volatile const u16 *addr)
|
|||
return *addr;
|
||||
}
|
||||
|
||||
extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
|
||||
static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
|
||||
{
|
||||
if (__is_ioaddr(s))
|
||||
memsetw_io((u16 __iomem *) s, c, count);
|
||||
|
|
|
@ -96,7 +96,7 @@ struct bfin_serial_port {
|
|||
struct work_struct tx_dma_workqueue;
|
||||
#endif
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
struct work_struct cts_workqueue;
|
||||
struct timer_list cts_timer;
|
||||
int cts_pin;
|
||||
int rts_pin;
|
||||
#endif
|
||||
|
|
|
@ -88,7 +88,7 @@ struct bfin_serial_port {
|
|||
# endif
|
||||
#endif
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
struct work_struct cts_workqueue;
|
||||
struct timer_list cts_timer;
|
||||
int cts_pin;
|
||||
int rts_pin;
|
||||
#endif
|
||||
|
|
|
@ -96,7 +96,7 @@ struct bfin_serial_port {
|
|||
struct work_struct tx_dma_workqueue;
|
||||
#endif
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
struct work_struct cts_workqueue;
|
||||
struct timer_list cts_timer;
|
||||
int cts_pin;
|
||||
int rts_pin;
|
||||
#endif
|
||||
|
|
|
@ -99,7 +99,7 @@ struct bfin_serial_port {
|
|||
struct work_struct tx_dma_workqueue;
|
||||
#endif
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
struct work_struct cts_workqueue;
|
||||
struct timer_list cts_timer;
|
||||
int cts_pin;
|
||||
int rts_pin;
|
||||
#endif
|
||||
|
@ -187,7 +187,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart)
|
|||
|
||||
#ifdef CONFIG_BFIN_UART1_CTSRTS
|
||||
peripheral_request(P_UART1_RTS, DRIVER_NAME);
|
||||
peripheral_request(P_UART1_CTS DRIVER_NAME);
|
||||
peripheral_request(P_UART1_CTS, DRIVER_NAME);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -202,7 +202,7 @@ static void bfin_serial_hw_init(struct bfin_serial_port *uart)
|
|||
|
||||
#ifdef CONFIG_BFIN_UART3_CTSRTS
|
||||
peripheral_request(P_UART3_RTS, DRIVER_NAME);
|
||||
peripheral_request(P_UART3_CTS DRIVER_NAME);
|
||||
peripheral_request(P_UART3_CTS, DRIVER_NAME);
|
||||
#endif
|
||||
#endif
|
||||
SSYNC();
|
||||
|
|
|
@ -88,7 +88,7 @@ struct bfin_serial_port {
|
|||
# endif
|
||||
#endif
|
||||
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
|
||||
struct work_struct cts_workqueue;
|
||||
struct timer_list cts_timer;
|
||||
int cts_pin;
|
||||
int rts_pin;
|
||||
#endif
|
||||
|
|