mirror of https://gitee.com/openkylin/linux.git
Merge branch 'sched/urgent' into sched/core

Merge reason: Add fixes before applying dependent patches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit e0a92c1747
@@ -28,6 +28,7 @@ modules.builtin
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
@@ -40,8 +40,6 @@ decnet.txt
 	- info on using the DECnet networking layer in Linux.
 depca.txt
 	- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-	- the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
 	- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
 	- info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
 	- serial IP load balancing
-ethertap.txt
-	- the Ethertap user space packet reception and transmission driver
 ewrk3.txt
 	- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
 	- TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-	- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
 	- general info on X.25 development.
 x25-iface.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
 	create	dns_resolver	foo:*	*	/usr/sbin/dns.foo %k
 
-

 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
 returned also.
 
+
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+

 =========
 MECHANISM
 =========
MAINTAINERS (14 changed lines)
@@ -1467,6 +1467,7 @@ F:	include/net/bluetooth/
 
 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -1692,6 +1693,13 @@ M:	Andy Whitcroft <apw@canonical.com>
 S:	Supported
 F:	scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M:	Harry Wei <harryxiyou@gmail.com>
+L:	xiyoulinuxkernelgroup@googlegroups.com
+L:	linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
 M:	Vasanthy Kolluri <vkolluri@cisco.com>
 M:	Roopa Prabhu <roprabhu@cisco.com>
@@ -2026,7 +2034,7 @@ F:	Documentation/scsi/dc395x.txt
 F:	drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:	Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:	dccp@vger.kernel.org
 W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:	Maintained
@@ -5266,7 +5274,7 @@ S:	Maintained
 F:	drivers/net/wireless/rtl818x/rtl8180/
 
 RTL8187 WIRELESS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:	Herton Ronaldo Krzesinski <herton@canonical.com>
 M:	Hin-Tak Leung <htl10@users.sourceforge.net>
 M:	Larry Finger <Larry.Finger@lwfinger.net>
 L:	linux-wireless@vger.kernel.org
@@ -6104,7 +6112,7 @@ S:	Maintained
 F:	security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:	Herton Ronaldo Krzesinski <herton@canonical.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/topstar-laptop.c
Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
@@ -11,6 +11,7 @@ config ALPHA
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc[irq];
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_chip *chip;
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+	if (!data)
 		return 1;
+	chip = irq_data_get_irq_chip(data);
+
+	if (!chip->irq_set_affinity || irq_user_affinity[irq])
+		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(desc->affinity, cpumask_of(cpu));
-	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(data->affinity, cpumask_of(cpu));
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
 #endif /* CONFIG_SMP */
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-	if (desc) {
-		desc->status |= IRQ_DISABLED;
-		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-					      handle_simple_irq, "RTC");
-		setup_irq(RTC_IRQ, &timer_irqaction);
-	}
+	set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+				      handle_simple_irq, "RTC");
+	setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions. */
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 	spin_unlock(&i8259_irq_lock);
 }
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	__i8259a_disable_irq(irq);
+	__i8259a_disable_irq(d->irq);
 	spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	spin_lock(&i8259_irq_lock);
 	__i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
 	.name		= "XT-PIC",
-	.unmask		= i8259a_enable_irq,
-	.mask		= i8259a_disable_irq,
-	.mask_ack	= i8259a_mask_and_ack_irq,
+	.irq_unmask	= i8259a_enable_irq,
+	.irq_mask	= i8259a_disable_irq,
+	.irq_mask_ack	= i8259a_mask_and_ack_irq,
 };
 
 void __init
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-	unsigned long bit = 1UL << (irq - 16);
+	unsigned long bit = 1UL << (d->irq - 16);
 	unsigned long mask = cached_irq_mask &= ~bit;
 
 	/* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
 	.name		= "PYXIS",
-	.mask_ack	= pyxis_mask_and_ack_irq,
-	.mask		= pyxis_disable_irq,
-	.unmask		= pyxis_enable_irq,
+	.irq_mask_ack	= pyxis_mask_and_ack_irq,
+	.irq_mask	= pyxis_disable_irq,
+	.irq_unmask	= pyxis_enable_irq,
 };
 
 void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
 		if ((ignore_mask >> i) & 1)
 			continue;
 		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
@@ -18,27 +18,27 @@
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_ena(irq - 16);
+	cserve_ena(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_dis(irq - 16);
+	cserve_dis(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness. */
 static struct irq_chip srm_irq_type = {
 	.name		= "SRM",
-	.unmask		= srm_enable_irq,
-	.mask		= srm_disable_irq,
-	.mask_ack	= srm_disable_irq,
+	.irq_unmask	= srm_enable_irq,
+	.irq_mask	= srm_disable_irq,
+	.irq_mask_ack	= srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
 		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-	alcor_disable_irq(irq);
+	alcor_disable_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-	i8259a_mask_and_ack_irq(irq);
+	i8259a_mask_and_ack_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
 	.name		= "ALCOR",
-	.unmask		= alcor_enable_irq,
-	.mask		= alcor_disable_irq,
-	.mask_ack	= alcor_mask_and_ack_irq,
+	.irq_unmask	= alcor_enable_irq,
+	.irq_mask	= alcor_disable_irq,
+	.irq_mask_ack	= alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
 		if (i >= 16+20 && i <= 16+30)
 			continue;
 		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
-	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
 	init_i8259a_irqs();
 	common_init_isa_dma();
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
 	.name		= "CABRIOLET",
-	.unmask		= cabriolet_enable_irq,
-	.mask		= cabriolet_disable_irq,
-	.mask_ack	= cabriolet_disable_irq,
+	.irq_unmask	= cabriolet_enable_irq,
+	.irq_mask	= cabriolet_disable_irq,
+	.irq_mask_ack	= cabriolet_disable_irq,
 };
 
 static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
 		for (i = 16; i < 35; ++i) {
 			set_irq_chip_and_handler(i, &cabriolet_irq_type,
 				handle_level_irq);
-			irq_to_desc(i)->status |= IRQ_LEVEL;
+			irq_set_status_flags(i, IRQ_LEVEL);
 		}
 	}
 
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << irq;
+	cached_irq_mask |= 1UL << d->irq;
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << irq);
+	cached_irq_mask &= ~(1UL << d->irq);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << (irq - 16);
+	cached_irq_mask |= 1UL << (d->irq - 16);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << (irq - 16));
+	cached_irq_mask &= ~(1UL << (d->irq - 16));
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		   bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, *affinity);
+	cpu_set_irq_affinity(d->irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		     bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, *affinity);
+	cpu_set_irq_affinity(d->irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
-	.name		= "DP264",
-	.unmask		= dp264_enable_irq,
-	.mask		= dp264_disable_irq,
-	.mask_ack	= dp264_disable_irq,
-	.set_affinity	= dp264_set_affinity,
+	.name			= "DP264",
+	.irq_unmask		= dp264_enable_irq,
+	.irq_mask		= dp264_disable_irq,
+	.irq_mask_ack		= dp264_disable_irq,
+	.irq_set_affinity	= dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
-	.name		= "CLIPPER",
-	.unmask		= clipper_enable_irq,
-	.mask		= clipper_disable_irq,
-	.mask_ack	= clipper_disable_irq,
-	.set_affinity	= clipper_set_affinity,
+	.name			= "CLIPPER",
+	.irq_unmask		= clipper_enable_irq,
+	.irq_mask		= clipper_disable_irq,
+	.irq_mask_ack		= clipper_disable_irq,
+	.irq_set_affinity	= clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+	eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
 	.name		= "EB64P",
-	.unmask		= eb64p_enable_irq,
-	.mask		= eb64p_disable_irq,
-	.mask_ack	= eb64p_disable_irq,
+	.irq_unmask	= eb64p_enable_irq,
+	.irq_mask	= eb64p_disable_irq,
+	.irq_mask_ack	= eb64p_disable_irq,
 };
 
 static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-	}
+		irq_set_status_flags(i, IRQ_LEVEL);
+	}
 
 	common_init_isa_dma();
 	setup_irq(16+5, &isa_cascade_irqaction);
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
 	.name		= "EIGER",
-	.unmask		= eiger_enable_irq,
-	.mask		= eiger_disable_irq,
-	.mask_ack	= eiger_disable_irq,
+	.irq_unmask	= eiger_enable_irq,
+	.irq_mask	= eiger_disable_irq,
+	.irq_mask_ack	= eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
@@ -63,34 +63,34 @@
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_enable_irq(1);
+	if (d->irq == 7)
+		i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_disable_irq(1);
+	if (d->irq == 7)
+		i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_mask_and_ack_irq(1);
+	if (d->irq == 7)
+		i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
 	.name		= "LOCAL",
-	.unmask		= jensen_local_enable,
-	.mask		= jensen_local_disable,
-	.mask_ack	= jensen_local_mask_ack,
+	.irq_unmask	= jensen_local_enable,
+	.irq_mask	= jensen_local_disable,
+	.irq_mask_ack	= jensen_local_mask_ack,
 };
 
 static void
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
-
+
 	spin_lock(&io7->irq_lock);
 	*ctl |= 1UL << 24;
 	mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
-
+
 	spin_lock(&io7->irq_lock);
 	*ctl &= ~(1UL << 24);
 	mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq)
-{
-	return;
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq)
-{
-	return 0;
+marvel_irq_noop(struct irq_data *d)
+{
+	return;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name		= "LEGACY",
-	.mask		= marvel_irq_noop,
-	.unmask		= marvel_irq_noop,
+	.irq_mask	= marvel_irq_noop,
+	.irq_unmask	= marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name		= "LSI",
-	.unmask		= io7_enable_irq,
-	.mask		= io7_disable_irq,
-	.mask_ack	= io7_disable_irq,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_mask_ack	= io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name		= "MSI",
-	.unmask		= io7_enable_irq,
-	.mask		= io7_disable_irq,
-	.ack		= marvel_irq_noop,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_ack	= marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the lsi irqs. */
 	for (i = 0; i < 128; ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	/* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the msi irqs. */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	for (i = 0; i < 16; ++i)
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+	mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
 	.name		= "MIKASA",
-	.unmask		= mikasa_enable_irq,
-	.mask		= mikasa_disable_irq,
-	.mask_ack	= mikasa_disable_irq,
+	.irq_unmask	= mikasa_enable_irq,
+	.irq_mask	= mikasa_disable_irq,
+	.irq_mask_ack	= mikasa_disable_irq,
 };
 
 static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
 	.name		= "NORITAKE",
-	.unmask		= noritake_enable_irq,
-	.mask		= noritake_disable_irq,
-	.mask_ack	= noritake_disable_irq,
+	.irq_unmask	= noritake_enable_irq,
+	.irq_mask	= noritake_disable_irq,
+	.irq_mask_ack	= noritake_disable_irq,
 };
 
 static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
 	(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned int mask, mask1, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
 	.name		= "RAWHIDE",
-	.unmask		= rawhide_enable_irq,
-	.mask		= rawhide_disable_irq,
-	.mask_ack	= rawhide_mask_and_ack_irq,
+	.irq_unmask	= rawhide_enable_irq,
+	.irq_mask	= rawhide_disable_irq,
+	.irq_mask_ack	= rawhide_mask_and_ack_irq,
 };
 
 static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
 	.name		= "RX164",
-	.unmask		= rx164_enable_irq,
-	.mask		= rx164_disable_irq,
-	.mask_ack	= rx164_disable_irq,
+	.irq_unmask	= rx164_enable_irq,
+	.irq_mask	= rx164_disable_irq,
+	.irq_mask_ack	= rx164_disable_irq,
};
 
 static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name		= "SABLE/LYNX",
-	.unmask		= sable_lynx_enable_irq,
-	.mask		= sable_lynx_disable_irq,
-	.mask_ack	= sable_lynx_mask_and_ack_irq,
+	.irq_unmask	= sable_lynx_enable_irq,
+	.irq_mask	= sable_lynx_disable_irq,
+	.irq_mask_ack	= sable_lynx_mask_and_ack_irq,
 };
 
 static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
 					 handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
 	.name		= "TAKARA",
-	.unmask		= takara_enable_irq,
-	.mask		= takara_disable_irq,
-	.mask_ack	= takara_disable_irq,
+	.irq_unmask	= takara_enable_irq,
+	.irq_mask	= takara_disable_irq,
+	.irq_mask_ack	= takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask |= 1UL << (irq - 16);
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask &= ~(1UL << (irq - 16));
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+		       bool force)
 {
 	spin_lock(&titan_irq_lock);
 	titan_cpu_set_irq_affinity(irq - 16, *affinity);
@@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
 static struct irq_chip titan_irq_type = {
-	.name		= "TITAN",
-	.unmask		= titan_enable_irq,
-	.mask		= titan_disable_irq,
-	.mask_ack	= titan_disable_irq,
-	.set_affinity	= titan_set_irq_affinity,
+	.name			= "TITAN",
+	.irq_unmask		= titan_enable_irq,
+	.irq_mask		= titan_disable_irq,
+	.irq_mask_ack		= titan_disable_irq,
+	.irq_set_affinity	= titan_set_irq_affinity,
 };
 
 static irqreturn_t
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
 }
 
 static void
-wildfire_enable_irq(unsigned int irq)
+wildfire_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < 16)
-		i8259a_enable_irq(irq);
+		i8259a_enable_irq(d);
 
 	spin_lock(&wildfire_irq_lock);
 	set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
 }
 
 static void
-wildfire_disable_irq(unsigned int irq)
+wildfire_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < 16)
-		i8259a_disable_irq(irq);
+		i8259a_disable_irq(d);
 
 	spin_lock(&wildfire_irq_lock);
 	clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
 }
 
 static void
-wildfire_mask_and_ack_irq(unsigned int irq)
+wildfire_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < 16)
-		i8259a_mask_and_ack_irq(irq);
+		i8259a_mask_and_ack_irq(d);
 
 	spin_lock(&wildfire_irq_lock);
 	clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip wildfire_irq_type = {
 	.name		= "WILDFIRE",
-	.unmask		= wildfire_enable_irq,
-	.mask		= wildfire_disable_irq,
-	.mask_ack	= wildfire_mask_and_ack_irq,
+	.irq_unmask	= wildfire_enable_irq,
+	.irq_mask	= wildfire_disable_irq,
+	.irq_mask_ack	= wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
 	for (i = 0; i < 16; ++i) {
 		if (i == 2)
 			continue;
-		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
 					 handle_level_irq);
+		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
 	}
 
-	irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
 	set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
 				 handle_level_irq);
+	irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
 	for (i = 40; i < 64; ++i) {
-		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
 					 handle_level_irq);
+		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
 	}
 
-	setup_irq(32+irq_bias, &isa_enable);
+	setup_irq(32+irq_bias, &isa_enable);
 }
 
 static void __init
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
 	dd = clk->dpll_data;
 
 	/* DPLL divider must result in a valid jitter correction val */
-	fint = clk->parent->rate / (n + 1);
+	fint = clk->parent->rate / n;
 	if (fint < DPLL_FINT_BAND1_MIN) {
 
 		pr_debug("rejecting n=%d due to Fint failure, "
@@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = {
 	.priv	= &omap2_mbox_iva_priv,
 };
 
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4)
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
 	list_for_each_entry(e, &partition->muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 
-		(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+		(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
 					  m, &omap_mux_dbg_signal_fops);
 	}
 }
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
 
 	}
 
-	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
 				   &enable_off_mode, &pm_dbg_option_fops);
-	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
 				   &sleep_while_idle, &pm_dbg_option_fops);
-	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
 				   &wakeup_timer_seconds, &pm_dbg_option_fops);
 	(void) debugfs_create_file("wakeup_timer_milliseconds",
-			S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+			S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
 			&pm_dbg_option_fops);
 	pm_dbg_init_done = 1;
 
@@ -38,8 +38,8 @@
 #define OMAP4430_PRCM_MPU_CPU1_INST		0x0800
 
 /* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0018
 
 
 /*
@@ -900,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 		return PTR_ERR(dbg_dir);
 	}
 
-	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
 			(void *)sr_info, &pm_sr_fops);
 	(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
 			&sr_info->err_weight);
@@ -939,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 		strcpy(name, "volt_");
 		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
 		strcat(name, volt_name);
-		(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
 				&(sr_info->nvalue_table[i].nvalue));
 	}
 
@@ -39,6 +39,7 @@
 #include <asm/mach/time.h>
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
+#include <asm/sched_clock.h>
 
 #include "timer-gp.h"
 
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
 /*
  * clocksource
 */
+static DEFINE_CLOCK_DATA(cd);
 static struct omap_dm_timer *gpt_clocksource;
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static void notrace dmtimer_update_sched_clock(void)
+{
+	u32 cyc;
+
+	cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 /* Setup free-running counter for clocksource */
 static void __init omap2_gp_clocksource_init(void)
 {
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
 
 	omap_dm_timer_set_load_start(gpt, 1, 0);
 
+	init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
 	if (clocksource_register_hz(&clocksource_gpt, tick_rate))
 		printk(err2, clocksource_gpt.name);
 }
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data {
 	const struct matrix_keymap_data *keymap_data;
 
 	bool wakeup;
+	bool use_fn_map;
 };
 #endif
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
 
 struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
 {
-	struct omap_mbox *mbox;
-	int ret;
+	struct omap_mbox *_mbox, *mbox = NULL;
+	int i, ret;
 
 	if (!mboxes)
 		return ERR_PTR(-EINVAL);
 
-	for (mbox = *mboxes; mbox; mbox++)
-		if (!strcmp(mbox->name, name))
+	for (i = 0; (_mbox = mboxes[i]); i++) {
+		if (!strcmp(_mbox->name, name)) {
+			mbox = _mbox;
 			break;
+		}
+	}
 
 	if (!mbox)
 		return ERR_PTR(-ENOENT);
@@ -72,11 +72,6 @@ SECTIONS
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	.init.data : { INIT_DATA }
 	.init.setup : { INIT_SETUP(16) }
-#ifdef CONFIG_ETRAX_ARCH_V32
-	__start___param = .;
-	__param : { *(__param) }
-	__stop___param = .;
-#endif
 	.initcall.init : {
 		INIT_CALLS
 	}
@@ -240,6 +240,12 @@ struct machdep_calls {
 	 * claims to support kexec.
 	 */
 	int (*machine_kexec_prepare)(struct kimage *image);
+
+	/* Called to perform the _real_ kexec.
+	 * Do NOT allocate memory or fail here. We are past the point of
+	 * no return.
+	 */
+	void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
 	save_ftrace_enabled = __ftrace_enabled_save();
 
-	default_machine_kexec(image);
+	if (ppc_md.machine_kexec)
+		ppc_md.machine_kexec(image);
+	else
+		default_machine_kexec(image);
 
 	__ftrace_enabled_restore(save_ftrace_enabled);
 
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
 			prime_debug_regs(new_thread);
 }
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
 	if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 		set_dabr(0);
 	}
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
 	discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
 	flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
 	set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 * neesd to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
 */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
 	int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!batch->active) {
 		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+		put_cpu_var(ppc64_tlb_batch);
 		return;
 	}
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
@@ -88,6 +88,7 @@ extern int acpi_disabled;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
@@ -36,6 +36,11 @@
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
+#define NHM_C3_AUTO_DEMOTE		(1UL << 25)
+#define NHM_C1_AUTO_DEMOTE		(1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
+
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 
@@ -22,6 +22,7 @@
 
 #define ARCH_P4_CNTRVAL_BITS	(40)
 #define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT	((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
 
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
 	 */
 	CMOS_WRITE(0, 0xf);
 
-	*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+	*((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
 		return 0;
 	}
 
-	if (acpi_skip_timer_override &&
-	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-		return 0;
+	if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+		if (acpi_skip_timer_override) {
+			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+			return 0;
+		}
+		if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+			intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+			printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+		}
 	}
 
 	mp_override_legacy_irq(intsrc->source_irq,
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
 	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
 	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+		adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
 		global_clock_event = &adev->evt;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
 	if (c->x86 == 0x06) {
 		if (cpu_has(c, X86_FEATURE_EST))
-			printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-			       "detected. The acpi-cpufreq module offers "
-			       "voltage scaling in addition of frequency "
+			printk_once(KERN_WARNING PFX "Warning: EST-capable "
+			       "CPU detected. The acpi-cpufreq module offers "
+			       "voltage scaling in addition to frequency "
 			       "scaling. You should use that instead of "
 			       "p4-clockmod, if possible.\n");
 		switch (c->x86_model) {
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
 static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0, cpu;
+	int rv;
 
 	for_each_online_cpu(i) {
 		int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
 
 		cpb_capable = true;
 
-		register_cpu_notifier(&cpb_nb);
-
 		msrs = msrs_alloc();
 		if (!msrs) {
 			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
 			return -ENOMEM;
 		}
 
+		register_cpu_notifier(&cpb_nb);
+
 		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
 		for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
 			(cpb_enabled ? "on" : "off"));
 	}
 
-	return cpufreq_register_driver(&cpufreq_amd64_driver);
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+		unregister_cpu_notifier(&cpb_nb);
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+	return rv;
 }
 
 /* driver entry point for term */
@@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
 		return 1;
 	}
 
-	/* it might be unflagged overflow */
+	/*
+	 * In some circumstances the overflow might issue an NMI but did
+	 * not set P4_CCCR_OVF bit. Because a counter holds a negative value
+	 * we simply check for high bit being set, if it's cleared it means
+	 * the counter has reached zero value and continued counting before
+	 * real NMI signal was received:
+	 */
 	rdmsrl(hwc->event_base + hwc->idx, v);
-	if (!(v & ARCH_P4_CNTRVAL_MASK))
+	if (!(v & ARCH_P4_UNFLAGGED_BIT))
 		return 1;
 
 	return 0;
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
 
 static u32 __init ati_sbx00_rev(int num, int slot, int func)
 {
-	u32 old, d;
+	u32 d;
 
 	d = read_pci_config(num, slot, func, 0x70);
-	old = d;
-	d &= ~(1<<8);
-	write_pci_config(num, slot, func, 0x70, d);
 	d = read_pci_config(num, slot, func, 0x8);
 	d &= 0xff;
-	write_pci_config(num, slot, func, 0x70, old);
 
 	return d;
 }
@@ -160,11 +155,14 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 {
 	u32 d, rev;
 
-	if (acpi_use_timer_override)
+	rev = ati_sbx00_rev(num, slot, func);
+	if (rev >= 0x40)
+		acpi_fix_pin2_polarity = 1;
+
+	if (rev > 0x13)
 		return;
 
-	rev = ati_sbx00_rev(num, slot, func);
-	if (rev > 0x13)
+	if (acpi_use_timer_override)
 		return;
 
 	/* check for IRQ0 interrupt swap */
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 		},
 	},
+	{	/* Handle problems with rebooting on VersaLogic Menlow boards */
+		.callback = set_bios_reboot,
+		.ident = "VersaLogic Menlow based board",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+			DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+		},
+	},
 	{ }
 };
 
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
 		kvm_register_write(&svm->vcpu, reg, val);
 	}
 
+	skip_emulated_instruction(&svm->vcpu);
+
 	return 1;
 }
 
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
 	 * wasted bootmem) and hand off chunks of it to callers.
 	 */
 	res = alloc_bootmem(chunk_size);
-	if (!res)
-		return NULL;
+	BUG_ON(!res);
 	prom_early_allocated += chunk_size;
 	memset(res, 0, chunk_size);
 	free_mem = chunk_size;
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 *
 */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
|
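A minimal sketch of the new __blk_run_queue() calling convention introduced above; the wrapper function and its name are illustrative, not part of the patch:

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    /* Illustrative helper: how a caller picks the new bool argument. */
    static void example_kick_queue(struct request_queue *q, bool from_completion)
    {
    	unsigned long flags;

    	spin_lock_irqsave(q->queue_lock, flags);
    	/*
    	 * Submission paths may run q->request_fn() directly (false);
    	 * completion paths must defer to kblockd (true), since calling
    	 * back into request_fn from there may confuse the driver.
    	 */
    	__blk_run_queue(q, from_completion);
    	spin_unlock_irqrestore(q->queue_lock, flags);
    }
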
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,

 	/*
 	 * Moving a request silently to empty queue_head may stall the
-	 * queue. Kick the queue in those cases.
+	 * queue. Kick the queue in those cases. This function is called
+	 * from request completion path and calling directly into
+	 * request_fn may confuse the driver. Always use kblockd.
 	 */
 	if (was_empty && next_rq)
-		__blk_run_queue(q);
+		__blk_run_queue(q, true);
 }

 static void pre_flush_end_io(struct request *rq, int error)

@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
 		BUG();
 	}

-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 	return rq;
 }

@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 }

 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write

@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */

+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+				unsigned long delay);
+
 struct throtl_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;

@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
 	update_min_dispatch_time(st);

 	if (time_before_eq(st->min_disptime, jiffies))
-		throtl_schedule_delayed_work(td->queue, 0);
+		throtl_schedule_delayed_work(td, 0);
 	else
-		throtl_schedule_delayed_work(td->queue,
-				(st->min_disptime - jiffies));
+		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }

 static inline void

@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }

 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {

-	struct throtl_data *td = q->td;
 	struct delayed_work *dwork = &td->throtl_work;

 	if (total_nr_queued(td) > 0) {

@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
 		 * Cancel that and schedule a new one.
 		 */
 		__cancel_delayed_work(dwork);
-		kblockd_schedule_delayed_work(q, dwork, delay);
+		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
 	}
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);

 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)

@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
 	smp_mb__after_atomic_inc();

 	/* Schedule a work now to process the limit change */
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }

 static void throtl_update_blkio_group_write_bps(void *key,

@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }

 static void throtl_update_blkio_group_read_iops(void *key,

@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }

 static void throtl_update_blkio_group_write_iops(void *key,

@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }

 void throtl_shutdown_timer_wq(struct request_queue *q)

@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)

 static int __init throtl_init(void)
 {
+	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+	if (!kthrotld_workqueue)
+		panic("Failed to create kthrotld\n");
+
 	blkio_policy_register(&blkio_policy_throtl);
 	return 0;
 }

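The blk-throttle hunks above replace kblockd with a private workqueue. A standalone sketch of that pattern, with illustrative example_* names, assuming the alloc_workqueue() API of this kernel generation:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct workqueue_struct *example_wq;
    static struct delayed_work example_dwork;

    static void example_fn(struct work_struct *work)
    {
    	/* process whatever was queued */
    }

    static int __init example_init(void)
    {
    	/* WQ_MEM_RECLAIM: the queue keeps making progress under memory
    	 * pressure, which matters for work in the block I/O path. */
    	example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
    	if (!example_wq)
    		return -ENOMEM;

    	INIT_DELAYED_WORK(&example_dwork, example_fn);
    	queue_delayed_work(example_wq, &example_dwork, HZ / 10);
    	return 0;
    }
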
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue);
+			__blk_run_queue(cfqd->queue, false);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 					&cfqq->cfqg->blkg);

@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
+		__blk_run_queue(cfqd->queue, false);
 	}
 }

@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;

 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue);
+	__blk_run_queue(cfqd->queue, false);
 	spin_unlock_irq(q->queue_lock);
 }

@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);

@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		break;

 	case ELEVATOR_INSERT_SORT:

@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
 	struct block_device *bdev = bdget_disk(disk, partno);
 	if (bdev) {
 		fsync_bdev(bdev);
-		res = __invalidate_device(bdev);
+		res = __invalidate_device(bdev, true);
 		bdput(bdev);
 	}
 	return res;

@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) &&
-		    blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-			return -EBUSY;
+		if (!(mode & FMODE_EXCL)) {
+			bdgrab(bdev);
+			if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+				return -EBUSY;
+		}
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
 			blkdev_put(bdev, mode | FMODE_EXCL);

@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
 	u8 originally_enabled;	/* True if GPE was originally enabled */
 };

+struct acpi_gpe_notify_object {
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_notify_object *next;
+};
+
 union acpi_gpe_dispatch_info {
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
 	struct acpi_gpe_handler_info *handler;	/* Installed GPE handler */
-	struct acpi_namespace_node *device_node;	/* Parent _PRW device for implicit notify */
+	struct acpi_gpe_notify_object device;	/* List of _PRW devices for implicit notify */
 };

 /*

@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 	acpi_status status;
 	struct acpi_gpe_event_info *local_gpe_event_info;
 	struct acpi_evaluate_info *info;
+	struct acpi_gpe_notify_object *notify_object;

 	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 		 * from this thread -- because handlers may in turn run other
 		 * control methods.
 		 */
-		status =
-		    acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
-						 device_node,
-						 ACPI_NOTIFY_DEVICE_WAKE);
+		status = acpi_ev_queue_notify_request(
+				local_gpe_event_info->dispatch.device.node,
+				ACPI_NOTIFY_DEVICE_WAKE);
+
+		notify_object = local_gpe_event_info->dispatch.device.next;
+		while (ACPI_SUCCESS(status) && notify_object) {
+			status = acpi_ev_queue_notify_request(
+					notify_object->node,
+					ACPI_NOTIFY_DEVICE_WAKE);
+			notify_object = notify_object->next;
+		}

 		break;

 	case ACPI_GPE_DISPATCH_METHOD:

@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 	acpi_status status = AE_BAD_PARAMETER;
 	struct acpi_gpe_event_info *gpe_event_info;
 	struct acpi_namespace_node *device_node;
+	struct acpi_gpe_notify_object *notify_object;
 	acpi_cpu_flags flags;
+	u8 gpe_dispatch_mask;

 	ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);

@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 		goto unlock_and_exit;
 	}

+	if (wake_device == ACPI_ROOT_OBJECT) {
+		goto out;
+	}
+
 	/*
 	 * If there is no method or handler for this GPE, then the
 	 * wake_device will be notified whenever this GPE fires (aka
 	 * "implicit notify") Note: The GPE is assumed to be
 	 * level-triggered (for windows compatibility).
 	 */
-	if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-	     ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
-
-		/* Validate wake_device is of type Device */
-
-		device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
-					    wake_device);
-		if (device_node->type != ACPI_TYPE_DEVICE) {
-			goto unlock_and_exit;
-		}
-		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
-					 ACPI_GPE_LEVEL_TRIGGERED);
-		gpe_event_info->dispatch.device_node = device_node;
+	gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+	if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+	    && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+		goto out;
 	}

+	/* Validate wake_device is of type Device */
+
+	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+	if (device_node->type != ACPI_TYPE_DEVICE) {
+		goto unlock_and_exit;
+	}
+
+	if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
+		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+					 ACPI_GPE_LEVEL_TRIGGERED);
+		gpe_event_info->dispatch.device.node = device_node;
+		gpe_event_info->dispatch.device.next = NULL;
+	} else {
+		/* There are multiple devices to notify implicitly. */
+
+		notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+		if (!notify_object) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		notify_object->node = device_node;
+		notify_object->next = gpe_event_info->dispatch.device.next;
+		gpe_event_info->dispatch.device.next = notify_object;
+	}
+
+ out:
 	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
 	status = AE_OK;

@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 			size_t count, loff_t *ppos)
 {
 	static char *buf;
-	static int uncopied_bytes;
+	static u32 max_size;
+	static u32 uncopied_bytes;

 	struct acpi_table_header table;
 	acpi_status status;

@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 		if (copy_from_user(&table, user_buf,
 				   sizeof(struct acpi_table_header)))
 			return -EFAULT;
-		uncopied_bytes = table.length;
-		buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+		uncopied_bytes = max_size = table.length;
+		buf = kzalloc(max_size, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 	}

-	if (uncopied_bytes < count) {
-		kfree(buf);
+	if (buf == NULL)
+		return -EINVAL;
+
+	if ((*ppos > max_size) ||
+	    (*ppos + count > max_size) ||
+	    (*ppos + count < count) ||
+	    (count > uncopied_bytes))
 		return -EINVAL;
-	}

 	if (copy_from_user(buf + (*ppos), user_buf, count)) {
 		kfree(buf);
+		buf = NULL;
 		return -EFAULT;
 	}

@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 	if (!uncopied_bytes) {
 		status = acpi_install_method(buf);
 		kfree(buf);
+		buf = NULL;
 		if (ACPI_FAILURE(status))
 			return -EINVAL;
 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);

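The cm_write() fix above is a general lesson in validating an offset before copy_from_user(). A condensed restatement of the same checks (buf, max_size and uncopied_bytes are the statics from the hunk):

    /* Reject a chunk unless it lies entirely inside the table buffer. */
    if ((*ppos > max_size) ||           /* offset beyond the buffer */
        (*ppos + count > max_size) ||   /* chunk would run off the end */
        (*ppos + count < count) ||      /* offset + count wrapped around */
        (count > uncopied_bytes))       /* more than is still expected */
    	return -EINVAL;

    if (copy_from_user(buf + (*ppos), user_buf, count))
    	return -EFAULT;
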
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
 			struct block_device *bdev = opened_bdev[cnt];
 			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
 				continue;
-			__invalidate_device(bdev);
+			__invalidate_device(bdev, true);
 		}
 		mutex_unlock(&open_lock);
 	} else {

@@ -78,7 +78,6 @@

 #include <asm/uaccess.h>

-static DEFINE_MUTEX(loop_mutex);
 static LIST_HEAD(loop_devices);
 static DEFINE_MUTEX(loop_devices_mutex);

@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct loop_device *lo = bdev->bd_disk->private_data;

-	mutex_lock(&loop_mutex);
 	mutex_lock(&lo->lo_ctl_mutex);
 	lo->lo_refcnt++;
 	mutex_unlock(&lo->lo_ctl_mutex);
-	mutex_unlock(&loop_mutex);

 	return 0;
 }

@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 	struct loop_device *lo = disk->private_data;
 	int err;

-	mutex_lock(&loop_mutex);
 	mutex_lock(&lo->lo_ctl_mutex);

 	if (--lo->lo_refcnt)

@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 out:
 	mutex_unlock(&lo->lo_ctl_mutex);
 out_unlocked:
-	mutex_unlock(&loop_mutex);
 	return 0;
 }

@@ -41,6 +41,9 @@ static struct usb_device_id ath3k_table[] = {

 	/* Atheros AR9285 Malbec with sflash firmware */
 	{ USB_DEVICE(0x03F0, 0x311D) },

+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xE02C) },
+
 	{ }	/* Terminating entry */
 };

@@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = {
 	/* Atheros AR9285 Malbec with sflash firmware */
 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },

+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },

@@ -829,7 +832,7 @@ static void btusb_work(struct work_struct *work)

 	if (hdev->conn_hash.sco_num > 0) {
 		if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
-			err = usb_autopm_get_interface(data->isoc);
+			err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
 			if (err < 0) {
 				clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
 				usb_kill_anchored_urbs(&data->isoc_anchor);

@@ -858,7 +861,7 @@ static void btusb_work(struct work_struct *work)

 		__set_isoc_interface(hdev, 0);
 		if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
-			usb_autopm_put_interface(data->isoc);
+			usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
 	}
 }

@@ -1041,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,

 	usb_set_intfdata(intf, data);

-	usb_enable_autosuspend(interface_to_usbdev(intf));
-
 	return 0;
 }

@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
 #else
 		printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
 #endif
+		pci_unregister_driver(&agp_amd64_pci_driver);
 		return -ENODEV;
 	}

 	/* First check that we have at least one AMD64 NB */
-	if (!pci_dev_present(amd_nb_misc_ids))
+	if (!pci_dev_present(amd_nb_misc_ids)) {
+		pci_unregister_driver(&agp_amd64_pci_driver);
 		return -ENODEV;
+	}

 	/* Look for any AGP bridge */
 	agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
 	err = driver_attach(&agp_amd64_pci_driver.driver);
-	if (err == 0 && agp_bridges_found == 0)
+	if (err == 0 && agp_bridges_found == 0) {
+		pci_unregister_driver(&agp_amd64_pci_driver);
 		err = -ENODEV;
+	}
 	}
 	return err;
 }

@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)

 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70

 /* Intel 965G registers */
 #define I965_MSAC 0x62

@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"

@@ -70,12 +71,8 @@ static struct _intel_private {
 	u32 __iomem *gtt;		/* I915G */
 	bool clear_fake_agp; /* on first access via agp, fill with scratch */
 	int num_dcache_entries;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
+	void __iomem *i9xx_flush_page;
 	char *i81x_gtt_table;
-	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;

@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)

 static void i830_cleanup(void)
 {
-	if (intel_private.i8xx_flush_page) {
-		kunmap(intel_private.i8xx_flush_page);
-		intel_private.i8xx_flush_page = NULL;
-	}
-
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-	if (!intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
-		i830_cleanup();
 }

 /* The chipset_flush interface needs to get data that has already been

@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
 */
 static void i830_chipset_flush(void)
 {
-	unsigned int *pg = intel_private.i8xx_flush_page;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

-	memset(pg, 0, 1024);
+	/* Forcibly evict everything from the CPU write buffers.
+	 * clflush appears to be insufficient.
+	 */
+	wbinvd_on_all_cpus();

-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+	/* Now we've only seen documents for this magic bit on 855GM,
+	 * we hope it exists for the other gen2 chipsets...
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+	       intel_private.registers+I830_HIC);

+	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+		if (time_after(jiffies, timeout))
+			break;
+
+		udelay(50);
+	}
 }

 static void i830_write_entry(dma_addr_t addr, unsigned int entry,

@@ -849,8 +837,6 @@ static int i830_setup(void)

 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

-	intel_i830_setup_flush();
-
 	return 0;
 }

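The rewritten i830_chipset_flush() above is a standard bounded MMIO poll. The generic shape, with an illustrative register and busy bit rather than the real I830_HIC:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/jiffies.h>

    /* Poll a busy bit, but never spin forever if the hardware wedges. */
    static void example_wait_idle(void __iomem *reg)
    {
    	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

    	while (readl(reg) & (1 << 31)) {	/* assumed busy bit */
    		if (time_after(jiffies, timeout))
    			break;			/* give up, don't hang */
    		udelay(50);
    	}
    }
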
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
 			    test_bit(IS_ANY_T1, &dev->flags))) {
 				DEBUGP(4, dev, "Perform AUTOPPS\n");
 				set_bit(IS_AUTOPPS_ACT, &dev->flags);
-				ptsreq.protocol = ptsreq.protocol =
-				    (0x01 << dev->proto);
+				ptsreq.protocol = (0x01 << dev->proto);
 				ptsreq.flags = 0x01;
 				ptsreq.pts1 = 0x00;
 				ptsreq.pts2 = 0x00;

@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
 static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 {
 	struct ipw_dev *ipw = priv_data;
-	struct resource *io_resource;
 	int ret;

 	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;

@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 	if (ret)
 		return ret;

-	io_resource = request_region(p_dev->resource[0]->start,
-				resource_size(p_dev->resource[0]),
-				IPWIRELESS_PCCARD_NAME);
+	if (!request_region(p_dev->resource[0]->start,
+			    resource_size(p_dev->resource[0]),
+			    IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit;
+	}

 	p_dev->resource[2]->flags |=
 		WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;

@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)

 	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
 	if (ret != 0)
-		goto exit2;
+		goto exit1;

 	ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;

-	ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+	ipw->common_memory = ioremap(p_dev->resource[2]->start,
 				resource_size(p_dev->resource[2]));
-	request_mem_region(p_dev->resource[2]->start,
-			resource_size(p_dev->resource[2]),
-			IPWIRELESS_PCCARD_NAME);
+	if (!request_mem_region(p_dev->resource[2]->start,
+				resource_size(p_dev->resource[2]),
+				IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit2;
+	}

 	p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
 					WIN_ENABLE;
 	p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
 	ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
 	if (ret != 0)
-		goto exit2;
+		goto exit3;

 	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
 	if (ret != 0)

@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)

 	ipw->attr_memory = ioremap(p_dev->resource[3]->start,
 				resource_size(p_dev->resource[3]));
-	request_mem_region(p_dev->resource[3]->start,
-			resource_size(p_dev->resource[3]),
-			IPWIRELESS_PCCARD_NAME);
+	if (!request_mem_region(p_dev->resource[3]->start,
+				resource_size(p_dev->resource[3]),
+				IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit4;
+	}

 	return 0;

+exit4:
+	iounmap(ipw->attr_memory);
+exit3:
+	release_mem_region(p_dev->resource[2]->start,
+			resource_size(p_dev->resource[2]));
 exit2:
-	if (ipw->common_memory) {
-		release_mem_region(p_dev->resource[2]->start,
-				resource_size(p_dev->resource[2]));
-		iounmap(ipw->common_memory);
-	}
+	iounmap(ipw->common_memory);
 exit1:
-	release_resource(io_resource);
+	release_region(p_dev->resource[0]->start,
+			resource_size(p_dev->resource[0]));
 exit:
 	pcmcia_disable_device(p_dev);
-	return -1;
+	return ret;
 }

 static int config_ipwireless(struct ipw_dev *ipw)

@@ -219,6 +229,8 @@ static int config_ipwireless(struct ipw_dev *ipw)

 static void release_ipwireless(struct ipw_dev *ipw)
 {
+	release_region(ipw->link->resource[0]->start,
+			resource_size(ipw->link->resource[0]));
 	if (ipw->common_memory) {
 		release_mem_region(ipw->link->resource[2]->start,
 				resource_size(ipw->link->resource[2]));

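The ipwireless probe fix stops ignoring request_region()/request_mem_region() failures. A minimal sketch of the corrected idiom (driver name and range are illustrative):

    #include <linux/ioport.h>

    static int example_claim_io(unsigned long start, unsigned long len)
    {
    	if (!request_region(start, len, "example"))
    		return -EBUSY;	/* range already owned by another driver */

    	/* ... on any later failure: release_region(start, len); */
    	return 0;
    }
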
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
 		    tpm_protected_ordinal_duration[ordinal &
 						   TPM_PROTECTED_ORDINAL_MASK];

-	if (duration_idx != TPM_UNDEFINED) {
+	if (duration_idx != TPM_UNDEFINED)
 		duration = chip->vendor.duration[duration_idx];
-		return (duration <= 0 ? HZ : duration);
-	} else
+	/* if duration is 0, it's because chip->vendor.duration wasn't */
+	/* filled yet, so we set the lowest timeout just to give enough */
+	/* time for tpm_get_timeouts() to succeed */
+	if (duration <= 0)
 		return 2 * 60 * HZ;
+	else
+		return duration;
 }
 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);

@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)

 	ret = sysdev_driver_register(&cpu_sysdev_class,
 					&cpufreq_sysdev_driver);
+	if (ret)
+		goto err_null_driver;

-	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
 		int i;
 		ret = -ENODEV;

@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		if (ret) {
 			dprintk("no CPU initialized for driver %s\n",
 							driver_data->name);
-			sysdev_driver_unregister(&cpu_sysdev_class,
-							&cpufreq_sysdev_driver);
-
-			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			cpufreq_driver = NULL;
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			goto err_sysdev_unreg;
 		}
 	}

-	if (!ret) {
-		register_hotcpu_notifier(&cpufreq_cpu_notifier);
-		dprintk("driver %s up and running\n", driver_data->name);
-		cpufreq_debug_enable_ratelimit();
-	}
+	register_hotcpu_notifier(&cpufreq_cpu_notifier);
+	dprintk("driver %s up and running\n", driver_data->name);
+	cpufreq_debug_enable_ratelimit();

 	return 0;
+err_sysdev_unreg:
+	sysdev_driver_unregister(&cpu_sysdev_class,
+			&cpufreq_sysdev_driver);
+err_null_driver:
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	cpufreq_driver = NULL;
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);

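The cpufreq_register_driver() rework above converts nested cleanup into the kernel's usual goto-unwind ladder. A self-contained sketch of the pattern (acquire_a/acquire_b/release_a are placeholders, not real APIs):

    static int acquire_a(void) { return 0; }	/* placeholder */
    static int acquire_b(void) { return 0; }	/* placeholder */
    static void release_a(void) { }		/* placeholder */

    static int example_register(void)
    {
    	int ret;

    	ret = acquire_a();
    	if (ret)
    		goto err_out;

    	ret = acquire_b();
    	if (ret)
    		goto err_release_a;

    	return 0;

    err_release_a:
    	release_a();	/* undo in reverse order of acquisition */
    err_out:
    	return ret;
    }
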
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}

 	/* Invalidate all timestamps while vblank irq's are off. */
 	clear_vblank_timestamps(dev, crtc);

@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;

+	/* Fields of interlaced scanout modes are only halve a frame duration.
+	 * Double the dotclock to get halve the frame-/line-/pixelduration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
 	/* Valid dotclock? */
 	if (dotclock > 0) {
 		/* Convert scanline length in pixels and video dot clock to

@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		return -EAGAIN;
 	}

-	/* Don't know yet how to handle interlaced or
-	 * double scan modes. Just no-op for now.
-	 */
-	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-		return -ENOTSUPP;
-	}
-
 	/* Get current scanout position with system timestamp.
 	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
 	 * if single query takes longer than max_error nanoseconds.

@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (rc) {
 		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-		smp_wmb();
 	}

+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }

 /**

@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;

 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)

@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 * e.g., due to spurious vblank interrupts. We need to
 	 * ignore those for accounting.
 	 */
-	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
 		/* Store new timestamp in ringbuffer. */
 		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-		smp_wmb();

 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
+		smp_mb__before_atomic_inc();
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);

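The abs() to abs64() changes in drm_irq.c matter because diff_ns is an s64: the plain abs() macro of this kernel generation evaluates through a long, so on 32-bit builds a large timestamp delta is truncated before the threshold test. A sketch of the corrected check (thresh_ns is a parameter here for illustration):

    #include <linux/kernel.h>	/* abs64() */
    #include <linux/types.h>

    static bool example_is_new_vblank(s64 diff_ns, s64 thresh_ns)
    {
    	/* abs64() keeps the full 64-bit magnitude on 32-bit builds. */
    	return abs64(diff_ns) > thresh_ns;
    }
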
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {

@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
-	int tile_width;
+	int tile_width, tile_height;

 	/* Linear is always fine */
 	if (tiling_mode == I915_TILING_NONE)

@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		}
 	}

+	if (IS_GEN2(dev) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_height = 32;
+	else
+		tile_height = 8;
+	/* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
+	 * number of tile rows. */
+	if (IS_GEN2(dev))
+		tile_height *= 2;
+
+	/* Size needs to be aligned to a full tile row */
+	if (size & (tile_height * stride - 1))
+		return false;
+
 	/* 965+ just needs multiples of tile width */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))

@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;

+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);

@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-		I915_WRITE(FDI_RXA_IMR, 0);
-		I915_WRITE(FDI_RXB_IMR, 0);
+		hotplug_mask |= SDE_AUX_MASK;
 	}

 	dev_priv->pch_irq_mask = ~hotplug_mask;

@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);

 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}

 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,

@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		   atomic_read(&obj->pending_flip) == 0);
 }

+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;

@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;

 	if (intel_crtc->active)
 		return;

@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}

-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
+
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&

@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		intel_flush_display_plane(dev, plane);
 	}

+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);

@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);

@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
 		POSTING_READ(RSTDBYCTL);
 	}

-	ironlake_disable_rc6(dev);
+	ironlake_teardown_rc6(dev);
 }

 static int ironlake_setup_rc6(struct drm_device *dev)

@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);

 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }

@@ -3490,7 +3490,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 	track->num_texture = 16;
 	track->maxy = 4096;
 	track->separate_cube = 0;
-	track->aaresolve = true;
+	track->aaresolve = false;
 	track->aa.robj = NULL;
 }

@@ -3801,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
-	/* Initialize GPU configuration (# pipes, ...) */
-//	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r100_enable_bm(rdev);

@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}

-	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
 		uint32_t ref_div;

 		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))

@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	u32 tiling_flags = 0;
 	int ret;
 	int aligned_size, size;
+	int height = mode_cmd->height;

 	/* need to align pitch with crtc limits */
 	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);

-	size = mode_cmd->pitch * mode_cmd->height;
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitch * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,

@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
 	{ "ad7414", 0 },
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);

 static struct i2c_driver ad7414_driver = {
 	.driver = {

@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
 	{ "adt7411", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);

 static struct i2c_driver adt7411_driver = {
 	.driver = {

@@ -847,11 +847,15 @@ omap_i2c_isr(int this_irq, void *dev_id)
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
 		}
+		/*
+		 * ProDB0017052: Clear ARDY bit twice
+		 */
 		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
 					OMAP_I2C_STAT_AL)) {
 			omap_i2c_ack_stat(dev, stat &
 				(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
-				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+				OMAP_I2C_STAT_ARDY));
 			omap_i2c_complete_cmd(dev, err);
 			return IRQ_HANDLED;
 		}

@@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev)
 	return 0;
 }

+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+			dev->bus->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+			dev->bus->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+	.suspend = omap_i2c_suspend,
+	.resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
 static struct platform_driver omap_i2c_driver = {
 	.probe		= omap_i2c_probe,
 	.remove		= omap_i2c_remove,
 	.driver		= {
 		.name	= "omap_i2c",
 		.owner	= THIS_MODULE,
+		.pm	= OMAP_I2C_PM_OPS,
 	},
 };

@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
 	adap->owner = THIS_MODULE;
 	/* DDC class but actually often used for more generic I2C */
 	adap->class = I2C_CLASS_DDC;
-	strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+	strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
 		sizeof(adap->name));
 	adap->nr = bus_nr;
 	adap->algo = &stu300_algo;

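The strncpy() to strlcpy() change in stu300_probe() is about guaranteed NUL termination. A sketch of the difference:

    #include <linux/string.h>

    static void example_copy(void)
    {
    	char name[8];

    	strncpy(name, "0123456789", sizeof(name)); /* no terminating NUL! */
    	strlcpy(name, "0123456789", sizeof(name)); /* "0123456" plus NUL */
    }
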
@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>

 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "

@@ -84,6 +85,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);

 static struct cpuidle_state *cpuidle_state_table;

+/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
 /*
 * Set this flag for states where the HW flushes the TLB for us
 * and so we don't need cross-calls to keep it consistent.

@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
 	.notifier_call = setup_broadcast_cpuhp_notify,
 };

+static void auto_demotion_disable(void *dummy)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+	msr_bits &= ~auto_demotion_disable_flags;
+	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
 * intel_idle_probe()
 */

@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
 	case 0x25:	/* Westmere */
 	case 0x2C:	/* Westmere */
 		cpuidle_state_table = nehalem_cstates;
+		auto_demotion_disable_flags =
+			(NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
 		break;

 	case 0x1C:	/* 28 - Atom Processor */
 		cpuidle_state_table = atom_cstates;
 		break;

 	case 0x26:	/* 38 - Lincroft Atom Processor */
 		cpuidle_state_table = atom_cstates;
+		auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
 		break;

 	case 0x2A:	/* SNB */

@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
 			return -EIO;
 		}
 	}
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);

 	return 0;
 }

@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
 	event->owner = owner;

 	list_add_tail(&event->node, &gameport_event_list);
-	schedule_work(&gameport_event_work);
+	queue_work(system_long_wq, &gameport_event_work);

 out:
 	spin_unlock_irqrestore(&gameport_event_lock, flags);

@@ -71,8 +71,9 @@ struct tegra_kbc {
 	spinlock_t lock;
 	unsigned int repoll_dly;
 	unsigned long cp_dly_jiffies;
+	bool use_fn_map;
 	const struct tegra_kbc_platform_data *pdata;
-	unsigned short keycode[KBC_MAX_KEY];
+	unsigned short keycode[KBC_MAX_KEY * 2];
 	unsigned short current_keys[KBC_MAX_KPENT];
 	unsigned int num_pressed_keys;
 	struct timer_list timer;

@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
 	KEY(15, 5, KEY_F2),
 	KEY(15, 6, KEY_CAPSLOCK),
 	KEY(15, 7, KEY_F6),

+	/* Software Handled Function Keys */
+	KEY(20, 0, KEY_KP7),
+
+	KEY(21, 0, KEY_KP9),
+	KEY(21, 1, KEY_KP8),
+	KEY(21, 2, KEY_KP4),
+	KEY(21, 4, KEY_KP1),
+
+	KEY(22, 1, KEY_KPSLASH),
+	KEY(22, 2, KEY_KP6),
+	KEY(22, 3, KEY_KP5),
+	KEY(22, 4, KEY_KP3),
+	KEY(22, 5, KEY_KP2),
+	KEY(22, 7, KEY_KP0),
+
+	KEY(27, 1, KEY_KPASTERISK),
+	KEY(27, 3, KEY_KPMINUS),
+	KEY(27, 4, KEY_KPPLUS),
+	KEY(27, 5, KEY_KPDOT),
+
+	KEY(28, 5, KEY_VOLUMEUP),
+
+	KEY(29, 3, KEY_HOME),
+	KEY(29, 4, KEY_END),
+	KEY(29, 5, KEY_BRIGHTNESSDOWN),
+	KEY(29, 6, KEY_VOLUMEDOWN),
+	KEY(29, 7, KEY_BRIGHTNESSUP),
+
+	KEY(30, 0, KEY_NUMLOCK),
+	KEY(30, 1, KEY_SCROLLLOCK),
+	KEY(30, 2, KEY_MUTE),
+
+	KEY(31, 4, KEY_HELP),
 };

 static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {

@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 	unsigned int i;
 	unsigned int num_down = 0;
 	unsigned long flags;
+	bool fn_keypress = false;

 	spin_lock_irqsave(&kbc->lock, flags);
 	for (i = 0; i < KBC_MAX_KPENT; i++) {

@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 					MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);

 			scancodes[num_down] = scancode;
-			keycodes[num_down++] = kbc->keycode[scancode];
+			keycodes[num_down] = kbc->keycode[scancode];
+			/* If driver uses Fn map, do not report the Fn key. */
+			if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+				fn_keypress = true;
+			else
+				num_down++;
 		}

 		val >>= 8;
 	}

+	/*
+	 * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+	 * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+	 */
+	if (fn_keypress) {
+		for (i = 0; i < num_down; i++) {
+			scancodes[i] += KBC_MAX_KEY;
+			keycodes[i] = kbc->keycode[scancodes[i]];
+		}
+	}
+
 	spin_unlock_irqrestore(&kbc->lock, flags);

 	tegra_kbc_report_released_keys(kbc->idev,

@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)

 	input_dev->keycode = kbc->keycode;
 	input_dev->keycodesize = sizeof(kbc->keycode[0]);
-	input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+	input_dev->keycodemax = KBC_MAX_KEY;
+	if (pdata->use_fn_map)
+		input_dev->keycodemax *= 2;

+	kbc->use_fn_map = pdata->use_fn_map;
 	keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
 	matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
 				   input_dev->keycode, input_dev->keybit);

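The tegra-kbc hunks double the keycode table so Fn-shifted keys live KBC_MAX_KEY entries above the plain ones. A sketch of the resulting lookup; the KBC_MAX_KEY value here is an assumption (16 rows x 8 columns, as the driver's matrix suggests):

    #include <linux/types.h>

    #define KBC_MAX_KEY (16 * 8)	/* assumed to match the driver */

    static unsigned short example_lookup(const unsigned short *keycode,
    				     unsigned int scancode, bool fn_held)
    {
    	if (fn_held)
    		scancode += KBC_MAX_KEY;	/* index into the Fn half */
    	return keycode[scancode];
    }
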
@@ -51,6 +51,29 @@
 #define SYN_EXT_CAP_REQUESTS(c)		(((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)	(((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)		(((ec) & 0xff0000) >> 16)

+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte	mask	name			meaning
+ * ----	----	-------			------------
+ * 1	0x01	adjustable threshold	capacitive button sensitivity
+ *					can be adjusted
+ * 1	0x02	report max		query 0x0d gives max coord reported
+ * 1	0x04	clearpad		sensor is ClearPad product
+ * 1	0x08	advanced gesture	not particularly meaningful
+ * 1	0x10	clickpad bit 0		1-button ClickPad
+ * 1	0x60	multifinger mode	identifies firmware finger counting
+ *					(not reporting!) algorithm.
+ *					Not particularly meaningful
+ * 1	0x80	covered pad		W clipped to 14, 15 == pad mostly covered
+ * 2	0x01	clickpad bit 1		2-button ClickPad
+ * 2	0x02	deluxe LED controls	touchpad support LED commands
+ *					ala multimedia control bar
+ * 2	0x04	reduced filtering	firmware does less filtering on
+ *					position data, driver should watch
+ *					for noise.
+ */
 #define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100000) /* 1-button ClickPad */
+#define SYN_CAP_CLICKPAD2BTN(ex0c)	((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c)	((ex0c) & 0x020000)

@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
 	event->owner = owner;

 	list_add_tail(&event->node, &serio_event_list);
-	schedule_work(&serio_event_work);
+	queue_work(system_long_wq, &serio_event_work);

 out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);

@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
 	stream interface.
 	If synchronous service was requested, then function
 	does return amount of data written to stream.
-	'final' does indicate that pice of data to be written is
+	'final' does indicate that piece of data to be written is
 	final part of frame (necessary only by structured datatransfer)
 	return 0 if zero lengh packet was written
 	return -1 if stream is full

@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)

 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);

 	if (!conf)

@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
 	mddev_t *mddev, *new = NULL;

+	if (unit && MAJOR(unit) != MD_MAJOR)
+		unit &= ~((1<<MdpMinorShift)-1);
+
 retry:
 	spin_lock(&all_mddevs_lock);

@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
 	}

 	mddev->array_sectors = sectors;
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	if (mddev->pers)
+	if (mddev->pers) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
 		revalidate_disk(mddev->gendisk);
-
+	}
 	return len;
 }

@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
 	}
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
 	return err;

@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
+	mddev->changed = 0;
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;

@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)

 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
+		mddev->changed = 1;
 		revalidate_disk(disk);

 		if (mddev->ro)

@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);

-	check_disk_size_change(mddev->gendisk, bdev);
+	check_disk_change(bdev);
 out:
 	return err;
 }

@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)

 	return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	mddev->changed = 0;
+	return 0;
+}
 static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,

@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
 	.compat_ioctl	= md_compat_ioctl,
 #endif
 	.getgeo		= md_getgeo,
+	.media_changed	= md_media_changed,
+	.revalidate_disk= md_revalidate,
 };

 static int md_thread(void * arg)

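The md changes wire the new mddev->changed flag into block_device_operations so the VFS rereads partition info after array size changes. A sketch of the pattern with an illustrative driver struct:

    #include <linux/genhd.h>

    struct example_dev {
    	int changed;	/* set when capacity/partitions may have changed */
    };

    static int example_media_changed(struct gendisk *disk)
    {
    	struct example_dev *d = disk->private_data;

    	return d->changed;	/* non-zero: block layer revalidates */
    }

    static int example_revalidate(struct gendisk *disk)
    {
    	struct example_dev *d = disk->private_data;

    	d->changed = 0;		/* change acknowledged */
    	return 0;
    }
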
@@ -274,6 +274,8 @@ struct mddev_s
 	atomic_t			active;		/* general refcount */
 	atomic_t			openers;	/* number of active opens */

+	int				changed;	/* True if we might need to
+							 * reread partition info */
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */

@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
Some files were not shown because too many files have changed in this diff.