powerpc/irqdomain: Fix broken NR_IRQ references
The switch from using irq_map to irq_alloc_desc*() for managing irq number
allocations introduced new bugs in some of the powerpc interrupt code.
Several functions rely on the value of NR_IRQS to determine the maximum irq
number that could get allocated.  However, with sparse_irq and using
irq_alloc_desc*() the maximum possible irq number is now specified with
'nr_irqs', which may be a number larger than NR_IRQS.  This has caused
breakage on powermac when CONFIG_NR_IRQS is set to 32.

This patch removes most of the direct references to NR_IRQS in the powerpc
code and replaces them with either a nr_irqs reference or by using the
common for_each_irq_desc() macro.  The powerpc-specific for_each_irq()
macro is removed at the same time.

Also, the Cell axon_msi driver is refactored to remove the global build
assumption on the size of NR_IRQS and instead add a limit to the maximum
irq number when calling irq_domain_add_nomap().

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
This commit is contained in:
parent 8751ed14dc
commit 4013369f37
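The conversion follows one pattern throughout the hunks below. As a rough sketch distilled from this patch (not verbatim from any single file), an open-coded scan bounded by the compile-time NR_IRQS:

	unsigned int i;

	for_each_irq(i) {	/* expands to: for ((i) = 0; (i) < NR_IRQS; ++(i)) */
		struct irq_desc *desc = irq_to_desc(i);

		if (!desc)	/* nothing allocated for this irq number */
			continue;
		/* ... operate on desc ... */
	}

becomes an iteration over allocated descriptors only, bounded by the runtime nr_irqs:

	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		/* desc is never NULL inside the loop body */
		/* ... operate on desc ... */
	}

Where NR_IRQS served only as an upper bound on irq numbers (the axon_msi cascade handler, beat_interrupt), it is replaced by nr_irqs, and the axon_msi BUILD_BUG_ON is dropped in favour of passing 65536 as the maximum irq number to irq_domain_add_nomap(), as the axon_msi_probe hunk shows.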
@@ -18,10 +18,6 @@
 #include <linux/atomic.h>
 
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;
 
 /* This number is used when no interrupt has been assigned */
@@ -330,14 +330,10 @@ void migrate_irqs(void)
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
@@ -23,14 +23,11 @@
 
 void machine_kexec_mask_interrupts(void) {
 	unsigned int i;
+	struct irq_desc *desc;
 
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
 
-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (rc)
 		return rc;
 
-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
 {
 	int i;
 
-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
@@ -57,9 +57,9 @@ static int max_real_irqs;
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
 
@@ -51,8 +51,7 @@
 static intctl_cpm2_t __iomem *cpm2_intctl;
 
 static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
 
 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;
 
 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);
 
-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;