Couple of cleanup patches
-----BEGIN PGP SIGNATURE-----

iIoEABYIADIWIQQW3WBGcnu5yJnSXn0kTJLX0iGMLAUCXoI7uxQcdG9ueS5sdWNr
QGludGVsLmNvbQAKCRAkTJLX0iGMLPQ6AP9sOGL/6nzqGKx7+GWP4LCPWqSrNBsh
nvGoDhx86lOuDgEAryhKYNSPWCW9eLFd8YguSRKdSav8Ed0HtR2kbYiv8wM=
=qgHx
-----END PGP SIGNATURE-----

Merge tag 'please-pull-ia64_for_5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 updates from Tony Luck:
 "Couple of cleanup patches"

* tag 'please-pull-ia64_for_5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  tty/serial: cleanup after ioc*_serial driver removal
  ia64: replace setup_irq() by request_irq()
commit cad18da0af
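The setup_irq() to request_irq() patch in this pull is a mechanical conversion: each static struct irqaction that used to be passed to setup_irq() is dropped, and the handler, flags and name are passed straight to request_irq() instead. The sketch below is illustrative only and is not taken from the series; the example_* names are hypothetical placeholders.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/printk.h>

/* Handler shared by both variants of the sketch. */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* Before: a static struct irqaction wired up with setup_irq(). */
static struct irqaction example_irqaction = {
	.handler =	example_handler,
	.name =		"example"
};

static void __init example_register_old(unsigned int irq)
{
	setup_irq(irq, &example_irqaction);
}

/*
 * After: request_irq() builds the irqaction internally, so the static
 * struct goes away and handler, flags and name are passed directly.
 */
static void __init example_register_new(unsigned int irq)
{
	if (request_irq(irq, example_handler, 0, "example", NULL))
		pr_err("Failed to request irq %u (example)\n", irq);
}

Unlike setup_irq(), request_irq() allocates its irqaction and can fail, which is why the converted call sites in the diff below now check the return value and log an error.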
MAINTAINERS

@@ -7978,6 +7978,7 @@ L: linux-ia64@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
 S: Maintained
 F: arch/ia64/
+F: Documentation/ia64/
 
 IBM Power 842 compression accelerator
 M: Haren Myneni <haren@us.ibm.com>
@@ -15095,14 +15096,6 @@ M: Dimitri Sivanich <sivanich@sgi.com>
 S: Maintained
 F: drivers/misc/sgi-gru/
 
-SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER
-M: Pat Gefre <pfg@sgi.com>
-L: linux-ia64@vger.kernel.org
-S: Supported
-F: Documentation/ia64/serial.rst
-F: drivers/tty/serial/ioc?_serial.c
-F: include/linux/ioc?.h
-
 SGI XP/XPC/XPNET DRIVER
 M: Cliff Whickman <cpw@sgi.com>
 M: Robin Holt <robinmholt@gmail.com>
arch/ia64/include/asm/hw_irq.h

@@ -113,7 +113,6 @@ extern struct irq_chip irq_type_ia64_lsapic; /* CPU-internal interrupt controlle
 #define ia64_register_ipi	ia64_native_register_ipi
 #define assign_irq_vector	ia64_native_assign_irq_vector
 #define free_irq_vector		ia64_native_free_irq_vector
-#define register_percpu_irq	ia64_native_register_percpu_irq
 #define ia64_resend_irq		ia64_native_resend_irq
 
 extern void ia64_native_register_ipi(void);
@@ -123,7 +122,6 @@ extern void ia64_native_free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
 extern void __setup_vector_irq(int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
-extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
 extern void destroy_and_reserve_irq (unsigned int irq);
 
 #ifdef CONFIG_SMP
arch/ia64/kernel/irq.h (new file)

@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+extern void register_percpu_irq(ia64_vector vec, irq_handler_t handler,
+				unsigned long flags, const char *name);
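For context (not part of the patch text): this new header gives mca.c, perfmon.c and time.c a prototype for the register_percpu_irq() helper now defined in irq_ia64.c. A minimal, hypothetical caller would look like the sketch below; IA64_EXAMPLE_VECTOR and example_percpu_handler are made-up names for illustration.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/hw_irq.h>		/* ia64_vector */
#include "irq.h"		/* the prototype added above */

/* Hypothetical vector number, for illustration only. */
#define IA64_EXAMPLE_VECTOR	((ia64_vector)0xec)

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	/* per-CPU interrupt handling would go here */
	return IRQ_HANDLED;
}

static void __init example_init(void)
{
	register_percpu_irq(IA64_EXAMPLE_VECTOR, example_percpu_handler, 0,
			    "example");
}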
arch/ia64/kernel/irq_ia64.c

@@ -351,11 +351,6 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction irq_move_irqaction = {
-	.handler =	smp_irq_move_cleanup_interrupt,
-	.name =		"irq_move"
-};
-
 static int __init parse_vector_domain(char *arg)
 {
 	if (!arg)
@@ -586,28 +581,15 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static struct irqaction ipi_irqaction = {
-	.handler =	handle_IPI,
-	.name =		"IPI"
-};
-
 /*
  * KVM uses this interrupt to force a cpu out of guest mode
  */
-static struct irqaction resched_irqaction = {
-	.handler =	dummy_handler,
-	.name =		"resched"
-};
-
-static struct irqaction tlb_irqaction = {
-	.handler =	dummy_handler,
-	.name =		"tlb_flush"
-};
 
 #endif
 
 void
-ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
+register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
+		    const char *name)
 {
 	unsigned int irq;
 
@@ -615,8 +597,9 @@ ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	irq_set_status_flags(irq, IRQ_PER_CPU);
 	irq_set_chip(irq, &irq_type_ia64_lsapic);
-	if (action)
-		setup_irq(irq, action);
+	if (handler)
+		if (request_irq(irq, handler, flags, name, NULL))
+			pr_err("Failed to request irq %u (%s)\n", irq, name);
 	irq_set_handler(irq, handle_percpu_irq);
 }
 
@@ -624,9 +607,10 @@ void __init
 ia64_native_register_ipi(void)
 {
 #ifdef CONFIG_SMP
-	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
-	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
-	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
+	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
+	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
+			    "tlb_flush");
 #endif
 }
 
@@ -635,10 +619,13 @@ init_IRQ (void)
 {
 	acpi_boot_init();
 	ia64_register_ipi();
-	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
 #ifdef CONFIG_SMP
-	if (vector_domain_type != VECTOR_DOMAIN_NONE)
-		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
+				    smp_irq_move_cleanup_interrupt, 0,
+				    "irq_move");
+	}
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
arch/ia64/kernel/mca.c

@@ -104,6 +104,7 @@
 
 #include "mca_drv.h"
 #include "entry.h"
+#include "irq.h"
 
 #if defined(IA64_MCA_DEBUG_INFO)
 # define IA64_MCA_DEBUG(fmt...) printk(fmt)
@@ -1766,36 +1767,6 @@ ia64_mca_disable_cpe_polling(char *str)
 
 __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 
-static struct irqaction cmci_irqaction = {
-	.handler =	ia64_mca_cmc_int_handler,
-	.name =		"cmc_hndlr"
-};
-
-static struct irqaction cmcp_irqaction = {
-	.handler =	ia64_mca_cmc_int_caller,
-	.name =		"cmc_poll"
-};
-
-static struct irqaction mca_rdzv_irqaction = {
-	.handler =	ia64_mca_rendez_int_handler,
-	.name =		"mca_rdzv"
-};
-
-static struct irqaction mca_wkup_irqaction = {
-	.handler =	ia64_mca_wakeup_int_handler,
-	.name =		"mca_wkup"
-};
-
-static struct irqaction mca_cpe_irqaction = {
-	.handler =	ia64_mca_cpe_int_handler,
-	.name =		"cpe_hndlr"
-};
-
-static struct irqaction mca_cpep_irqaction = {
-	.handler =	ia64_mca_cpe_int_caller,
-	.name =		"cpe_poll"
-};
-
 /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on
  * these stacks can never sleep, they cannot return from the kernel to user
  * space, they do not appear in a normal ps listing. So there is no need to
@@ -2056,18 +2027,23 @@ void __init ia64_mca_irq_init(void)
	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
-	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
-	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
+	register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0,
+			    "cmc_hndlr");
+	register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0,
+			    "cmc_poll");
 	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */
 
 	/* Setup the MCA rendezvous interrupt vector */
-	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
+	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler,
+			    0, "mca_rdzv");
 
 	/* Setup the MCA wakeup interrupt vector */
-	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
+	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler,
+			    0, "mca_wkup");
 
 	/* Setup the CPEI/P handler */
-	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+	register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0,
+			    "cpe_poll");
 }
 
 /*
@@ -2108,7 +2084,9 @@ ia64_mca_late_init(void)
 	if (irq > 0) {
 		cpe_poll_enabled = 0;
 		irq_set_status_flags(irq, IRQ_PER_CPU);
-		setup_irq(irq, &mca_cpe_irqaction);
+		if (request_irq(irq, ia64_mca_cpe_int_handler,
+				0, "cpe_hndlr", NULL))
+			pr_err("Failed to register cpe_hndlr interrupt\n");
 		ia64_cpe_irq = irq;
 		ia64_mca_register_cpev(cpe_vector);
 		IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
arch/ia64/kernel/perfmon.c

@@ -57,6 +57,8 @@
 #include <linux/uaccess.h>
 #include <asm/delay.h>
 
+#include "irq.h"
+
 #ifdef CONFIG_PERFMON
 /*
  * perfmon context state
@@ -6313,11 +6315,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 	}
 }
 
-static struct irqaction perfmon_irqaction = {
-	.handler =	pfm_interrupt_handler,
-	.name =		"perfmon"
-};
-
 static void
 pfm_alt_save_pmu_state(void *data)
 {
@@ -6591,7 +6588,8 @@ pfm_init_percpu (void)
 	pfm_unfreeze_pmu();
 
 	if (first_time) {
-		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+		register_percpu_irq(IA64_PERFMON_VECTOR, pfm_interrupt_handler,
+				    0, "perfmon");
 		first_time=0;
 	}
 
arch/ia64/kernel/time.c

@@ -32,6 +32,7 @@
 #include <asm/sections.h>
 
 #include "fsyscall_gtod_data.h"
+#include "irq.h"
 
 static u64 itc_get_cycles(struct clocksource *cs);
 
@@ -380,13 +381,6 @@ static u64 itc_get_cycles(struct clocksource *cs)
 	return now;
 }
 
-
-static struct irqaction timer_irqaction = {
-	.handler =	timer_interrupt,
-	.flags =	IRQF_IRQPOLL,
-	.name =		"timer"
-};
-
 void read_persistent_clock64(struct timespec64 *ts)
 {
 	efi_gettimeofday(ts);
@@ -395,7 +389,8 @@ void read_persistent_clock64(struct timespec64 *ts)
 void __init
 time_init (void)
 {
-	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
+	register_percpu_irq(IA64_TIMER_VECTOR, timer_interrupt, IRQF_IRQPOLL,
+			    "timer");
 	ia64_init_itm();
 }
 
include/linux/ioc3.h (deleted)

@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
- */
-
-#ifndef _LINUX_IOC3_H
-#define _LINUX_IOC3_H
-
-#include <asm/sn/ioc3.h>
-
-#define IOC3_MAX_SUBMODULES	32
-
-#define IOC3_CLASS_NONE		0
-#define IOC3_CLASS_BASE_IP27	1
-#define IOC3_CLASS_BASE_IP30	2
-#define IOC3_CLASS_MENET_123	3
-#define IOC3_CLASS_MENET_4	4
-#define IOC3_CLASS_CADDUO	5
-#define IOC3_CLASS_SERIAL	6
-
-/* One of these per IOC3 */
-struct ioc3_driver_data {
-	struct list_head list;
-	int id;				/* IOC3 sequence number */
-	/* PCI mapping */
-	unsigned long pma;		/* physical address */
-	struct ioc3 __iomem *vma;	/* pointer to registers */
-	struct pci_dev *pdev;		/* PCI device */
-	/* IRQ stuff */
-	int dual_irq;			/* set if separate IRQs are used */
-	int irq_io, irq_eth;		/* IRQ numbers */
-	/* GPIO magic */
-	spinlock_t gpio_lock;
-	unsigned int gpdr_shadow;
-	/* NIC identifiers */
-	char nic_part[32];
-	char nic_serial[16];
-	char nic_mac[6];
-	/* submodule set */
-	int class;
-	void *data[IOC3_MAX_SUBMODULES];	/* for submodule use */
-	int active[IOC3_MAX_SUBMODULES];	/* set if probe succeeds */
-	/* is_ir_lock must be held while
-	 * modifying sio_ie values, so
-	 * we can be sure that sio_ie is
-	 * not changing when we read it
-	 * along with sio_ir.
-	 */
-	spinlock_t ir_lock;	/* SIO_IE[SC] mod lock */
-};
-
-/* One per submodule */
-struct ioc3_submodule {
-	char *name;		/* descriptive submodule name */
-	struct module *owner;	/* owning kernel module */
-	int ethernet;		/* set for ethernet drivers */
-	int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *);
-	int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *);
-	int id;			/* assigned by IOC3, index for the "data" array */
-	/* IRQ stuff */
-	unsigned int irq_mask;	/* IOC3 IRQ mask, leave clear for Ethernet */
-	int reset_mask;		/* non-zero if you want the ioc3.c module to reset interrupts */
-	int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-	/* private submodule data */
-	void *data;		/* assigned by submodule */
-};
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-#define IOC3_W_IES		0
-#define IOC3_W_IEC		1
-
-/* registers a submodule for all existing and future IOC3 chips */
-extern int ioc3_register_submodule(struct ioc3_submodule *);
-/* unregisters a submodule */
-extern void ioc3_unregister_submodule(struct ioc3_submodule *);
-/* enables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* ackowledges specified IRQs */
-extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* disables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* atomically sets GPCR bits */
-extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int);
-/* general ireg writer */
-extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg);
-
-#endif