From edde08f2a8f13a648ab6d26f33e88d0c6146f3d1 Mon Sep 17 00:00:00 2001
From: Harvey Harrison
Date: Fri, 8 Feb 2008 04:19:57 -0800
Subject: [PATCH] misc: removal of final callers using fastcall

Signed-off-by: Harvey Harrison
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/sparc64/kernel/irq.c | 2 +-
 include/asm-arm/mutex.h   | 6 +++---
 mm/page_alloc.c           | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 30431bd24e1e..5ec06c8c7fea 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -522,7 +522,7 @@ static struct irq_chip sun4v_virq = {
 	.set_affinity	= sun4v_virt_set_affinity,
 };
 
-static void fastcall pre_flow_handler(unsigned int virt_irq,
+static void pre_flow_handler(unsigned int virt_irq,
 				      struct irq_desc *desc)
 {
 	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
index cb29d84e690d..020bd98710a1 100644
--- a/include/asm-arm/mutex.h
+++ b/include/asm-arm/mutex.h
@@ -24,7 +24,7 @@
  * reattempted until it succeeds.
  */
 static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	int __ex_flag, __res;
 
@@ -44,7 +44,7 @@ __mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
 }
 
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	int __ex_flag, __res;
 
@@ -70,7 +70,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *
  * better generated assembly.
  */
 static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	int __ex_flag, __res, __orig;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 26a54a17dc9f..75b979313346 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1451,7 +1451,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page * fastcall
+struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
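
Background note, not part of the patch itself: the sketch below is a minimal approximation of what the fastcall annotation historically meant on i386 (register argument passing via regparm(3)) and why it expanded to nothing elsewhere, which is why the hunks above can simply delete it. The fallback macro and the demo function are illustrative assumptions, not code taken from any kernel header.

/*
 * Background sketch only.  On i386 the kernel's fastcall annotation
 * expanded to regparm(3), asking the compiler to pass the first three
 * arguments in registers; on other architectures it expanded to
 * nothing.  Once the tree was built with register argument passing as
 * the compiler default, the annotation became redundant and the
 * remaining users could drop it.  This is an approximation for
 * illustration, not a kernel header.
 */
#include <stdio.h>

#ifdef __i386__
#define fastcall __attribute__((regparm(3)))
#else
#define fastcall
#endif

static void fastcall show_sum(unsigned int a, unsigned int b)
{
	printf("%u\n", a + b);
}

int main(void)
{
	show_sum(2, 3);		/* call site looks the same either way */
	return 0;
}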