linux/arch/arm/plat-omap/mailbox.c


/*
 * OMAP mailbox driver
 *
 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
 *
 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <plat/mailbox.h>

static struct workqueue_struct *mboxd;
static struct omap_mbox *mboxes;
static DEFINE_RWLOCK(mboxes_lock);
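
/*
 * Count of users that have gone through omap_mbox_startup(): the shared
 * hardware is configured via ops->startup() only for the first user and
 * shut down again when the count drops back to zero in omap_mbox_fini().
 * Updates are serialized by mboxes_lock.
 */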
static int mbox_configured;
/* Mailbox FIFO handle functions */
static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
{
	return mbox->ops->fifo_read(mbox);
}

static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
{
	mbox->ops->fifo_write(mbox, msg);
}

static inline int mbox_fifo_empty(struct omap_mbox *mbox)
{
	return mbox->ops->fifo_empty(mbox);
}

static inline int mbox_fifo_full(struct omap_mbox *mbox)
{
	return mbox->ops->fifo_full(mbox);
}
/* Mailbox IRQ handle functions */
static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	if (mbox->ops->ack_irq)
		mbox->ops->ack_irq(mbox, irq);
}

static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	return mbox->ops->is_irq(mbox, irq);
}

/*
* message sender
*/
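/*
 * Push a single message into the hardware FIFO.  If the FIFO is full,
 * TYPE2 mailboxes fail immediately (the caller re-enables the TX IRQ
 * and the send is retried from the TX tasklet), while other types
 * busy-wait for up to ~1000us before giving up.
 */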
static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
	int ret = 0, i = 1000;

	while (mbox_fifo_full(mbox)) {
		if (mbox->ops->type == OMAP_MBOX_TYPE2)
			return -1;
		if (--i == 0)
			return -1;
		udelay(1);
	}
	mbox_fifo_write(mbox, msg);

	return ret;
}
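
/*
 * Queue a message for transmission.  The message is stashed in a block
 * layer request on the TX queue and the TX tasklet is kicked to drain
 * it; may be called from atomic context (GFP_ATOMIC allocation).
 */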
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
	struct request *rq;
	struct request_queue *q = mbox->txq->queue;

	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
	if (unlikely(!rq))
		return -ENOMEM;

	blk_insert_request(q, rq, 0, (void *) msg);
	tasklet_schedule(&mbox->txq->tasklet);

	return 0;
}
EXPORT_SYMBOL(omap_mbox_msg_send);
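
/*
 * TX tasklet: drain queued requests into the hardware FIFO.  If
 * __mbox_msg_send() fails (FIFO still full), re-enable the TX interrupt
 * and requeue the request so transmission resumes from the interrupt path.
 */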
static void mbox_tx_tasklet(unsigned long tx_data)
{
	int ret;
	struct request *rq;
	struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
	struct request_queue *q = mbox->txq->queue;

	while (1) {
		rq = blk_fetch_request(q);
		if (!rq)
			break;

		ret = __mbox_msg_send(mbox, (mbox_msg_t)rq->special);
		if (ret) {
			omap_mbox_enable_irq(mbox, IRQ_TX);
			blk_requeue_request(q, rq);
			return;
		}
		blk_end_request_all(rq, 0);
	}
}

/*
 * Message receiver (workqueue)
*/
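/*
 * Runs in process context: pop received messages off the RX request
 * queue and hand them to the client's callback one by one.
 */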
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	struct omap_mbox *mbox = mq->queue->queuedata;
	struct request_queue *q = mbox->rxq->queue;
	struct request *rq;
	mbox_msg_t msg;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(q->queue_lock, flags);
		rq = blk_fetch_request(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		if (!rq)
			break;

		msg = (mbox_msg_t)rq->special;
		blk_end_request_all(rq, 0);
		mbox->rxq->callback((void *)msg);
	}
}

/*
* Mailbox interrupt handler
*/
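/*
 * The block layer requires a request_fn, but requests are drained by
 * the TX tasklet and the RX workqueue instead, so these are no-ops.
 */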
static void mbox_txq_fn(struct request_queue *q)
{
}

static void mbox_rxq_fn(struct request_queue *q)
{
}

static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
	omap_mbox_disable_irq(mbox, IRQ_TX);
	ack_mbox_irq(mbox, IRQ_TX);
	tasklet_schedule(&mbox->txq->tasklet);
}
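
/*
 * Move every pending message from the hardware FIFO into the RX request
 * queue and let the workqueue deliver them.  TYPE1 mailboxes take only
 * one message per interrupt.  If no request can be allocated, the RX
 * interrupt is left unacked so the remaining messages trigger it again.
 */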
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct request *rq;
	mbox_msg_t msg;
	struct request_queue *q = mbox->rxq->queue;

	while (!mbox_fifo_empty(mbox)) {
		rq = blk_get_request(q, WRITE, GFP_ATOMIC);
		if (unlikely(!rq))
			goto nomem;

		msg = mbox_fifo_read(mbox);

		blk_insert_request(q, rq, 0, (void *)msg);
		if (mbox->ops->type == OMAP_MBOX_TYPE1)
			break;
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	queue_work(mboxd, &mbox->rxq->work);
}

static irqreturn_t mbox_interrupt(int irq, void *p)
{
	struct omap_mbox *mbox = p;

	if (is_mbox_irq(mbox, IRQ_TX))
		__mbox_tx_interrupt(mbox);

	if (is_mbox_irq(mbox, IRQ_RX))
		__mbox_rx_interrupt(mbox);

	return IRQ_HANDLED;
}
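
/*
 * Allocate an omap_mbox_queue: a block request queue used as a message
 * FIFO plus an optional work item (RX path) or tasklet (TX path).
 */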
static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
					request_fn_proc *proc,
					void (*work) (struct work_struct *),
					void (*tasklet)(unsigned long))
{
	struct request_queue *q;
	struct omap_mbox_queue *mq;

	mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
	if (!mq)
		return NULL;

	spin_lock_init(&mq->lock);

	q = blk_init_queue(proc, &mq->lock);
	if (!q)
		goto error;
	q->queuedata = mbox;
	mq->queue = q;

	if (work)
		INIT_WORK(&mq->work, work);

	if (tasklet)
		tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
	return mq;
error:
	kfree(mq);
	return NULL;
}

static void mbox_queue_free(struct omap_mbox_queue *q)
{
	blk_cleanup_queue(q->queue);
	kfree(q);
}
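
/*
 * Bring the mailbox up for a new user: configure the shared hardware
 * via ops->startup() for the first user (refcounted by mbox_configured),
 * request the interrupt, and allocate the TX and RX queues.  Undone by
 * omap_mbox_fini().
 */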
static int omap_mbox_startup(struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox_queue *mq;

	if (likely(mbox->ops->startup)) {
		write_lock(&mboxes_lock);
		if (!mbox_configured)
			ret = mbox->ops->startup(mbox);

		if (unlikely(ret)) {
			write_unlock(&mboxes_lock);
			return ret;
		}
		mbox_configured++;
		write_unlock(&mboxes_lock);
	}

	ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
				mbox->name, mbox);
	if (unlikely(ret)) {
		printk(KERN_ERR
			"failed to register mailbox interrupt:%d\n", ret);
		goto fail_request_irq;
	}

	mq = mbox_queue_alloc(mbox, mbox_txq_fn, NULL, mbox_tx_tasklet);
	if (!mq) {
		ret = -ENOMEM;
		goto fail_alloc_txq;
	}
	mbox->txq = mq;

	mq = mbox_queue_alloc(mbox, mbox_rxq_fn, mbox_rx_work, NULL);
	if (!mq) {
		ret = -ENOMEM;
		goto fail_alloc_rxq;
	}
	mbox->rxq = mq;

	return 0;

fail_alloc_rxq:
	mbox_queue_free(mbox->txq);
fail_alloc_txq:
	free_irq(mbox->irq, mbox);
fail_request_irq:
	if (unlikely(mbox->ops->shutdown))
		mbox->ops->shutdown(mbox);

	return ret;
}

static void omap_mbox_fini(struct omap_mbox *mbox)
{
	mbox_queue_free(mbox->txq);
	mbox_queue_free(mbox->rxq);
	free_irq(mbox->irq, mbox);

	if (unlikely(mbox->ops->shutdown)) {
		write_lock(&mboxes_lock);
		if (mbox_configured > 0)
			mbox_configured--;

		if (!mbox_configured)
			mbox->ops->shutdown(mbox);
		write_unlock(&mboxes_lock);
	}
}
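
/*
 * mboxes is a singly linked list of registered mailboxes, protected by
 * mboxes_lock.  find_mboxes() returns the address of the link that
 * points at the named mailbox (or at the terminating NULL), so callers
 * can insert or unlink in place.
 */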
static struct omap_mbox **find_mboxes(const char *name)
{
	struct omap_mbox **p;

	for (p = &mboxes; *p; p = &(*p)->next) {
		if (strcmp((*p)->name, name) == 0)
			break;
	}

	return p;
}

struct omap_mbox *omap_mbox_get(const char *name)
{
	struct omap_mbox *mbox;
	int ret;

	read_lock(&mboxes_lock);
	mbox = *(find_mboxes(name));
	if (mbox == NULL) {
		read_unlock(&mboxes_lock);
		return ERR_PTR(-ENOENT);
	}

	read_unlock(&mboxes_lock);

	ret = omap_mbox_startup(mbox);
	if (ret)
		return ERR_PTR(-ENODEV);

	return mbox;
}
EXPORT_SYMBOL(omap_mbox_get);

void omap_mbox_put(struct omap_mbox *mbox)
{
	omap_mbox_fini(mbox);
}
EXPORT_SYMBOL(omap_mbox_put);
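
/*
 * Add a mailbox to the global list.  Fails with -EBUSY if the mailbox
 * is already linked or another mailbox with the same name is registered.
 */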
int omap_mbox_register(struct device *parent, struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox **tmp;

	if (!mbox)
		return -EINVAL;
	if (mbox->next)
		return -EBUSY;

	write_lock(&mboxes_lock);
	tmp = find_mboxes(mbox->name);
	if (*tmp) {
		ret = -EBUSY;
		write_unlock(&mboxes_lock);
		goto err_find;
	}
	*tmp = mbox;
	write_unlock(&mboxes_lock);

	return 0;

err_find:
	return ret;
}
EXPORT_SYMBOL(omap_mbox_register);

int omap_mbox_unregister(struct omap_mbox *mbox)
{
	struct omap_mbox **tmp;

	write_lock(&mboxes_lock);
	tmp = &mboxes;
	while (*tmp) {
		if (mbox == *tmp) {
			*tmp = mbox->next;
			mbox->next = NULL;
			write_unlock(&mboxes_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&mboxes_lock);

	return -EINVAL;
}
EXPORT_SYMBOL(omap_mbox_unregister);

static int __init omap_mbox_init(void)
{
	mboxd = create_workqueue("mboxd");
	if (!mboxd)
		return -ENOMEM;

	return 0;
}
module_init(omap_mbox_init);

static void __exit omap_mbox_exit(void)
{
	destroy_workqueue(mboxd);
}
module_exit(omap_mbox_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi and Hiroshi DOYU");