mirror of https://gitee.com/openkylin/linux.git
bus: mhi: core: Add support for basic PM operations
This commit adds support for basic MHI PM operations such as mhi_async_power_up, mhi_sync_power_up, and mhi_power_down. These routines place the MHI bus into the respective power domain states and call the state_transition APIs when necessary. The MHI controller driver is expected to call these PM routines for MHI power-up and power-down.

This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/989

Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
Signed-off-by: Siddartha Mohanadoss <smohanad@codeaurora.org>
[mani: splitted the pm patch and cleaned up for upstream]
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Reviewed-by: Jeffrey Hugo <jhugo@codeaurora.org>
Tested-by: Jeffrey Hugo <jhugo@codeaurora.org>
Link: https://lore.kernel.org/r/20200220095854.4804-8-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a6e2e3522f
commit 3000f85b8f
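As an illustration of how the new entry points fit together, the sketch below shows a hypothetical controller (glue) driver powering the bus up in probe and down in remove. It is only a sketch: struct my_dev, my_mhi_config, and the my_controller_probe/remove names are placeholders and not part of this commit; mhi_register_controller() and mhi_unregister_controller() come from the earlier registration patches.

#include <linux/mhi.h>

/* Hypothetical glue-driver private data; only the MHI controller matters here */
struct my_dev {
        struct mhi_controller *mhi_cntrl;
};

/* Channel/event ring layout elided; filled in by the controller driver */
static struct mhi_controller_config my_mhi_config;

static int my_controller_probe(struct my_dev *mdev)
{
        struct mhi_controller *mhi_cntrl = mdev->mhi_cntrl;
        int ret;

        ret = mhi_register_controller(mhi_cntrl, &my_mhi_config);
        if (ret)
                return ret;

        /* Optional: keep the device context allocated across power cycles */
        ret = mhi_prepare_for_power_up(mhi_cntrl);
        if (ret)
                goto err_unregister;

        /* mhi_async_power_up() also works; the sync variant waits for mission mode */
        ret = mhi_sync_power_up(mhi_cntrl);
        if (ret)
                goto err_unprepare;

        return 0;

err_unprepare:
        mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
        mhi_unregister_controller(mhi_cntrl);
        return ret;
}

static void my_controller_remove(struct my_dev *mdev)
{
        struct mhi_controller *mhi_cntrl = mdev->mhi_cntrl;

        mhi_power_down(mhi_cntrl, true);        /* graceful shutdown */
        mhi_unprepare_after_power_down(mhi_cntrl);
        mhi_unregister_controller(mhi_cntrl);
}

Whether the prepare/unprepare pair is used decides whether mhi_power_down() frees the device context itself (pre_init false) or leaves it allocated for the next power cycle, as the pm.c hunk below shows.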
@@ -1,3 +1,3 @@
 obj-$(CONFIG_MHI_BUS) := mhi.o
-mhi-y := init.o main.o pm.o
+mhi-y := init.o main.o pm.o boot.o
@@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info *image_info)
{
        int i;
        struct mhi_buf *mhi_buf = image_info->mhi_buf;

        for (i = 0; i < image_info->entries; i++, mhi_buf++)
                mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
                                  mhi_buf->dma_addr);

        kfree(image_info->mhi_buf);
        kfree(image_info);
}

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info **image_info,
                         size_t alloc_size)
{
        size_t seg_size = mhi_cntrl->seg_len;
        int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
        int i;
        struct image_info *img_info;
        struct mhi_buf *mhi_buf;

        img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
        if (!img_info)
                return -ENOMEM;

        /* Allocate memory for entries */
        img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
                                    GFP_KERNEL);
        if (!img_info->mhi_buf)
                goto error_alloc_mhi_buf;

        /* Allocate and populate vector table */
        mhi_buf = img_info->mhi_buf;
        for (i = 0; i < segments; i++, mhi_buf++) {
                size_t vec_size = seg_size;

                /* Vector table is the last entry */
                if (i == segments - 1)
                        vec_size = sizeof(struct bhi_vec_entry) * i;

                mhi_buf->len = vec_size;
                mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
                                                  &mhi_buf->dma_addr,
                                                  GFP_KERNEL);
                if (!mhi_buf->buf)
                        goto error_alloc_segment;
        }

        img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
        img_info->entries = segments;
        *image_info = img_info;

        return 0;

error_alloc_segment:
        for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
                mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
                                  mhi_buf->dma_addr);

error_alloc_mhi_buf:
        kfree(img_info);

        return -ENOMEM;
}
@@ -73,6 +73,293 @@ const char *to_mhi_pm_state_str(enum mhi_pm_state state)
        return mhi_pm_state_str[index];
}

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
                                  struct mhi_ring *ring,
                                  u64 len)
{
        ring->alloc_size = len + (len - 1);
        ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
                                               &ring->dma_handle, GFP_KERNEL);
        if (!ring->pre_aligned)
                return -ENOMEM;

        ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
        ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

        return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
        int i;
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
        }

        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i, ret;

        /* Setup BHI_INTVEC IRQ */
        ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
                                   mhi_intvec_threaded_handler,
                                   IRQF_SHARED | IRQF_NO_SUSPEND,
                                   "bhi", mhi_cntrl);
        if (ret)
                return ret;

        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
                                  mhi_irq_handler,
                                  IRQF_SHARED | IRQF_NO_SUSPEND,
                                  "mhi", mhi_event);
                if (ret) {
                        dev_err(dev, "Error requesting irq:%d for ev:%d\n",
                                mhi_cntrl->irq[mhi_event->irq], i);
                        goto error_request;
                }
        }

        return 0;

error_request:
        for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
                if (mhi_event->offload_ev)
                        continue;

                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
        }
        free_irq(mhi_cntrl->irq[0], mhi_cntrl);

        return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
        int i;
        struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event *mhi_event;
        struct mhi_ring *ring;

        mhi_cmd = mhi_cntrl->mhi_cmd;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
                ring = &mhi_cmd->ring;
                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
                ring->base = NULL;
                ring->iommu_base = 0;
        }

        mhi_free_coherent(mhi_cntrl,
                          sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
                          mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                ring = &mhi_event->ring;
                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
                ring->base = NULL;
                ring->iommu_base = 0;
        }

        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
                          mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
                          mhi_ctxt->er_ctxt_addr);

        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
                          mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
                          mhi_ctxt->chan_ctxt_addr);

        kfree(mhi_ctxt);
        mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
        struct mhi_ctxt *mhi_ctxt;
        struct mhi_chan_ctxt *chan_ctxt;
        struct mhi_event_ctxt *er_ctxt;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_chan *mhi_chan;
        struct mhi_event *mhi_event;
        struct mhi_cmd *mhi_cmd;
        u32 tmp;
        int ret = -ENOMEM, i;

        atomic_set(&mhi_cntrl->dev_wake, 0);
        atomic_set(&mhi_cntrl->pending_pkts, 0);

        mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
        if (!mhi_ctxt)
                return -ENOMEM;

        /* Setup channel ctxt */
        mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                 sizeof(*mhi_ctxt->chan_ctxt) *
                                                 mhi_cntrl->max_chan,
                                                 &mhi_ctxt->chan_ctxt_addr,
                                                 GFP_KERNEL);
        if (!mhi_ctxt->chan_ctxt)
                goto error_alloc_chan_ctxt;

        mhi_chan = mhi_cntrl->mhi_chan;
        chan_ctxt = mhi_ctxt->chan_ctxt;
        for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
                /* Skip if it is an offload channel */
                if (mhi_chan->offload_ch)
                        continue;

                tmp = chan_ctxt->chcfg;
                tmp &= ~CHAN_CTX_CHSTATE_MASK;
                tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
                tmp &= ~CHAN_CTX_BRSTMODE_MASK;
                tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
                tmp &= ~CHAN_CTX_POLLCFG_MASK;
                tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
                chan_ctxt->chcfg = tmp;

                chan_ctxt->chtype = mhi_chan->type;
                chan_ctxt->erindex = mhi_chan->er_index;

                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
                mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
        }

        /* Setup event context */
        mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                               sizeof(*mhi_ctxt->er_ctxt) *
                                               mhi_cntrl->total_ev_rings,
                                               &mhi_ctxt->er_ctxt_addr,
                                               GFP_KERNEL);
        if (!mhi_ctxt->er_ctxt)
                goto error_alloc_er_ctxt;

        er_ctxt = mhi_ctxt->er_ctxt;
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
                     mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip if it is an offload event */
                if (mhi_event->offload_ev)
                        continue;

                tmp = er_ctxt->intmod;
                tmp &= ~EV_CTX_INTMODC_MASK;
                tmp &= ~EV_CTX_INTMODT_MASK;
                tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
                er_ctxt->intmod = tmp;

                er_ctxt->ertype = MHI_ER_TYPE_VALID;
                er_ctxt->msivec = mhi_event->irq;
                mhi_event->db_cfg.db_mode = true;

                ring->el_size = sizeof(struct mhi_tre);
                ring->len = ring->el_size * ring->elements;
                ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
                if (ret)
                        goto error_alloc_er;

                /*
                 * If the read pointer equals to the write pointer, then the
                 * ring is empty
                 */
                ring->rp = ring->wp = ring->base;
                er_ctxt->rbase = ring->iommu_base;
                er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
                er_ctxt->rlen = ring->len;
                ring->ctxt_wp = &er_ctxt->wp;
        }

        /* Setup cmd context */
        mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                sizeof(*mhi_ctxt->cmd_ctxt) *
                                                NR_OF_CMD_RINGS,
                                                &mhi_ctxt->cmd_ctxt_addr,
                                                GFP_KERNEL);
        if (!mhi_ctxt->cmd_ctxt)
                goto error_alloc_er;

        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->el_size = sizeof(struct mhi_tre);
                ring->elements = CMD_EL_PER_RING;
                ring->len = ring->el_size * ring->elements;
                ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
                if (ret)
                        goto error_alloc_cmd;

                ring->rp = ring->wp = ring->base;
                cmd_ctxt->rbase = ring->iommu_base;
                cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
                cmd_ctxt->rlen = ring->len;
                ring->ctxt_wp = &cmd_ctxt->wp;
        }

        mhi_cntrl->mhi_ctxt = mhi_ctxt;

        return 0;

error_alloc_cmd:
        for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
        }
        mhi_free_coherent(mhi_cntrl,
                          sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
                          mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
        i = mhi_cntrl->total_ev_rings;
        mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
        for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
                struct mhi_ring *ring = &mhi_event->ring;

                if (mhi_event->offload_ev)
                        continue;

                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
        }
        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
                          mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
                          mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
                          mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
                          mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
        kfree(mhi_ctxt);

        return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
        u32 val;
@@ -553,6 +840,41 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
        int ret;

        mutex_lock(&mhi_cntrl->pm_mutex);

        ret = mhi_init_dev_ctxt(mhi_cntrl);
        if (ret)
                goto error_dev_ctxt;

        mhi_cntrl->pre_init = true;

        mutex_unlock(&mhi_cntrl->pm_mutex);

        return 0;

error_dev_ctxt:
        mutex_unlock(&mhi_cntrl->pm_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
        if (mhi_cntrl->fbc_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
                mhi_cntrl->fbc_image = NULL;
        }

        mhi_deinit_dev_ctxt(mhi_cntrl);
        mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
@@ -563,6 +563,11 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info *image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
                                        struct mhi_controller *mhi_cntrl,
@@ -604,5 +609,34 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,

/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);

/* Memory allocation methods */
static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
                                       size_t size,
                                       dma_addr_t *dma_handle,
                                       gfp_t gfp)
{
        void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
                                       gfp);

        return buf;
}

static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
                                     size_t size,
                                     void *vaddr,
                                     dma_addr_t dma_handle)
{
        dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
}

/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

#endif /* _MHI_INT_H */
@@ -142,6 +142,11 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
        return ret ? MHI_STATE_MAX : state;
}

static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
        return (addr - ring->iommu_base) + ring->base;
}

int mhi_destroy_device(struct device *dev, void *data)
{
        struct mhi_device *mhi_dev;
@@ -248,3 +253,85 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
                put_device(&mhi_dev->dev);
        }
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
        struct mhi_event *mhi_event = dev;
        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
        struct mhi_event_ctxt *er_ctxt =
                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
        struct mhi_ring *ev_ring = &mhi_event->ring;
        void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

        /* Only proceed if event ring has pending events */
        if (ev_ring->rp == dev_rp)
                return IRQ_HANDLED;

        /* For client managed event ring, notify pending data */
        if (mhi_event->cl_manage) {
                struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
                struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

                if (mhi_dev)
                        mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
        } else {
                tasklet_schedule(&mhi_event->task);
        }

        return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
{
        struct mhi_controller *mhi_cntrl = dev;
        enum mhi_state state = MHI_STATE_MAX;
        enum mhi_pm_state pm_state = 0;
        enum mhi_ee_type ee = 0;

        write_lock_irq(&mhi_cntrl->pm_lock);
        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
                state = mhi_get_mhi_state(mhi_cntrl);
                ee = mhi_cntrl->ee;
                mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
        }

        if (state == MHI_STATE_SYS_ERR) {
                dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
                pm_state = mhi_tryset_pm_state(mhi_cntrl,
                                               MHI_PM_SYS_ERR_DETECT);
        }
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /* If device in RDDM don't bother processing SYS error */
        if (mhi_cntrl->ee == MHI_EE_RDDM) {
                if (mhi_cntrl->ee != ee) {
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
                        wake_up_all(&mhi_cntrl->state_event);
                }
                goto exit_intvec;
        }

        if (pm_state == MHI_PM_SYS_ERR_DETECT) {
                wake_up_all(&mhi_cntrl->state_event);

                /* For fatal errors, we let controller decide next step */
                if (MHI_IN_PBL(ee))
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
                else
                        schedule_work(&mhi_cntrl->syserr_worker);
        }

exit_intvec:

        return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
        struct mhi_controller *mhi_cntrl = dev;

        /* Wake up events waiting for state change */
        wake_up_all(&mhi_cntrl->state_event);

        return IRQ_WAKE_THREAD;
}
@@ -138,6 +138,17 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
        }
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
        mhi_cntrl->wake_get(mhi_cntrl, false);
        mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
@@ -676,3 +687,211 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)

        return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
        unsigned long flags;

        /*
         * If force flag is set, then increment the wake count value and
         * ring wake db
         */
        if (unlikely(force)) {
                spin_lock_irqsave(&mhi_cntrl->wlock, flags);
                atomic_inc(&mhi_cntrl->dev_wake);
                if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
                    !mhi_cntrl->wake_set) {
                        mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
                        mhi_cntrl->wake_set = true;
                }
                spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
        } else {
                /*
                 * If resources are already requested, then just increment
                 * the wake count value and return
                 */
                if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
                        return;

                spin_lock_irqsave(&mhi_cntrl->wlock, flags);
                if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
                    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
                    !mhi_cntrl->wake_set) {
                        mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
                        mhi_cntrl->wake_set = true;
                }
                spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
        }
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
                                  bool override)
{
        unsigned long flags;

        /*
         * Only continue if there is a single resource, else just decrement
         * and return
         */
        if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
                return;

        spin_lock_irqsave(&mhi_cntrl->wlock, flags);
        if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
            MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
            mhi_cntrl->wake_set) {
                mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
                mhi_cntrl->wake_set = false;
        }
        spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
        enum mhi_ee_type current_ee;
        enum dev_st_transition next_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 val;
        int ret;

        dev_info(dev, "Requested to power ON\n");

        if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
                return -EINVAL;

        /* Supply default wake routines if not provided by controller driver */
        if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
            !mhi_cntrl->wake_toggle) {
                mhi_cntrl->wake_get = mhi_assert_dev_wake;
                mhi_cntrl->wake_put = mhi_deassert_dev_wake;
                mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
                        mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
        }

        mutex_lock(&mhi_cntrl->pm_mutex);
        mhi_cntrl->pm_state = MHI_PM_DISABLE;

        if (!mhi_cntrl->pre_init) {
                /* Setup device context */
                ret = mhi_init_dev_ctxt(mhi_cntrl);
                if (ret)
                        goto error_dev_ctxt;
        }

        ret = mhi_init_irq_setup(mhi_cntrl);
        if (ret)
                goto error_setup_irq;

        /* Setup BHI offset & INTVEC */
        write_lock_irq(&mhi_cntrl->pm_lock);
        ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
        if (ret) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                goto error_bhi_offset;
        }

        mhi_cntrl->bhi = mhi_cntrl->regs + val;

        /* Setup BHIE offset */
        if (mhi_cntrl->fbc_download) {
                ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
                if (ret) {
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        dev_err(dev, "Error reading BHIE offset\n");
                        goto error_bhi_offset;
                }

                mhi_cntrl->bhie = mhi_cntrl->regs + val;
        }

        mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        mhi_cntrl->pm_state = MHI_PM_POR;
        mhi_cntrl->ee = MHI_EE_MAX;
        current_ee = mhi_get_exec_env(mhi_cntrl);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /* Confirm that the device is in valid exec env */
        if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
                dev_err(dev, "Not a valid EE for power on\n");
                ret = -EIO;
                goto error_bhi_offset;
        }

        /* Transition to next state */
        next_state = MHI_IN_PBL(current_ee) ?
                DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

        if (next_state == DEV_ST_TRANSITION_PBL)
                schedule_work(&mhi_cntrl->fw_worker);

        mhi_queue_state_transition(mhi_cntrl, next_state);

        mutex_unlock(&mhi_cntrl->pm_mutex);

        dev_info(dev, "Power on setup success\n");

        return 0;

error_bhi_offset:
        mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
        if (!mhi_cntrl->pre_init)
                mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
        mutex_unlock(&mhi_cntrl->pm_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
        enum mhi_pm_state cur_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        /* If it's not a graceful shutdown, force MHI to linkdown state */
        if (!graceful) {
                mutex_lock(&mhi_cntrl->pm_mutex);
                write_lock_irq(&mhi_cntrl->pm_lock);
                cur_state = mhi_tryset_pm_state(mhi_cntrl,
                                                MHI_PM_LD_ERR_FATAL_DETECT);
                write_unlock_irq(&mhi_cntrl->pm_lock);
                mutex_unlock(&mhi_cntrl->pm_mutex);
                if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
                        dev_dbg(dev, "Failed to move to state: %s from: %s\n",
                                to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
                                to_mhi_pm_state_str(mhi_cntrl->pm_state));
        }
        mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
        mhi_deinit_free_irq(mhi_cntrl);

        if (!mhi_cntrl->pre_init) {
                /* Free all allocated resources */
                if (mhi_cntrl->fbc_image) {
                        mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
                        mhi_cntrl->fbc_image = NULL;
                }
                mhi_deinit_dev_ctxt(mhi_cntrl);
        }
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
        int ret = mhi_async_power_up(mhi_cntrl);

        if (ret)
                return ret;

        wait_event_timeout(mhi_cntrl->state_event,
                           MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
                           MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                           msecs_to_jiffies(mhi_cntrl->timeout_ms));

        return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_sync_power_up);
@@ -81,6 +81,17 @@ enum mhi_ch_type {
        MHI_CH_TYPE_INBOUND_COALESCED = 3,
};

/**
 * struct image_info - Firmware and RDDM table table
 * @mhi_buf - Buffer for firmware and RDDM table
 * @entries - # of entries in table
 */
struct image_info {
        struct mhi_buf *mhi_buf;
        struct bhi_vec_entry *bhi_vec;
        u32 entries;
};

/**
 * enum mhi_ee_type - Execution environment types
 * @MHI_EE_PBL: Primary Bootloader
@@ -266,6 +277,7 @@ struct mhi_controller_config {
 * @mhi_dev: MHI device instance for the controller
 * @regs: Base address of MHI MMIO register space (required)
 * @bhi: Points to base of MHI BHI register space
 * @bhie: Points to base of MHI BHIe register space
 * @wake_db: MHI WAKE doorbell register address
 * @iova_start: IOMMU starting address for data (required)
 * @iova_stop: IOMMU stop address for data (required)
@@ -273,6 +285,7 @@ struct mhi_controller_config {
 * @edl_image: Firmware image name for emergency download mode (optional)
 * @sbl_size: SBL image size downloaded through BHIe (optional)
 * @seg_len: BHIe vector size (optional)
 * @fbc_image: Points to firmware image buffer
 * @mhi_chan: Points to the channel configuration table
 * @lpm_chans: List of channels that require LPM notifications
 * @irq: base irq # to request (required)
@@ -323,6 +336,7 @@ struct mhi_controller {
        struct mhi_device *mhi_dev;
        void __iomem *regs;
        void __iomem *bhi;
        void __iomem *bhie;
        void __iomem *wake_db;

        dma_addr_t iova_start;
@@ -331,6 +345,7 @@ struct mhi_controller {
        const char *edl_image;
        size_t sbl_size;
        size_t seg_len;
        struct image_info *fbc_image;
        struct mhi_chan *mhi_chan;
        struct list_head lpm_chans;
        int *irq;
@@ -494,4 +509,40 @@ void mhi_driver_unregister(struct mhi_driver *mhi_drv);
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
                       enum mhi_state state);

/**
 * mhi_prepare_for_power_up - Do pre-initialization before power up.
 *                            This is optional, call this before power up if
 *                            the controller does not want bus framework to
 *                            automatically free any allocated memory during
 *                            shutdown process.
 * @mhi_cntrl: MHI controller
 */
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_async_power_up - Start MHI power up sequence
 * @mhi_cntrl: MHI controller
 */
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_sync_power_up - Start MHI power up sequence and wait till the device
 *                     device enters valid EE state
 * @mhi_cntrl: MHI controller
 */
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_power_down - Start MHI power down sequence
 * @mhi_cntrl: MHI controller
 * @graceful: Link is still accessible, so do a graceful shutdown process
 */
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);

/**
 * mhi_unprepare_after_power_down - Free any allocated memory after power down
 * @mhi_cntrl: MHI controller
 */
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);

#endif /* _MHI_H_ */