diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 237578030a2a..fc2014997dde 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -213,6 +213,8 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
 #define QPIC_PER_CW_CMD_SGL		32
 #define QPIC_PER_CW_DATA_SGL		8
 
+#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
+
 /*
  * Flags used in DMA descriptor preparation helper functions
  * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
@@ -245,6 +247,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
  * @tx_sgl_start - start index in data sgl for tx.
  * @rx_sgl_pos - current index in data sgl for rx.
  * @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for second DMA desc completion before making
+ *			     the NAND transfer completion.
+ * @txn_done - completion for NAND transfer.
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
  */
 struct bam_transaction {
 	struct bam_cmd_element *bam_ce;
@@ -258,6 +265,10 @@ struct bam_transaction {
 	u32 tx_sgl_start;
 	u32 rx_sgl_pos;
 	u32 rx_sgl_start;
+	bool wait_second_completion;
+	struct completion txn_done;
+	struct dma_async_tx_descriptor *last_data_desc;
+	struct dma_async_tx_descriptor *last_cmd_desc;
 };
 
 /*
@@ -504,6 +515,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
 
 	bam_txn->data_sgl = bam_txn_buf;
 
+	init_completion(&bam_txn->txn_done);
+
 	return bam_txn;
 }
 
@@ -523,11 +536,33 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
 	bam_txn->tx_sgl_start = 0;
 	bam_txn->rx_sgl_pos = 0;
 	bam_txn->rx_sgl_start = 0;
+	bam_txn->last_data_desc = NULL;
+	bam_txn->wait_second_completion = false;
 
 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
 		      QPIC_PER_CW_CMD_SGL);
 	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
 		      QPIC_PER_CW_DATA_SGL);
+
+	reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+	struct bam_transaction *bam_txn = data;
+
+	/*
+	 * In case of data transfer with NAND, 2 callbacks will be generated.
+	 * One for command channel and another one for data channel.
+	 * If current transaction has data descriptors
+	 * (i.e. wait_second_completion is true), then set this to false
+	 * and wait for second DMA descriptor completion.
+	 */
+	if (bam_txn->wait_second_completion)
+		bam_txn->wait_second_completion = false;
+	else
+		complete(&bam_txn->txn_done);
 }
 
 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
@@ -756,6 +791,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
 
 	desc->dma_desc = dma_desc;
 
+	/* update last data/command descriptor */
+	if (chan == nandc->cmd_chan)
+		bam_txn->last_cmd_desc = dma_desc;
+	else
+		bam_txn->last_data_desc = dma_desc;
+
 	list_add_tail(&desc->node, &nandc->desc_list);
 
 	return 0;
@@ -1273,10 +1314,20 @@ static int submit_descs(struct qcom_nand_controller *nandc)
 		cookie = dmaengine_submit(desc->dma_desc);
 
 	if (nandc->props->is_bam) {
+		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+		bam_txn->last_cmd_desc->callback_param = bam_txn;
+		if (bam_txn->last_data_desc) {
+			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+			bam_txn->last_data_desc->callback_param = bam_txn;
+			bam_txn->wait_second_completion = true;
+		}
+
 		dma_async_issue_pending(nandc->tx_chan);
 		dma_async_issue_pending(nandc->rx_chan);
+		dma_async_issue_pending(nandc->cmd_chan);
 
-		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
+		if (!wait_for_completion_timeout(&bam_txn->txn_done,
+						 QPIC_NAND_COMPLETION_TIMEOUT))
 			return -ETIMEDOUT;
 	} else {
 		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
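
Note (not part of the change above): the patch moves submit_descs() from polling with dma_sync_wait() to attaching qpic_bam_dma_done as the callback of the last descriptor on each BAM channel and blocking on a single struct completion. The sketch below shows that underlying dmaengine pattern in isolation; demo_dma_done() and demo_run_one_xfer() are hypothetical names used only for illustration, and it is simplified to a single channel, whereas the patch shares one completion between the command and data channels via wait_second_completion.

/* Illustration only: hypothetical helper, not part of qcom_nandc.c. */
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

/* DMA callback: signal the completion the submitter is sleeping on. */
static void demo_dma_done(void *param)
{
	complete(param);
}

static int demo_run_one_xfer(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	/* The callback must be attached before dmaengine_submit(). */
	desc->callback = demo_dma_done;
	desc->callback_param = &done;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Kick the channel, then sleep until the callback fires or we time out. */
	dma_async_issue_pending(chan);
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000)))
		return -ETIMEDOUT;

	return 0;
}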