From 71af05a7d0eb63fa5a6b64f296a5f2c8d84d6a9e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 31 Jan 2020 10:58:11 +0530 Subject: [PATCH 01/70] firmware: arm_scmi: Update doc style comments Fix minor formatting issues with the doc style comments. Signed-off-by: Viresh Kumar Link: https://lore.kernel.org/r/1bff7c0d1ad2c8b6eeff9660421f414f8c612eb2.1580448239.git.viresh.kumar@linaro.org Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 4 ++-- drivers/firmware/arm_scmi/driver.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index df35358ff324..227934871929 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -33,8 +33,8 @@ enum scmi_common_cmd { /** * struct scmi_msg_resp_prot_version - Response for a message * - * @major_version: Major version of the ABI that firmware supports * @minor_version: Minor version of the ABI that firmware supports + * @major_version: Major version of the ABI that firmware supports * * In general, ABI version changes follow the rule that minor version increments * are backward compatible. Major revision changes in ABI may not be @@ -88,7 +88,7 @@ struct scmi_msg { * message. If request-ACK protocol is used, we can reuse the same * buffer for the rx path as we use for the tx path. * @done: command message transmit completion event - * @async: pointer to delayed response message received event completion + * @async_done: pointer to delayed response message received event completion */ struct scmi_xfer { int transfer_id; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 2c96f6b5a7d8..978eafb53471 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -119,9 +119,9 @@ struct scmi_chan_info { * * @dev: Device pointer * @desc: SoC description for this instance - * @handle: Instance of SCMI handle to send to clients * @version: SCMI revision information containing protocol version, * implementation version and (sub-)vendor identification. + * @handle: Instance of SCMI handle to send to clients * @tx_minfo: Universal Transmit Message management info * @tx_idr: IDR object to map protocol id to Tx channel info pointer * @rx_idr: IDR object to map protocol id to Rx channel info pointer From c4eb83660aef549a6a50965ec5f0af9c8306d2e9 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 31 Jan 2020 10:58:12 +0530 Subject: [PATCH 02/70] firmware: arm_scmi: Move macros and helpers to common.h Move message header specific macros and helper routines to common.h as they will be used outside of driver.c in a later commit. 
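For reference, the helpers being moved pack the message id into bits [7:0], the protocol id into bits [17:10] and the sequence token into bits [27:18] of a single 32-bit header word (bits [9:8] carry the message type and stay at MSG_TYPE_COMMAND, i.e. zero, on transmit). A minimal stand-alone sketch of that packing, in plain C with explicit shifts standing in for the kernel's GENMASK()/FIELD_PREP(), purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: mirrors the MSG_*_MASK layout from common.h */
    static uint32_t pack_hdr(uint8_t id, uint8_t prot_id, uint16_t seq)
    {
            return ((uint32_t)id << 0) |                /* MSG_ID_MASK          [7:0]   */
                   ((uint32_t)prot_id << 10) |          /* MSG_PROTOCOL_ID_MASK [17:10] */
                   (((uint32_t)seq & 0x3ff) << 18);     /* MSG_TOKEN_ID_MASK    [27:18] */
    }

    int main(void)
    {
            /* e.g. message 0x6 of protocol 0x10, sequence token 5 */
            printf("hdr = 0x%08x\n", pack_hdr(0x6, 0x10, 5));
            return 0;
    }
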
Signed-off-by: Viresh Kumar Link: https://lore.kernel.org/r/6615db480370719b0a0241447a5f3feb8eea421f.1580448239.git.viresh.kumar@linaro.org Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 40 ++++++++++++++++++++++++++++++ drivers/firmware/arm_scmi/driver.c | 40 ------------------------------ 2 files changed, 40 insertions(+), 40 deletions(-) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 227934871929..934b5a23f10b 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -47,6 +47,19 @@ struct scmi_msg_resp_prot_version { __le16 major_version; }; +#define MSG_ID_MASK GENMASK(7, 0) +#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) +#define MSG_TYPE_MASK GENMASK(9, 8) +#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr)) +#define MSG_TYPE_COMMAND 0 +#define MSG_TYPE_DELAYED_RESP 2 +#define MSG_TYPE_NOTIFICATION 3 +#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) +#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr)) +#define MSG_TOKEN_ID_MASK GENMASK(27, 18) +#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) +#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) + /** * struct scmi_msg_hdr - Message(Tx/Rx) header * @@ -67,6 +80,33 @@ struct scmi_msg_hdr { bool poll_completion; }; +/** + * pack_scmi_header() - packs and returns 32-bit header + * + * @hdr: pointer to header containing all the information on message id, + * protocol id and sequence id. + * + * Return: 32-bit packed message header to be sent to the platform. + */ +static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) +{ + return FIELD_PREP(MSG_ID_MASK, hdr->id) | + FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | + FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); +} + +/** + * unpack_scmi_header() - unpacks and records message and protocol id + * + * @msg_hdr: 32-bit packed message header sent from the platform + * @hdr: pointer to header to fetch message and protocol id. + */ +static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) +{ + hdr->id = MSG_XTRACT_ID(msg_hdr); + hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); +} + /** * struct scmi_msg - Message(Tx/Rx) structure * diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 978eafb53471..716423063b14 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -32,19 +32,6 @@ #define CREATE_TRACE_POINTS #include -#define MSG_ID_MASK GENMASK(7, 0) -#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) -#define MSG_TYPE_MASK GENMASK(9, 8) -#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr)) -#define MSG_TYPE_COMMAND 0 -#define MSG_TYPE_DELAYED_RESP 2 -#define MSG_TYPE_NOTIFICATION 3 -#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) -#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr)) -#define MSG_TOKEN_ID_MASK GENMASK(27, 18) -#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) -#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) - enum scmi_error_codes { SCMI_SUCCESS = 0, /* Success */ SCMI_ERR_SUPPORT = -1, /* Not supported */ @@ -210,33 +197,6 @@ static void scmi_fetch_response(struct scmi_xfer *xfer, memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len); } -/** - * pack_scmi_header() - packs and returns 32-bit header - * - * @hdr: pointer to header containing all the information on message id, - * protocol id and sequence id. 
- * - * Return: 32-bit packed message header to be sent to the platform. - */ -static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) -{ - return FIELD_PREP(MSG_ID_MASK, hdr->id) | - FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | - FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); -} - -/** - * unpack_scmi_header() - unpacks and records message and protocol id - * - * @msg_hdr: 32-bit packed message header sent from the platform - * @hdr: pointer to header to fetch message and protocol id. - */ -static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) -{ - hdr->id = MSG_XTRACT_ID(msg_hdr); - hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); -} - /** * scmi_tx_prepare() - mailbox client callback to prepare for the transfer * From 5c8a47a5a91d4d6e185f758d61997613d9c5d6ac Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 31 Jan 2020 10:58:13 +0530 Subject: [PATCH 03/70] firmware: arm_scmi: Make scmi core independent of the transport type The SCMI specification is fairly independent of the transport protocol, which can be a simple mailbox (already implemented) or anything else. The current Linux implementation however is very much dependent on the mailbox transport layer. This patch makes the SCMI core code (driver.c) independent of the mailbox transport layer and moves all mailbox related code to a new file: mailbox.c and all struct shared_mem related code to a new file: shmem.c. We can now implement more transport protocols to transport SCMI messages. The transport protocols just need to provide struct scmi_transport_ops, with its version of the callbacks to enable exchange of SCMI messages. Signed-off-by: Viresh Kumar Link: https://lore.kernel.org/r/8698a3cec199b8feab35c2339f02dc232bfd773b.1580448239.git.viresh.kumar@linaro.org Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 3 +- drivers/firmware/arm_scmi/common.h | 71 ++++++++ drivers/firmware/arm_scmi/driver.c | 251 ++++------------------------ drivers/firmware/arm_scmi/mailbox.c | 184 ++++++++++++++++++++ drivers/firmware/arm_scmi/shmem.c | 83 +++++++++ 5 files changed, 373 insertions(+), 219 deletions(-) create mode 100644 drivers/firmware/arm_scmi/mailbox.c create mode 100644 drivers/firmware/arm_scmi/shmem.c diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 5f298f00a82e..6694d0d908d6 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o +obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o scmi-bus-y = bus.o scmi-driver-y = driver.o +scmi-transport-y = mailbox.o shmem.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 934b5a23f10b..5ac06469b01c 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -153,3 +153,74 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle, u8 *prot_imp); int scmi_base_protocol_init(struct scmi_handle *h); + +/* SCMI Transport */ +/** + * struct scmi_chan_info - Structure representing a SCMI channel information + * + * @dev: Reference to device in the SCMI hierarchy corresponding to this + * channel + * @handle: Pointer to SCMI entity handle + * @transport_info: Transport layer related information + */ +struct scmi_chan_info { + struct device *dev; + 
struct scmi_handle *handle; + void *transport_info; +}; + +/** + * struct scmi_transport_ops - Structure representing a SCMI transport ops + * + * @chan_available: Callback to check if channel is available or not + * @chan_setup: Callback to allocate and setup a channel + * @chan_free: Callback to free a channel + * @send_message: Callback to send a message + * @mark_txdone: Callback to mark tx as done + * @fetch_response: Callback to fetch response + * @poll_done: Callback to poll transfer status + */ +struct scmi_transport_ops { + bool (*chan_available)(struct device *dev, int idx); + int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, + bool tx); + int (*chan_free)(int id, void *p, void *data); + int (*send_message)(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer); + void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); + void (*fetch_response)(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer); + bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); +}; + +/** + * struct scmi_desc - Description of SoC integration + * + * @ops: Pointer to the transport specific ops structure + * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) + * @max_msg: Maximum number of messages that can be pending + * simultaneously in the system + * @max_msg_size: Maximum size of data per message that can be handled. + */ +struct scmi_desc { + struct scmi_transport_ops *ops; + int max_rx_timeout_ms; + int max_msg; + int max_msg_size; +}; + +extern const struct scmi_desc scmi_mailbox_desc; + +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr); +void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); + +/* shmem related declarations */ +struct scmi_shared_mem; + +void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer); +u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); +void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer); +bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 716423063b14..dbec767222e9 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -19,12 +19,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include "common.h" @@ -69,38 +67,6 @@ struct scmi_xfers_info { spinlock_t xfer_lock; }; -/** - * struct scmi_desc - Description of SoC integration - * - * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) - * @max_msg: Maximum number of messages that can be pending - * simultaneously in the system - * @max_msg_size: Maximum size of data per message that can be handled. 
- */ -struct scmi_desc { - int max_rx_timeout_ms; - int max_msg; - int max_msg_size; -}; - -/** - * struct scmi_chan_info - Structure representing a SCMI channel information - * - * @cl: Mailbox Client - * @chan: Transmit/Receive mailbox channel - * @payload: Transmit/Receive mailbox channel payload area - * @dev: Reference to device in the SCMI hierarchy corresponding to this - * channel - * @handle: Pointer to SCMI entity handle - */ -struct scmi_chan_info { - struct mbox_client cl; - struct mbox_chan *chan; - void __iomem *payload; - struct device *dev; - struct scmi_handle *handle; -}; - /** * struct scmi_info - Structure representing a SCMI instance * @@ -130,27 +96,8 @@ struct scmi_info { int users; }; -#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl) #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) -/* - * SCMI specification requires all parameters, message headers, return - * arguments or any protocol data to be expressed in little endian - * format only. - */ -struct scmi_shared_mem { - __le32 reserved; - __le32 channel_status; -#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) -#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) - __le32 reserved1[2]; - __le32 flags; -#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) - __le32 length; - __le32 msg_header; - u8 msg_payload[0]; -}; - static const int scmi_linux_errmap[] = { /* better than switch case as long as return value is continuous */ 0, /* SCMI_SUCCESS */ @@ -186,50 +133,6 @@ static inline void scmi_dump_header_dbg(struct device *dev, hdr->id, hdr->seq, hdr->protocol_id); } -static void scmi_fetch_response(struct scmi_xfer *xfer, - struct scmi_shared_mem __iomem *mem) -{ - xfer->hdr.status = ioread32(mem->msg_payload); - /* Skip the length of header and status in payload area i.e 8 bytes */ - xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8); - - /* Take a copy to the rx buffer.. */ - memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len); -} - -/** - * scmi_tx_prepare() - mailbox client callback to prepare for the transfer - * - * @cl: client pointer - * @m: mailbox message - * - * This function prepares the shared memory which contains the header and the - * payload. - */ -static void scmi_tx_prepare(struct mbox_client *cl, void *m) -{ - struct scmi_xfer *t = m; - struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); - struct scmi_shared_mem __iomem *mem = cinfo->payload; - - /* - * Ideally channel must be free by now unless OS timeout last - * request and platform continued to process the same, wait - * until it releases the shared memory, otherwise we may endup - * overwriting its response with new message payload or vice-versa - */ - spin_until_cond(ioread32(&mem->channel_status) & - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); - /* Mark channel busy + clear error */ - iowrite32(0x0, &mem->channel_status); - iowrite32(t->hdr.poll_completion ? 
0 : SCMI_SHMEM_FLAG_INTR_ENABLED, - &mem->flags); - iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length); - iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header); - if (t->tx.buf) - memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len); -} - /** * scmi_xfer_get() - Allocate one message * @@ -298,10 +201,10 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) } /** - * scmi_rx_callback() - mailbox client callback for receive messages + * scmi_rx_callback() - callback for receiving messages * - * @cl: client pointer - * @m: mailbox message + * @cinfo: SCMI channel info + * @msg_hdr: Message header * * Processes one received message to appropriate transfer information and * signals completion of the transfer. @@ -309,21 +212,14 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) * NOTE: This function will be invoked in IRQ context, hence should be * as optimal as possible. */ -static void scmi_rx_callback(struct mbox_client *cl, void *m) +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) { - u8 msg_type; - u32 msg_hdr; - u16 xfer_id; - struct scmi_xfer *xfer; - struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); - struct device *dev = cinfo->dev; struct scmi_info *info = handle_to_scmi_info(cinfo->handle); struct scmi_xfers_info *minfo = &info->tx_minfo; - struct scmi_shared_mem __iomem *mem = cinfo->payload; - - msg_hdr = ioread32(&mem->msg_header); - msg_type = MSG_XTRACT_TYPE(msg_hdr); - xfer_id = MSG_XTRACT_TOKEN(msg_hdr); + u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); + u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); + struct device *dev = cinfo->dev; + struct scmi_xfer *xfer; if (msg_type == MSG_TYPE_NOTIFICATION) return; /* Notifications not yet supported */ @@ -338,7 +234,7 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m) scmi_dump_header_dbg(dev, &xfer->hdr); - scmi_fetch_response(xfer, mem); + info->desc->ops->fetch_response(cinfo, xfer); trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, @@ -363,28 +259,15 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) __scmi_xfer_put(&info->tx_minfo, xfer); } -static bool -scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) -{ - struct scmi_shared_mem __iomem *mem = cinfo->payload; - u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); - - if (xfer->hdr.seq != xfer_id) - return false; - - return ioread32(&mem->channel_status) & - (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); -} - #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) -static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo, +static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, ktime_t stop) { - ktime_t __cur = ktime_get(); + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); - return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop); + return info->desc->ops->poll_done(cinfo, xfer) || + ktime_after(ktime_get(), stop); } /** @@ -413,29 +296,26 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); - ret = mbox_send_message(cinfo->chan, xfer); + ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { - dev_dbg(dev, "mbox send fail %d\n", ret); + dev_dbg(dev, "Failed to send message %d\n", ret); return ret; } - /* mbox_send_message returns non-negative value on success, so reset */ - ret = 0; - if 
(xfer->hdr.poll_completion) { ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); if (ktime_before(ktime_get(), stop)) - scmi_fetch_response(xfer, cinfo->payload); + info->desc->ops->fetch_response(cinfo, xfer); else ret = -ETIMEDOUT; } else { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); if (!wait_for_completion_timeout(&xfer->done, timeout)) { - dev_err(dev, "mbox timed out in resp(caller: %pS)\n", + dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_); ret = -ETIMEDOUT; } @@ -444,13 +324,8 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); - /* - * NOTE: we might prefer not to need the mailbox ticker to manage the - * transfer queueing since the protocol layer queues things by itself. - * Unfortunately, we have to kick the mailbox framework after we have - * received our message. - */ - mbox_client_txdone(cinfo->chan, ret); + if (info->desc->ops->mark_txdone) + info->desc->ops->mark_txdone(cinfo, ret); trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, @@ -691,23 +566,12 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo) return 0; } -static int scmi_mailbox_check(struct device_node *np, int idx) -{ - return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", - idx, NULL); -} - -static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, - int prot_id, bool tx) +static int scmi_chan_setup(struct scmi_info *info, struct device *dev, + int prot_id, bool tx) { int ret, idx; - struct resource res; - resource_size_t size; - struct device_node *shmem, *np = dev->of_node; struct scmi_chan_info *cinfo; - struct mbox_client *cl; struct idr *idr; - const char *desc = tx ? "Tx" : "Rx"; /* Transmit channel is first entry i.e. index 0 */ idx = tx ? 0 : 1; @@ -718,7 +582,7 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, if (cinfo) return 0; - if (scmi_mailbox_check(np, idx)) { + if (!info->desc->ops->chan_available(dev, idx)) { cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ return -EINVAL; @@ -731,36 +595,9 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, cinfo->dev = dev; - cl = &cinfo->cl; - cl->dev = dev; - cl->rx_callback = scmi_rx_callback; - cl->tx_prepare = tx ? 
scmi_tx_prepare : NULL; - cl->tx_block = false; - cl->knows_txdone = tx; - - shmem = of_parse_phandle(np, "shmem", idx); - ret = of_address_to_resource(shmem, 0, &res); - of_node_put(shmem); - if (ret) { - dev_err(dev, "failed to get SCMI %s payload memory\n", desc); + ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); + if (ret) return ret; - } - - size = resource_size(&res); - cinfo->payload = devm_ioremap(info->dev, res.start, size); - if (!cinfo->payload) { - dev_err(dev, "failed to ioremap SCMI %s payload\n", desc); - return -EADDRNOTAVAIL; - } - - cinfo->chan = mbox_request_channel(cl, idx); - if (IS_ERR(cinfo->chan)) { - ret = PTR_ERR(cinfo->chan); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to request SCMI %s mailbox\n", - desc); - return ret; - } idr_alloc: ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); @@ -774,12 +611,12 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, } static inline int -scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) +scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) { - int ret = scmi_mbox_chan_setup(info, dev, prot_id, true); + int ret = scmi_chan_setup(info, dev, prot_id, true); if (!ret) /* Rx is optional, hence no error check */ - scmi_mbox_chan_setup(info, dev, prot_id, false); + scmi_chan_setup(info, dev, prot_id, false); return ret; } @@ -797,7 +634,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, return; } - if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) { + if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { dev_err(&sdev->dev, "failed to setup transport\n"); scmi_device_destroy(sdev); return; @@ -850,12 +687,6 @@ static int scmi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *child, *np = dev->of_node; - /* Only mailbox method supported, check for the presence of one */ - if (scmi_mailbox_check(np, 0)) { - dev_err(dev, "no mailbox found in %pOF\n", np); - return -EINVAL; - } - desc = of_device_get_match_data(dev); if (!desc) return -EINVAL; @@ -880,7 +711,7 @@ static int scmi_probe(struct platform_device *pdev) handle->dev = info->dev; handle->version = &info->version; - ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); + ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); if (ret) return ret; @@ -915,19 +746,9 @@ static int scmi_probe(struct platform_device *pdev) return 0; } -static int scmi_mbox_free_channel(int id, void *p, void *data) +void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) { - struct scmi_chan_info *cinfo = p; - struct idr *idr = data; - - if (!IS_ERR_OR_NULL(cinfo->chan)) { - mbox_free_channel(cinfo->chan); - cinfo->chan = NULL; - } - idr_remove(idr, id); - - return 0; } static int scmi_remove(struct platform_device *pdev) @@ -947,11 +768,11 @@ static int scmi_remove(struct platform_device *pdev) return ret; /* Safe to free channels since no more users */ - ret = idr_for_each(idr, scmi_mbox_free_channel, idr); + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); idr_destroy(&info->tx_idr); idr = &info->rx_idr; - ret = idr_for_each(idr, scmi_mbox_free_channel, idr); + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); idr_destroy(&info->rx_idr); return ret; @@ -1003,15 +824,9 @@ static struct attribute *versions_attrs[] = { }; ATTRIBUTE_GROUPS(versions); -static const struct scmi_desc scmi_generic_desc = { - .max_rx_timeout_ms = 30, /* We may increase this if required */ - .max_msg = 20, /* 
Limited by MBOX_TX_QUEUE_LEN */ - .max_msg_size = 128, -}; - /* Each compatible listed below must have descriptor associated with it */ static const struct of_device_id scmi_of_match[] = { - { .compatible = "arm,scmi", .data = &scmi_generic_desc }, + { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, { /* Sentinel */ }, }; diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c new file mode 100644 index 000000000000..73077bbc4ad9 --- /dev/null +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Mailbox Transport + * driver. + * + * Copyright (C) 2019 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include + +#include "common.h" + +/** + * struct scmi_mailbox - Structure representing a SCMI mailbox transport + * + * @cl: Mailbox Client + * @chan: Transmit/Receive mailbox channel + * @cinfo: SCMI channel info + * @shmem: Transmit/Receive shared memory area + */ +struct scmi_mailbox { + struct mbox_client cl; + struct mbox_chan *chan; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; +}; + +#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) + +static void tx_prepare(struct mbox_client *cl, void *m) +{ + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); + + shmem_tx_prepare(smbox->shmem, m); +} + +static void rx_callback(struct mbox_client *cl, void *m) +{ + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); + + scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem)); +} + +static bool mailbox_chan_available(struct device *dev, int idx) +{ + return !of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", idx, NULL); +} + +static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + const char *desc = tx ? "Tx" : "Rx"; + struct device *cdev = cinfo->dev; + struct scmi_mailbox *smbox; + struct device_node *shmem; + int ret, idx = tx ? 0 : 1; + struct mbox_client *cl; + resource_size_t size; + struct resource res; + + smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); + if (!smbox) + return -ENOMEM; + + shmem = of_parse_phandle(cdev->of_node, "shmem", idx); + ret = of_address_to_resource(shmem, 0, &res); + of_node_put(shmem); + if (ret) { + dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); + return ret; + } + + size = resource_size(&res); + smbox->shmem = devm_ioremap(dev, res.start, size); + if (!smbox->shmem) { + dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); + return -EADDRNOTAVAIL; + } + + cl = &smbox->cl; + cl->dev = cdev; + cl->tx_prepare = tx ? tx_prepare : NULL; + cl->rx_callback = rx_callback; + cl->tx_block = false; + cl->knows_txdone = tx; + + smbox->chan = mbox_request_channel(cl, tx ? 0 : 1); + if (IS_ERR(smbox->chan)) { + ret = PTR_ERR(smbox->chan); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI %s mailbox\n", + tx ? 
"Tx" : "Rx"); + return ret; + } + + cinfo->transport_info = smbox; + smbox->cinfo = cinfo; + + return 0; +} + +static int mailbox_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_mailbox *smbox = cinfo->transport_info; + + if (!IS_ERR(smbox->chan)) { + mbox_free_channel(smbox->chan); + cinfo->transport_info = NULL; + smbox->chan = NULL; + smbox->cinfo = NULL; + } + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static int mailbox_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + int ret; + + ret = mbox_send_message(smbox->chan, xfer); + + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + return ret; +} + +static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + /* + * NOTE: we might prefer not to need the mailbox ticker to manage the + * transfer queueing since the protocol layer queues things by itself. + * Unfortunately, we have to kick the mailbox framework after we have + * received our message. + */ + mbox_client_txdone(smbox->chan, ret); +} + +static void mailbox_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + shmem_fetch_response(smbox->shmem, xfer); +} + +static bool +mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + return shmem_poll_done(smbox->shmem, xfer); +} + +static struct scmi_transport_ops scmi_mailbox_ops = { + .chan_available = mailbox_chan_available, + .chan_setup = mailbox_chan_setup, + .chan_free = mailbox_chan_free, + .send_message = mailbox_send_message, + .mark_txdone = mailbox_mark_txdone, + .fetch_response = mailbox_fetch_response, + .poll_done = mailbox_poll_done, +}; + +const struct scmi_desc scmi_mailbox_desc = { + .ops = &scmi_mailbox_ops, + .max_rx_timeout_ms = 30, /* We may increase this if required */ + .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ + .max_msg_size = 128, +}; diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c new file mode 100644 index 000000000000..ca0ffd302ea2 --- /dev/null +++ b/drivers/firmware/arm_scmi/shmem.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * For transport using shared mem structure. + * + * Copyright (C) 2019 ARM Ltd. + */ + +#include +#include +#include + +#include "common.h" + +/* + * SCMI specification requires all parameters, message headers, return + * arguments or any protocol data to be expressed in little endian + * format only. 
+ */ +struct scmi_shared_mem { + __le32 reserved; + __le32 channel_status; +#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) +#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) + __le32 reserved1[2]; + __le32 flags; +#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) + __le32 length; + __le32 msg_header; + u8 msg_payload[0]; +}; + +void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) +{ + /* + * Ideally channel must be free by now unless OS timeout last + * request and platform continued to process the same, wait + * until it releases the shared memory, otherwise we may endup + * overwriting its response with new message payload or vice-versa + */ + spin_until_cond(ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); + /* Mark channel busy + clear error */ + iowrite32(0x0, &shmem->channel_status); + iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, + &shmem->flags); + iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); + iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); + if (xfer->tx.buf) + memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); +} + +u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) +{ + return ioread32(&shmem->msg_header); +} + +void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) +{ + xfer->hdr.status = ioread32(shmem->msg_payload); + /* Skip the length of header and status in shmem area i.e 8 bytes */ + xfer->rx.len = min_t(size_t, xfer->rx.len, + ioread32(&shmem->length) - 8); + + /* Take a copy to the rx buffer.. */ + memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); +} + +bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) +{ + u16 xfer_id; + + xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header)); + + if (xfer->hdr.seq != xfer_id) + return false; + + return ioread32(&shmem->channel_status) & + (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); +} From aff9cc0847a58647ef010c5c0db0a8a00fb13911 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Mon, 3 Feb 2020 19:05:34 +0530 Subject: [PATCH 04/70] drivers: qcom: rpmh: fix macro to accept NULL argument Device argument matches with dev variable declared in RPMH message. Compiler reports error when the argument is NULL since the argument matches the name of the property. Rename dev argument to device to fix this. Signed-off-by: Maulik Shah Reviewed-by: Stephen Boyd Link: https://lore.kernel.org/r/1580736940-6985-2-git-send-email-mkshah@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/rpmh.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 035091fd44b8..3a4579d056a4 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -23,7 +23,7 @@ #define RPMH_TIMEOUT_MS msecs_to_jiffies(10000) -#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \ +#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \ struct rpmh_request name = { \ .msg = { \ .state = s, \ @@ -33,7 +33,7 @@ }, \ .cmd = { { 0 } }, \ .completion = q, \ - .dev = dev, \ + .dev = device, \ .needs_free = false, \ } From d5e205079c34aa1f33157627814f707d6057727a Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Mon, 3 Feb 2020 19:05:35 +0530 Subject: [PATCH 05/70] drivers: qcom: rpmh: remove rpmh_flush export rpmh_flush() was exported with the idea that an external entity operation during CPU idle would know when to flush the sleep and wake TCS. 
Since, this is not the case when defining a power domain for the RSC. Remove the function export and instead allow the function to be called internally. Signed-off-by: Maulik Shah Reviewed-by: Stephen Boyd Link: https://lore.kernel.org/r/1580736940-6985-3-git-send-email-mkshah@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/rpmh-internal.h | 1 + drivers/soc/qcom/rpmh.c | 18 ++++++++---------- include/soc/qcom/rpmh.h | 5 ----- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h index a7bbbb67991c..6eec32b97f83 100644 --- a/drivers/soc/qcom/rpmh-internal.h +++ b/drivers/soc/qcom/rpmh-internal.h @@ -110,5 +110,6 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, int rpmh_rsc_invalidate(struct rsc_drv *drv); void rpmh_tx_done(const struct tcs_request *msg, int r); +int rpmh_flush(struct rpmh_ctrlr *ctrlr); #endif /* __RPM_INTERNAL_H__ */ diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 3a4579d056a4..eb0ded059d2e 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -427,11 +427,10 @@ static int is_req_valid(struct cache_req *req) req->sleep_val != req->wake_val); } -static int send_single(const struct device *dev, enum rpmh_state state, +static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state, u32 addr, u32 data) { - DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg); - struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); + DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg); /* Wake sets are always complete and sleep sets are not */ rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE); @@ -445,7 +444,7 @@ static int send_single(const struct device *dev, enum rpmh_state state, /** * rpmh_flush: Flushes the buffered active and sleep sets to TCS * - * @dev: The device making the request + * @ctrlr: controller making request to flush cached data * * Return: -EBUSY if the controller is busy, probably waiting on a response * to a RPMH request sent earlier. @@ -454,10 +453,9 @@ static int send_single(const struct device *dev, enum rpmh_state state, * that is powering down the entire system. Since no other RPMH API would be * executing at this time, it is safe to run lockless. 
*/ -int rpmh_flush(const struct device *dev) +int rpmh_flush(struct rpmh_ctrlr *ctrlr) { struct cache_req *p; - struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); int ret; if (!ctrlr->dirty) { @@ -480,11 +478,12 @@ int rpmh_flush(const struct device *dev) __func__, p->addr, p->sleep_val, p->wake_val); continue; } - ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val); + ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, + p->sleep_val); if (ret) return ret; - ret = send_single(dev, RPMH_WAKE_ONLY_STATE, - p->addr, p->wake_val); + ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, + p->wake_val); if (ret) return ret; } @@ -493,7 +492,6 @@ int rpmh_flush(const struct device *dev) return 0; } -EXPORT_SYMBOL(rpmh_flush); /** * rpmh_invalidate: Invalidate all sleep and active sets diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h index 619e07c75da9..f9ec353d24a5 100644 --- a/include/soc/qcom/rpmh.h +++ b/include/soc/qcom/rpmh.h @@ -20,8 +20,6 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state, int rpmh_write_batch(const struct device *dev, enum rpmh_state state, const struct tcs_cmd *cmd, u32 *n); -int rpmh_flush(const struct device *dev); - int rpmh_invalidate(const struct device *dev); #else @@ -40,9 +38,6 @@ static inline int rpmh_write_batch(const struct device *dev, const struct tcs_cmd *cmd, u32 *n) { return -ENODEV; } -static inline int rpmh_flush(const struct device *dev) -{ return -ENODEV; } - static inline int rpmh_invalidate(const struct device *dev) { return -ENODEV; } From b5cc96d3bfcbc495a350f78aa2e1295b238d26da Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Mon, 20 Jan 2020 14:51:28 +0200 Subject: [PATCH 06/70] soc: imx: gpcv2: include linux/sizes.h This header is included indirectly on arm/arm64 but not on x86 so CONFIG_COMPILE_TEST breaks. Fix by including directly. Signed-off-by: Leonard Crestez Signed-off-by: Shawn Guo --- drivers/soc/imx/gpcv2.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index b0dffb06c05d..6cf8a7a412bd 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include From df636eea2b782589fad7fce3bfea26c6c31e4d14 Mon Sep 17 00:00:00 2001 From: Arun Kumar Neelakantam Date: Wed, 22 Jan 2020 13:53:37 +0530 Subject: [PATCH 07/70] soc: qcom: aoss: Use wake_up_all() instead of wake_up_interruptible_all() During the probe the task is waiting in TASK_UNINTERRUPTIBLE state which cannot be woken-up by wake_up_interruptible_all() function. Use wake_up_all() to wake-up both TASK_UNINTERRUPTIBLE and TASK_INTERRUPTIBLE state tasks. 
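The distinction relied on here: wait_event() puts the caller in TASK_UNINTERRUPTIBLE, which only the plain wake_up()/wake_up_all() variants can wake, while wait_event_interruptible() sleeps in TASK_INTERRUPTIBLE and is woken by either family. A minimal sketch of the pairing (generic names, not code from qcom_aoss.c):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);
    static bool done;

    static void waiter(void)
    {
            /* Sleeps in TASK_UNINTERRUPTIBLE; wake_up_interruptible_all()
             * would skip this task. */
            wait_event(wq, done);
    }

    static void waker(void)
    {
            /* Wakes waiters in either sleep state, which is what the
             * probe-time waiter described above needs. */
            done = true;
            wake_up_all(&wq);
    }
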
Signed-off-by: Arun Kumar Neelakantam Link: https://lore.kernel.org/r/1579681417-1155-1-git-send-email-aneela@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom_aoss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 006ac40c526a..fe79661d28cb 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -200,7 +200,7 @@ static irqreturn_t qmp_intr(int irq, void *data) { struct qmp *qmp = data; - wake_up_interruptible_all(&qmp->event); + wake_up_all(&qmp->event); return IRQ_HANDLED; } From 89e7eddece6140020749932f9647a6068cc0d56d Mon Sep 17 00:00:00 2001 From: Arun Kumar Neelakantam Date: Wed, 22 Jan 2020 13:54:13 +0530 Subject: [PATCH 08/70] soc: qcom: aoss: Read back before triggering the IRQ In some device memory used by msm_qmp, there can be an early ack of a write to memory succeeding. This may cause the outgoing interrupt to be triggered before the msgram reflects the write. Add a readback to ensure the data is flushed to device memory before triggering the ipc interrupt. Signed-off-by: Arun Kumar Neelakantam Link: https://lore.kernel.org/r/1579681454-1229-1-git-send-email-aneela@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom_aoss.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index fe79661d28cb..f43a2e07ee83 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -225,6 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp) static int qmp_send(struct qmp *qmp, const void *data, size_t len) { long time_left; + size_t tlen; int ret; if (WARN_ON(len + sizeof(u32) > qmp->size)) @@ -239,6 +240,9 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len) __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), data, len / sizeof(u32)); writel(len, qmp->msgram + qmp->offset); + + /* Read back len to confirm data written in message RAM */ + tlen = readl(qmp->msgram + qmp->offset); qmp_kick(qmp); time_left = wait_event_interruptible_timeout(qmp->event, From fe98d0ff5d5c43ee179e801275bb37641d398c6e Mon Sep 17 00:00:00 2001 From: Jianxin Pan Date: Wed, 15 Jan 2020 19:30:28 +0800 Subject: [PATCH 09/70] firmware: meson_sm: Add secure power domain support The Amlogic Meson A1/C1 Secure Monitor implements calls to control power domain. 
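The two new command indexes are invoked through the existing meson_sm_call() interface; a condensed sketch of the expected calls, mirroring the secure power-domain driver added later in this series (fw and idx stand in for the firmware handle and domain index):

    int is_off = 1;

    /* Query whether secure power domain 'idx' is currently off. */
    if (meson_sm_call(fw, SM_A1_PWRC_GET, &is_off, idx, 0, 0, 0, 0) < 0)
            pr_err("failed to get power domain status\n");

    /* Switch the domain on (PWRC_ON) or off (PWRC_OFF). */
    if (meson_sm_call(fw, SM_A1_PWRC_SET, NULL, idx, PWRC_ON, 0, 0, 0) < 0)
            pr_err("failed to set power domain on\n");
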
Signed-off-by: Jianxin Pan Signed-off-by: Kevin Hilman Link: https://lore.kernel.org/r/1579087831-94965-2-git-send-email-jianxin.pan@amlogic.com --- drivers/firmware/meson/meson_sm.c | 2 ++ include/linux/firmware/meson/meson_sm.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 1d5b4d74f96d..2854b56f6e0b 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -44,6 +44,8 @@ static const struct meson_sm_chip gxbb_chip = { CMD(SM_EFUSE_WRITE, 0x82000031), CMD(SM_EFUSE_USER_MAX, 0x82000033), CMD(SM_GET_CHIP_ID, 0x82000044), + CMD(SM_A1_PWRC_SET, 0x82000093), + CMD(SM_A1_PWRC_GET, 0x82000095), { /* sentinel */ }, }, }; diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 6669e2a1d5fd..95b0da2326a9 100644 --- a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -12,6 +12,8 @@ enum { SM_EFUSE_WRITE, SM_EFUSE_USER_MAX, SM_GET_CHIP_ID, + SM_A1_PWRC_SET, + SM_A1_PWRC_GET, }; struct meson_sm_firmware; From 165b5fb294e878f00015b7beb91cb00e36e4f8b8 Mon Sep 17 00:00:00 2001 From: Jianxin Pan Date: Wed, 15 Jan 2020 19:30:29 +0800 Subject: [PATCH 10/70] dt-bindings: power: add Amlogic secure power domains bindings Add the bindings for the Amlogic Secure power domains, controlling the secure power domains. The bindings targets the Amlogic A1 and C1 compatible SoCs, in which the power domain registers are in secure world. Signed-off-by: Jianxin Pan Signed-off-by: Kevin Hilman Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/1579087831-94965-3-git-send-email-jianxin.pan@amlogic.com --- .../power/amlogic,meson-sec-pwrc.yaml | 40 +++++++++++++++++++ include/dt-bindings/power/meson-a1-power.h | 32 +++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml create mode 100644 include/dt-bindings/power/meson-a1-power.h diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml new file mode 100644 index 000000000000..af32209218bb --- /dev/null +++ b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: (GPL-2.0+ OR MIT) +# Copyright (c) 2019 Amlogic, Inc +# Author: Jianxin Pan +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Amlogic Meson Secure Power Domains + +maintainers: + - Jianxin Pan + +description: |+ + Secure Power Domains used in Meson A1/C1 SoCs, and should be the child node + of secure-monitor. + +properties: + compatible: + enum: + - amlogic,meson-a1-pwrc + + "#power-domain-cells": + const: 1 + +required: + - compatible + - "#power-domain-cells" + +examples: + - | + secure-monitor { + compatible = "amlogic,meson-gxbb-sm"; + + pwrc: power-controller { + compatible = "amlogic,meson-a1-pwrc"; + #power-domain-cells = <1>; + }; + } + diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h new file mode 100644 index 000000000000..6cf50bfb8ccf --- /dev/null +++ b/include/dt-bindings/power/meson-a1-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2019 Amlogic, Inc. 
+ * Author: Jianxin Pan + */ + +#ifndef _DT_BINDINGS_MESON_A1_POWER_H +#define _DT_BINDINGS_MESON_A1_POWER_H + +#define PWRC_DSPA_ID 8 +#define PWRC_DSPB_ID 9 +#define PWRC_UART_ID 10 +#define PWRC_DMC_ID 11 +#define PWRC_I2C_ID 12 +#define PWRC_PSRAM_ID 13 +#define PWRC_ACODEC_ID 14 +#define PWRC_AUDIO_ID 15 +#define PWRC_OTP_ID 16 +#define PWRC_DMA_ID 17 +#define PWRC_SD_EMMC_ID 18 +#define PWRC_RAMA_ID 19 +#define PWRC_RAMB_ID 20 +#define PWRC_IR_ID 21 +#define PWRC_SPICC_ID 22 +#define PWRC_SPIFC_ID 23 +#define PWRC_USB_ID 24 +#define PWRC_NIC_ID 25 +#define PWRC_PDMIN_ID 26 +#define PWRC_RSA_ID 27 +#define PWRC_MAX_ID 28 + +#endif From b3dde5013e13d44799b3477cd0bf0c9ad34fe5e9 Mon Sep 17 00:00:00 2001 From: Jianxin Pan Date: Wed, 15 Jan 2020 19:30:30 +0800 Subject: [PATCH 11/70] soc: amlogic: Add support for Secure power domains controller Add support for the Amlogic Secure Power controller. In A1/C1 series, power control registers are in secure domain, and should be accessed by smc. Signed-off-by: Jianxin Pan Signed-off-by: Kevin Hilman Link: https://lore.kernel.org/r/1579087831-94965-4-git-send-email-jianxin.pan@amlogic.com --- drivers/soc/amlogic/Kconfig | 13 ++ drivers/soc/amlogic/Makefile | 1 + drivers/soc/amlogic/meson-secure-pwrc.c | 204 ++++++++++++++++++++++++ 3 files changed, 218 insertions(+) create mode 100644 drivers/soc/amlogic/meson-secure-pwrc.c diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig index bc2c912949bd..6cb06e7b5e63 100644 --- a/drivers/soc/amlogic/Kconfig +++ b/drivers/soc/amlogic/Kconfig @@ -48,6 +48,19 @@ config MESON_EE_PM_DOMAINS Say yes to expose Amlogic Meson Everything-Else Power Domains as Generic Power Domains. +config MESON_SECURE_PM_DOMAINS + bool "Amlogic Meson Secure Power Domains driver" + depends on ARCH_MESON || COMPILE_TEST + depends on PM && OF + depends on HAVE_ARM_SMCCC + default ARCH_MESON + select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS_OF + help + Support for the power controller on Amlogic A1/C1 series. + Say yes to expose Amlogic Meson Secure Power Domains as Generic + Power Domains. + config MESON_MX_SOCINFO bool "Amlogic Meson MX SoC Information driver" depends on ARCH_MESON || COMPILE_TEST diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile index de79d044b545..7b8c5d323f5c 100644 --- a/drivers/soc/amlogic/Makefile +++ b/drivers/soc/amlogic/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o +obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c new file mode 100644 index 000000000000..5fb29a475879 --- /dev/null +++ b/drivers/soc/amlogic/meson-secure-pwrc.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Amlogic, Inc. 
+ * Author: Jianxin Pan + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define PWRC_ON 1 +#define PWRC_OFF 0 + +struct meson_secure_pwrc_domain { + struct generic_pm_domain base; + unsigned int index; + struct meson_secure_pwrc *pwrc; +}; + +struct meson_secure_pwrc { + struct meson_secure_pwrc_domain *domains; + struct genpd_onecell_data xlate; + struct meson_sm_firmware *fw; +}; + +struct meson_secure_pwrc_domain_desc { + unsigned int index; + unsigned int flags; + char *name; + bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain); +}; + +struct meson_secure_pwrc_domain_data { + unsigned int count; + struct meson_secure_pwrc_domain_desc *domains; +}; + +static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain) +{ + int is_off = 1; + + if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off, + pwrc_domain->index, 0, 0, 0, 0) < 0) + pr_err("failed to get power domain status\n"); + + return is_off; +} + +static int meson_secure_pwrc_off(struct generic_pm_domain *domain) +{ + int ret = 0; + struct meson_secure_pwrc_domain *pwrc_domain = + container_of(domain, struct meson_secure_pwrc_domain, base); + + if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL, + pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) { + pr_err("failed to set power domain off\n"); + ret = -EINVAL; + } + + return ret; +} + +static int meson_secure_pwrc_on(struct generic_pm_domain *domain) +{ + int ret = 0; + struct meson_secure_pwrc_domain *pwrc_domain = + container_of(domain, struct meson_secure_pwrc_domain, base); + + if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL, + pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) { + pr_err("failed to set power domain on\n"); + ret = -EINVAL; + } + + return ret; +} + +#define SEC_PD(__name, __flag) \ +[PWRC_##__name##_ID] = \ +{ \ + .name = #__name, \ + .index = PWRC_##__name##_ID, \ + .is_off = pwrc_secure_is_off, \ + .flags = __flag, \ +} + +static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = { + SEC_PD(DSPA, 0), + SEC_PD(DSPB, 0), + /* UART should keep working in ATF after suspend and before resume */ + SEC_PD(UART, GENPD_FLAG_ALWAYS_ON), + /* DMC is for DDR PHY ana/dig and DMC, and should be always on */ + SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON), + SEC_PD(I2C, 0), + SEC_PD(PSRAM, 0), + SEC_PD(ACODEC, 0), + SEC_PD(AUDIO, 0), + SEC_PD(OTP, 0), + SEC_PD(DMA, 0), + SEC_PD(SD_EMMC, 0), + SEC_PD(RAMA, 0), + /* SRAMB is used as ATF runtime memory, and should be always on */ + SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON), + SEC_PD(IR, 0), + SEC_PD(SPICC, 0), + SEC_PD(SPIFC, 0), + SEC_PD(USB, 0), + /* NIC is for the Arm NIC-400 interconnect, and should be always on */ + SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON), + SEC_PD(PDMIN, 0), + SEC_PD(RSA, 0), +}; + +static int meson_secure_pwrc_probe(struct platform_device *pdev) +{ + int i; + struct device_node *sm_np; + struct meson_secure_pwrc *pwrc; + const struct meson_secure_pwrc_domain_data *match; + + match = of_device_get_match_data(&pdev->dev); + if (!match) { + dev_err(&pdev->dev, "failed to get match data\n"); + return -ENODEV; + } + + sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm"); + if (!sm_np) { + dev_err(&pdev->dev, "no secure-monitor node\n"); + return -ENODEV; + } + + pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); + if (!pwrc) + return -ENOMEM; + + pwrc->fw = meson_sm_get(sm_np); + of_node_put(sm_np); + if (!pwrc->fw) + return -EPROBE_DEFER; + + pwrc->xlate.domains = 
devm_kcalloc(&pdev->dev, match->count, + sizeof(*pwrc->xlate.domains), + GFP_KERNEL); + if (!pwrc->xlate.domains) + return -ENOMEM; + + pwrc->domains = devm_kcalloc(&pdev->dev, match->count, + sizeof(*pwrc->domains), GFP_KERNEL); + if (!pwrc->domains) + return -ENOMEM; + + pwrc->xlate.num_domains = match->count; + platform_set_drvdata(pdev, pwrc); + + for (i = 0 ; i < match->count ; ++i) { + struct meson_secure_pwrc_domain *dom = &pwrc->domains[i]; + + if (!match->domains[i].index) + continue; + + dom->pwrc = pwrc; + dom->index = match->domains[i].index; + dom->base.name = match->domains[i].name; + dom->base.flags = match->domains[i].flags; + dom->base.power_on = meson_secure_pwrc_on; + dom->base.power_off = meson_secure_pwrc_off; + + pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom)); + + pwrc->xlate.domains[i] = &dom->base; + } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); +} + +static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = { + .domains = a1_pwrc_domains, + .count = ARRAY_SIZE(a1_pwrc_domains), +}; + +static const struct of_device_id meson_secure_pwrc_match_table[] = { + { + .compatible = "amlogic,meson-a1-pwrc", + .data = &meson_secure_a1_pwrc_data, + }, + { /* sentinel */ } +}; + +static struct platform_driver meson_secure_pwrc_driver = { + .probe = meson_secure_pwrc_probe, + .driver = { + .name = "meson_secure_pwrc", + .of_match_table = meson_secure_pwrc_match_table, + }, +}; +builtin_platform_driver(meson_secure_pwrc_driver); From 6c2d3a14b7aff13f74fb2331d9a54202c66bea4d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 16 Feb 2020 17:17:48 +0100 Subject: [PATCH 12/70] firmware: tegra: Fix a typo in Kconfig A 'n' is mising in 'commuication' Signed-off-by: Christophe JAILLET Signed-off-by: Thierry Reding --- drivers/firmware/tegra/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig index a887731f50d6..1c8ba1f47c7c 100644 --- a/drivers/firmware/tegra/Kconfig +++ b/drivers/firmware/tegra/Kconfig @@ -7,7 +7,7 @@ config TEGRA_IVC help IVC (Inter-VM Communication) protocol is part of the IPC (Inter Processor Communication) framework on Tegra. It maintains the - data and the different commuication channels in SysRAM or RAM and + data and the different communication channels in SysRAM or RAM and keeps the content is synchronization between host CPU and remote processors. From 107539fb7fe22915fa5bd78db691d21f95fdc3b3 Mon Sep 17 00:00:00 2001 From: Nishad Kamdar Date: Sat, 18 Jan 2020 18:19:00 +0530 Subject: [PATCH 13/70] soc: renesas: rcar-sysc: Use the correct style for SPDX License Identifier This patch corrects the SPDX License Identifier style in header file related to Renesas Soc driver support. It assigns explicit block comment to the SPDX License Identifier. Changes made by using a script provided by Joe Perches here: https://lkml.org/lkml/2019/2/7/46. 
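For reference, the kernel's license-rules documentation defines two styles depending on file type: C sources take the C++ comment form on the first line, while headers (and assembly) take a self-contained C-style comment, which is the form this patch applies to rcar-sysc.h:

    // SPDX-License-Identifier: GPL-2.0
    /* ...first line of a .c source file */

    /* SPDX-License-Identifier: GPL-2.0 */
    /* ...first line of a .h header file */
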
Suggested-by: Joe Perches Signed-off-by: Nishad Kamdar Link: https://lore.kernel.org/r/20200118124856.GA3421@nishad Signed-off-by: Geert Uytterhoeven --- drivers/soc/renesas/rcar-sysc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 8d074489fba9..0fc3b119930a 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* SPDX-License-Identifier: GPL-2.0 */ +/* * Renesas R-Car System Controller * * Copyright (C) 2016 Glider bvba From 0a7696b319511b0eccb0c92d030d84653b90a975 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Mon, 27 Jan 2020 14:27:31 +0000 Subject: [PATCH 14/70] firmware: imx: scu-pd: Add missing audio PD ranges imx8qxp_scu_pd_ranges keeps PD ranges for both i.MX8QM and i.MX8QXP. The following PD are missing: audio-clk1/ spdif1 / sai3..7. Add them now. Signed-off-by: Daniel Baluta Signed-off-by: Shawn Guo --- drivers/firmware/imx/scu-pd.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index b556612207e5..c10f63901c1c 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -109,6 +109,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 }, { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 }, { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 }, + { "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 }, { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 }, { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 }, { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 }, @@ -116,7 +117,13 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 }, { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 }, { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 }, + { "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 }, { "sai", IMX_SC_R_SAI_0, 3, true, 0 }, + { "sai3", IMX_SC_R_SAI_3, 1, false, 0 }, + { "sai4", IMX_SC_R_SAI_4, 1, false, 0 }, + { "sai5", IMX_SC_R_SAI_5, 1, false, 0 }, + { "sai6", IMX_SC_R_SAI_6, 1, false, 0 }, + { "sai7", IMX_SC_R_SAI_7, 1, false, 0 }, { "amix", IMX_SC_R_AMIX, 1, false, 0 }, { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 }, { "dsp", IMX_SC_R_DSP, 1, false, 0 }, From e391b24d94915ff870cdca9a1cee01324326bb95 Mon Sep 17 00:00:00 2001 From: Sebastien Fagard Date: Mon, 27 Jan 2020 14:27:32 +0000 Subject: [PATCH 15/70] firmware: imx: scu-pd: enlarge PD range for mu_b The range of resources for Messaging Units side B needs to contain all the possible MUB resource available: starting from MU_5B up to MU_13B. This patch is needed to enable MU_8B for the 'imx-shmem-net' driver which allows two OS partitions communicating via MUs without Hypervisor. 
Signed-off-by: Sebastien Fagard Signed-off-by: Daniel Baluta Signed-off-by: Shawn Guo --- drivers/firmware/imx/scu-pd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index c10f63901c1c..09cfa268c6bd 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -93,7 +93,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "kpp", IMX_SC_R_KPP, 1, false, 0 }, { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 }, { "mu_a", IMX_SC_R_MU_0A, 14, true, 0 }, - { "mu_b", IMX_SC_R_MU_13B, 1, true, 13 }, + { "mu_b", IMX_SC_R_MU_5B, 9, true, 5 }, /* CONN SS */ { "usb", IMX_SC_R_USB_0, 2, true, 0 }, From 9d98809711ae0ebcfb8115a0bc54604c59908710 Mon Sep 17 00:00:00 2001 From: Youri Querry Date: Thu, 12 Dec 2019 17:01:13 +0000 Subject: [PATCH 16/70] soc: fsl: dpio: Adding QMAN multiple enqueue interface Update of QMAN the interface to enqueue frame. We now support multiple enqueue (qbman_swp_enqueue_multiple) and multiple enqueue with a table of descriptor (qbman_swp_enqueue_multiple_desc). Signed-off-by: Youri Querry Acked-by: Roy Pledge Signed-off-by: Li Yang --- drivers/soc/fsl/dpio/dpio-service.c | 69 ++++++++++++++++++++++-- drivers/soc/fsl/dpio/qbman-portal.c | 83 ++++++++++++++++++++++++----- drivers/soc/fsl/dpio/qbman-portal.h | 24 +++++++++ include/soc/fsl/dpaa2-io.h | 6 ++- 4 files changed, 165 insertions(+), 17 deletions(-) diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c index 518a8e081b49..cd4f6410e8c2 100644 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016 NXP + * Copyright 2016-2019 NXP * */ #include @@ -432,6 +432,69 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, } EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); +/** + * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames + * to a frame queue using one fqid. + * @d: the given DPIO service. + * @fqid: the given frame queue id. + * @fd: the frame descriptor which is enqueued. + * @nb: number of frames to be enqueud + * + * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready, + * or -ENODEV if there is no dpio service. + */ +int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, + u32 fqid, + const struct dpaa2_fd *fd, + int nb) +{ + struct qbman_eq_desc ed; + + d = service_select(d); + if (!d) + return -ENODEV; + + qbman_eq_desc_clear(&ed); + qbman_eq_desc_set_no_orp(&ed, 0); + qbman_eq_desc_set_fq(&ed, fqid); + + return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb); +} +EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq); + +/** + * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames + * to different frame queue using a list of fqids. + * @d: the given DPIO service. + * @fqid: the given list of frame queue ids. + * @fd: the frame descriptor which is enqueued. + * @nb: number of frames to be enqueud + * + * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready, + * or -ENODEV if there is no dpio service. 
+ */ +int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, + u32 *fqid, + const struct dpaa2_fd *fd, + int nb) +{ + int i; + struct qbman_eq_desc ed[32]; + + d = service_select(d); + if (!d) + return -ENODEV; + + for (i = 0; i < nb; i++) { + qbman_eq_desc_clear(&ed[i]); + qbman_eq_desc_set_no_orp(&ed[i], 0); + qbman_eq_desc_set_fq(&ed[i], fqid[i]); + } + + return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb); +} +EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq); + /** * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. * @d: the given DPIO service. @@ -526,7 +589,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire); /** * dpaa2_io_store_create() - Create the dma memory storage for dequeue result. - * @max_frames: the maximum number of dequeued result for frames, must be <= 16. + * @max_frames: the maximum number of dequeued result for frames, must be <= 32. * @dev: the device to allow mapping/unmapping the DMAable region. * * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)". @@ -541,7 +604,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, struct dpaa2_io_store *ret; size_t size; - if (!max_frames || (max_frames > 16)) + if (!max_frames || (max_frames > 32)) return NULL; ret = kmalloc(sizeof(*ret), GFP_KERNEL); diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index c66f5b73777c..5a37ac8a171c 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. - * Copyright 2016 NXP + * Copyright 2016-2019 NXP * */ @@ -12,13 +12,6 @@ #include "qbman-portal.h" -#define QMAN_REV_4000 0x04000000 -#define QMAN_REV_4100 0x04010000 -#define QMAN_REV_4101 0x04010001 -#define QMAN_REV_5000 0x05000000 - -#define QMAN_REV_MASK 0xffff0000 - /* All QBMan command and result structures use this "valid bit" encoding */ #define QB_VALID_BIT ((u32)0x80) @@ -156,7 +149,7 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm, */ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) { - struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); + struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL); u32 reg; if (!p) @@ -467,22 +460,32 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p, int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd) { - struct qbman_eq_desc *p; + struct qbman_eq_desc_with_fd *p; u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR); if (!EQAR_SUCCESS(eqar)) return -EBUSY; p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); - memcpy(&p->dca, &d->dca, 31); + /* This is mapped as DEVICE type memory, writes are + * with address alignment: + * desc.dca address alignment = 1 + * desc.seqnum address alignment = 2 + * desc.orpid address alignment = 4 + * desc.tgtid address alignment = 8 + */ + p->desc.dca = d->dca; + p->desc.seqnum = d->seqnum; + p->desc.orpid = d->orpid; + memcpy(&p->desc.tgtid, &d->tgtid, 24); memcpy(&p->fd, fd, sizeof(*fd)); if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { /* Set the verb byte, have to substitute in the valid-bit */ dma_wmb(); - p->verb = d->verb | EQAR_VB(eqar); + p->desc.verb = d->verb | EQAR_VB(eqar); } else { - p->verb = d->verb | EQAR_VB(eqar); + p->desc.verb = d->verb | EQAR_VB(eqar); dma_wmb(); qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar)); } @@ -490,6 +493,60 @@ 
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, return 0; } +/** + * qbman_swp_enqueue_multiple() - Issue a multi enqueue command + * using one enqueue descriptor + * @s: the software portal used for enqueue + * @d: the enqueue descriptor + * @fd: table pointer of frame descriptor table to be enqueued + * @flags: table pointer of flags, not used for the moment + * @num_frames: number of fd to be enqueued + * + * Return the number of fd enqueued, or a negative error number. + */ +int qbman_swp_enqueue_multiple(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames) +{ + int count = 0; + + while (count < num_frames) { + if (qbman_swp_enqueue(s, d, fd) != 0) + break; + count++; + } + + return count; +} + +/** + * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command + * using multiple enqueue descriptor + * @s: the software portal used for enqueue + * @d: table of minimal enqueue descriptor + * @fd: table pointer of frame descriptor table to be enqueued + * @num_frames: number of fd to be enqueued + * + * Return the number of fd enqueued, or a negative error number. + */ +int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames) +{ + int count = 0; + + while (count < num_frames) { + if (qbman_swp_enqueue(s, &(d[count]), fd) != 0) + break; + count++; + } + + return count; +} + /* Static (push) dequeue */ /** diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h index f3ec5d2044fb..ac58a9741d33 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.h +++ b/drivers/soc/fsl/dpio/qbman-portal.h @@ -9,6 +9,13 @@ #include +#define QMAN_REV_4000 0x04000000 +#define QMAN_REV_4100 0x04010000 +#define QMAN_REV_4101 0x04010001 +#define QMAN_REV_5000 0x05000000 + +#define QMAN_REV_MASK 0xffff0000 + struct dpaa2_dq; struct qbman_swp; @@ -81,6 +88,10 @@ struct qbman_eq_desc { u8 wae; u8 rspid; __le64 rsp_addr; +}; + +struct qbman_eq_desc_with_fd { + struct qbman_eq_desc desc; u8 fd[32]; }; @@ -193,6 +204,19 @@ void *qbman_swp_mc_start(struct qbman_swp *p); void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb); void *qbman_swp_mc_result(struct qbman_swp *p); +int +qbman_swp_enqueue_multiple(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames); + +int +qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames); + /** * qbman_result_is_DQ() - check if the dequeue result is a dequeue response * @dq: the dequeue result to be checked diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h index 672cfb58046f..c9d849924f89 100644 --- a/include/soc/fsl/dpaa2-io.h +++ b/include/soc/fsl/dpaa2-io.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* * Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright NXP + * Copyright 2017-2019 NXP * */ #ifndef __FSL_DPAA2_IO_H @@ -109,6 +109,10 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid, int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid, const struct dpaa2_fd *fd); +int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid, + const struct dpaa2_fd *fd, int number_of_frame); +int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid, + const struct dpaa2_fd *fd, int number_of_frame); int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, u16 qdbin, const struct dpaa2_fd *fd); int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid, From b46fe745e4f6102fa944383f87f5d8820398f4ad Mon Sep 17 00:00:00 2001 From: Youri Querry Date: Thu, 12 Dec 2019 17:01:15 +0000 Subject: [PATCH 17/70] soc: fsl: dpio: QMAN performance improvement with function pointer indirection We are making the access decision in the initialization and setting the function pointers accordingly. Signed-off-by: Youri Querry Acked-by: Roy Pledge Signed-off-by: Li Yang --- drivers/soc/fsl/dpio/qbman-portal.c | 451 ++++++++++++++++++++++++---- drivers/soc/fsl/dpio/qbman-portal.h | 129 +++++++- 2 files changed, 507 insertions(+), 73 deletions(-) diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index 5a37ac8a171c..0ffe018afb17 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -83,6 +83,82 @@ enum qbman_sdqcr_fc { qbman_sdqcr_fc_up_to_3 = 1 }; +/* Internal Function declaration */ +static int qbman_swp_enqueue_direct(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd); +static int qbman_swp_enqueue_mem_back(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd); +static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames); +static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames); +static int +qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames); +static +int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames); +static int qbman_swp_pull_direct(struct qbman_swp *s, + struct qbman_pull_desc *d); +static int qbman_swp_pull_mem_back(struct qbman_swp *s, + struct qbman_pull_desc *d); + +const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s); +const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s); + +static int qbman_swp_release_direct(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, + unsigned int num_buffers); +static int qbman_swp_release_mem_back(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, + unsigned int num_buffers); + +/* Function pointers */ +int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd) + = qbman_swp_enqueue_direct; + +int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames) + = qbman_swp_enqueue_multiple_direct; + +int +(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s, + const struct 
qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames) + = qbman_swp_enqueue_multiple_desc_direct; + +int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d) + = qbman_swp_pull_direct; + +const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s) + = qbman_swp_dqrr_next_direct; + +int (*qbman_swp_release_ptr)(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, + unsigned int num_buffers) + = qbman_swp_release_direct; + /* Portal Access */ static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset) @@ -218,6 +294,19 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) * applied when dequeues from a specific channel are enabled. */ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0); + + if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) { + qbman_swp_enqueue_ptr = + qbman_swp_enqueue_mem_back; + qbman_swp_enqueue_multiple_ptr = + qbman_swp_enqueue_multiple_mem_back; + qbman_swp_enqueue_multiple_desc_ptr = + qbman_swp_enqueue_multiple_desc_mem_back; + qbman_swp_pull_ptr = qbman_swp_pull_mem_back; + qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back; + qbman_swp_release_ptr = qbman_swp_release_mem_back; + } + return p; } @@ -447,7 +536,7 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p, } /** - * qbman_swp_enqueue() - Issue an enqueue command + * qbman_swp_enqueue_direct() - Issue an enqueue command * @s: the software portal used for enqueue * @d: the enqueue descriptor * @fd: the frame descriptor to be enqueued @@ -457,7 +546,7 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p, * * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. */ -int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, +int qbman_swp_enqueue_direct(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd) { struct qbman_eq_desc_with_fd *p; @@ -480,21 +569,57 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, memcpy(&p->desc.tgtid, &d->tgtid, 24); memcpy(&p->fd, fd, sizeof(*fd)); - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { - /* Set the verb byte, have to substitute in the valid-bit */ - dma_wmb(); - p->desc.verb = d->verb | EQAR_VB(eqar); - } else { - p->desc.verb = d->verb | EQAR_VB(eqar); - dma_wmb(); - qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar)); - } + /* Set the verb byte, have to substitute in the valid-bit */ + dma_wmb(); + p->desc.verb = d->verb | EQAR_VB(eqar); return 0; } /** - * qbman_swp_enqueue_multiple() - Issue a multi enqueue command + * qbman_swp_enqueue_mem_back() - Issue an enqueue command + * @s: the software portal used for enqueue + * @d: the enqueue descriptor + * @fd: the frame descriptor to be enqueued + * + * Please note that 'fd' should only be NULL if the "action" of the + * descriptor is "orp_hole" or "orp_nesn". + * + * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. 
+ */ +int qbman_swp_enqueue_mem_back(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd) +{ + struct qbman_eq_desc_with_fd *p; + u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR); + + if (!EQAR_SUCCESS(eqar)) + return -EBUSY; + + p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); + /* This is mapped as DEVICE type memory, writes are + * with address alignment: + * desc.dca address alignment = 1 + * desc.seqnum address alignment = 2 + * desc.orpid address alignment = 4 + * desc.tgtid address alignment = 8 + */ + p->desc.dca = d->dca; + p->desc.seqnum = d->seqnum; + p->desc.orpid = d->orpid; + memcpy(&p->desc.tgtid, &d->tgtid, 24); + memcpy(&p->fd, fd, sizeof(*fd)); + + p->desc.verb = d->verb | EQAR_VB(eqar); + dma_wmb(); + qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar)); + + return 0; +} + +/** + * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command * using one enqueue descriptor * @s: the software portal used for enqueue * @d: the enqueue descriptor @@ -504,16 +629,16 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, * * Return the number of fd enqueued, or a negative error number. */ -int qbman_swp_enqueue_multiple(struct qbman_swp *s, - const struct qbman_eq_desc *d, - const struct dpaa2_fd *fd, - uint32_t *flags, - int num_frames) +int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames) { int count = 0; while (count < num_frames) { - if (qbman_swp_enqueue(s, d, fd) != 0) + if (qbman_swp_enqueue_direct(s, d, fd) != 0) break; count++; } @@ -522,7 +647,35 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s, } /** - * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command + * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command + * using one enqueue descriptor + * @s: the software portal used for enqueue + * @d: the enqueue descriptor + * @fd: table pointer of frame descriptor table to be enqueued + * @flags: table pointer of flags, not used for the moment + * @num_frames: number of fd to be enqueued + * + * Return the number of fd enqueued, or a negative error number. + */ +int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames) +{ + int count = 0; + + while (count < num_frames) { + if (qbman_swp_enqueue_mem_back(s, d, fd) != 0) + break; + count++; + } + + return count; +} + +/** + * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command * using multiple enqueue descriptor * @s: the software portal used for enqueue * @d: table of minimal enqueue descriptor @@ -531,15 +684,41 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s, * * Return the number of fd enqueued, or a negative error number. 
*/ -int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, - const struct qbman_eq_desc *d, - const struct dpaa2_fd *fd, - int num_frames) +int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames) { int count = 0; while (count < num_frames) { - if (qbman_swp_enqueue(s, &(d[count]), fd) != 0) + if (qbman_swp_enqueue_direct(s, &(d[count]), fd) != 0) + break; + count++; + } + + return count; +} + +/** + * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command + * using multiple enqueue descriptor + * @s: the software portal used for enqueue + * @d: table of minimal enqueue descriptor + * @fd: table pointer of frame descriptor table to be enqueued + * @num_frames: number of fd to be enqueued + * + * Return the number of fd enqueued, or a negative error number. + */ +int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames) +{ + int count = 0; + + while (count < num_frames) { + if (qbman_swp_enqueue_mem_back(s, &(d[count]), fd) != 0) break; count++; } @@ -702,7 +881,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid, } /** - * qbman_swp_pull() - Issue the pull dequeue command + * qbman_swp_pull_direct() - Issue the pull dequeue command * @s: the software portal object * @d: the software portal descriptor which has been configured with * the set of qbman_pull_desc_set_*() calls @@ -710,7 +889,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid, * Return 0 for success, and -EBUSY if the software portal is not ready * to do pull dequeue. */ -int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) +int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d) { struct qbman_pull_desc *p; @@ -728,18 +907,45 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) p->dq_src = d->dq_src; p->rsp_addr = d->rsp_addr; p->rsp_addr_virt = d->rsp_addr_virt; + dma_wmb(); + /* Set the verb byte, have to substitute in the valid-bit */ + p->verb = d->verb | s->vdq.valid_bit; + s->vdq.valid_bit ^= QB_VALID_BIT; - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { - dma_wmb(); - /* Set the verb byte, have to substitute in the valid-bit */ - p->verb = d->verb | s->vdq.valid_bit; - s->vdq.valid_bit ^= QB_VALID_BIT; - } else { - p->verb = d->verb | s->vdq.valid_bit; - s->vdq.valid_bit ^= QB_VALID_BIT; - dma_wmb(); - qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE); + return 0; +} + +/** + * qbman_swp_pull_mem_back() - Issue the pull dequeue command + * @s: the software portal object + * @d: the software portal descriptor which has been configured with + * the set of qbman_pull_desc_set_*() calls + * + * Return 0 for success, and -EBUSY if the software portal is not ready + * to do pull dequeue. 
+ */ +int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d) +{ + struct qbman_pull_desc *p; + + if (!atomic_dec_and_test(&s->vdq.available)) { + atomic_inc(&s->vdq.available); + return -EBUSY; } + s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) + p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); + else + p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM); + p->numf = d->numf; + p->tok = QMAN_DQ_TOKEN_VALID; + p->dq_src = d->dq_src; + p->rsp_addr = d->rsp_addr; + p->rsp_addr_virt = d->rsp_addr_virt; + p->verb = d->verb | s->vdq.valid_bit; + s->vdq.valid_bit ^= QB_VALID_BIT; + dma_wmb(); + qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE); return 0; } @@ -747,14 +953,14 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) #define QMAN_DQRR_PI_MASK 0xf /** - * qbman_swp_dqrr_next() - Get an valid DQRR entry + * qbman_swp_dqrr_next_direct() - Get an valid DQRR entry * @s: the software portal object * * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry * only once, so repeated calls can return a sequence of DQRR entries, without * requiring they be consumed immediately or in any particular order. */ -const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) +const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s) { u32 verb; u32 response_verb; @@ -797,10 +1003,99 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); } - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) - p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); - else - p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)); + p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); + verb = p->dq.verb; + + /* + * If the valid-bit isn't of the expected polarity, nothing there. Note, + * in the DQRR reset bug workaround, we shouldn't need to skip these + * check, because we've already determined that a new entry is available + * and we've invalidated the cacheline before reading it, so the + * valid-bit behaviour is repaired and should tell us what we already + * knew from reading PI. + */ + if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { + prefetch(qbman_get_cmd(s, + QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); + return NULL; + } + /* + * There's something there. Move "next_idx" attention to the next ring + * entry (and prefetch it) before returning what we found. + */ + s->dqrr.next_idx++; + s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ + if (!s->dqrr.next_idx) + s->dqrr.valid_bit ^= QB_VALID_BIT; + + /* + * If this is the final response to a volatile dequeue command + * indicate that the vdq is available + */ + flags = p->dq.stat; + response_verb = verb & QBMAN_RESULT_MASK; + if ((response_verb == QBMAN_RESULT_DQ) && + (flags & DPAA2_DQ_STAT_VOLATILE) && + (flags & DPAA2_DQ_STAT_EXPIRED)) + atomic_inc(&s->vdq.available); + + prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); + + return p; +} + +/** + * qbman_swp_dqrr_next_mem_back() - Get an valid DQRR entry + * @s: the software portal object + * + * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry + * only once, so repeated calls can return a sequence of DQRR entries, without + * requiring they be consumed immediately or in any particular order. 
+ */ +const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s) +{ + u32 verb; + u32 response_verb; + u32 flags; + struct dpaa2_dq *p; + + /* Before using valid-bit to detect if something is there, we have to + * handle the case of the DQRR reset bug... + */ + if (unlikely(s->dqrr.reset_bug)) { + /* + * We pick up new entries by cache-inhibited producer index, + * which means that a non-coherent mapping would require us to + * invalidate and read *only* once that PI has indicated that + * there's an entry here. The first trip around the DQRR ring + * will be much less efficient than all subsequent trips around + * it... + */ + u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) & + QMAN_DQRR_PI_MASK; + + /* there are new entries if pi != next_idx */ + if (pi == s->dqrr.next_idx) + return NULL; + + /* + * if next_idx is/was the last ring index, and 'pi' is + * different, we can disable the workaround as all the ring + * entries have now been DMA'd to so valid-bit checking is + * repaired. Note: this logic needs to be based on next_idx + * (which increments one at a time), rather than on pi (which + * can burst and wrap-around between our snapshots of it). + */ + if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { + pr_debug("next_idx=%d, pi=%d, clear reset bug\n", + s->dqrr.next_idx, pi); + s->dqrr.reset_bug = 0; + } + prefetch(qbman_get_cmd(s, + QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); + } + + p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)); verb = p->dq.verb; /* @@ -929,7 +1224,7 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) #define RAR_SUCCESS(rar) ((rar) & 0x100) /** - * qbman_swp_release() - Issue a buffer release command + * qbman_swp_release_direct() - Issue a buffer release command * @s: the software portal object * @d: the release descriptor * @buffers: a pointer pointing to the buffer address to be released @@ -937,8 +1232,9 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) * * Return 0 for success, -EBUSY if the release command ring is not ready. */ -int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, - const u64 *buffers, unsigned int num_buffers) +int qbman_swp_release_direct(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, unsigned int num_buffers) { int i; struct qbman_release_desc *p; @@ -952,28 +1248,59 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, return -EBUSY; /* Start the release command */ - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) - p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); - else - p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar))); + p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); + /* Copy the caller's buffer pointers to the command */ for (i = 0; i < num_buffers; i++) p->buf[i] = cpu_to_le64(buffers[i]); p->bpid = d->bpid; - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { - /* - * Set the verb byte, have to substitute in the valid-bit - * and the number of buffers. - */ - dma_wmb(); - p->verb = d->verb | RAR_VB(rar) | num_buffers; - } else { - p->verb = d->verb | RAR_VB(rar) | num_buffers; - dma_wmb(); - qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT + - RAR_IDX(rar) * 4, QMAN_RT_MODE); - } + /* + * Set the verb byte, have to substitute in the valid-bit + * and the number of buffers. 
+ */ + dma_wmb(); + p->verb = d->verb | RAR_VB(rar) | num_buffers; + + return 0; +} + +/** + * qbman_swp_release_mem_back() - Issue a buffer release command + * @s: the software portal object + * @d: the release descriptor + * @buffers: a pointer pointing to the buffer address to be released + * @num_buffers: number of buffers to be released, must be less than 8 + * + * Return 0 for success, -EBUSY if the release command ring is not ready. + */ +int qbman_swp_release_mem_back(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, unsigned int num_buffers) +{ + int i; + struct qbman_release_desc *p; + u32 rar; + + if (!num_buffers || (num_buffers > 7)) + return -EINVAL; + + rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR); + if (!RAR_SUCCESS(rar)) + return -EBUSY; + + /* Start the release command */ + p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar))); + + /* Copy the caller's buffer pointers to the command */ + for (i = 0; i < num_buffers; i++) + p->buf[i] = cpu_to_le64(buffers[i]); + p->bpid = d->bpid; + + p->verb = d->verb | RAR_VB(rar) | num_buffers; + dma_wmb(); + qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT + + RAR_IDX(rar) * 4, QMAN_RT_MODE); return 0; } diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h index ac58a9741d33..3b3fb86d180b 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.h +++ b/drivers/soc/fsl/dpio/qbman-portal.h @@ -145,6 +145,33 @@ struct qbman_swp { } dqrr; }; +/* Function pointers */ +extern +int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd); +extern +int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames); +extern +int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + int num_frames); +extern +int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d); +extern +const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s); +extern +int (*qbman_swp_release_ptr)(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, + unsigned int num_buffers); + +/* Functions */ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); void qbman_swp_finish(struct qbman_swp *p); u32 qbman_swp_interrupt_read_status(struct qbman_swp *p); @@ -169,9 +196,6 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid, void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid, enum qbman_pull_type_e dct); -int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d); - -const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq); @@ -183,15 +207,11 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid); void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid, u32 qd_bin, u32 qd_prio); -int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d, - const struct dpaa2_fd *fd); void qbman_release_desc_clear(struct qbman_release_desc *d); void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid); void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable); -int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, - const u64 *buffers, unsigned int num_buffers); int 
qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers, unsigned int num_buffers); int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid, @@ -204,18 +224,60 @@ void *qbman_swp_mc_start(struct qbman_swp *p); void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb); void *qbman_swp_mc_result(struct qbman_swp *p); -int +/** + * qbman_swp_enqueue() - Issue an enqueue command + * @s: the software portal used for enqueue + * @d: the enqueue descriptor + * @fd: the frame descriptor to be enqueued + * + * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. + */ +static inline int +qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd) +{ + return qbman_swp_enqueue_ptr(s, d, fd); +} + +/** + * qbman_swp_enqueue_multiple() - Issue a multi enqueue command + * using one enqueue descriptor + * @s: the software portal used for enqueue + * @d: the enqueue descriptor + * @fd: table pointer of frame descriptor table to be enqueued + * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL + * @num_frames: number of fd to be enqueued + * + * Return the number of fd enqueued, or a negative error number. + */ +static inline int qbman_swp_enqueue_multiple(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd, uint32_t *flags, - int num_frames); + int num_frames) +{ + return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames); +} -int +/** + * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command + * using multiple enqueue descriptor + * @s: the software portal used for enqueue + * @d: table of minimal enqueue descriptor + * @fd: table pointer of frame descriptor table to be enqueued + * @num_frames: number of fd to be enqueued + * + * Return the number of fd enqueued, or a negative error number. + */ +static inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd, - int num_frames); + int num_frames) +{ + return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames); +} /** * qbman_result_is_DQ() - check if the dequeue result is a dequeue response @@ -528,4 +590,49 @@ int qbman_bp_query(struct qbman_swp *s, u16 bpid, u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a); +/** + * qbman_swp_release() - Issue a buffer release command + * @s: the software portal object + * @d: the release descriptor + * @buffers: a pointer pointing to the buffer address to be released + * @num_buffers: number of buffers to be released, must be less than 8 + * + * Return 0 for success, -EBUSY if the release command ring is not ready. + */ +static inline int qbman_swp_release(struct qbman_swp *s, + const struct qbman_release_desc *d, + const u64 *buffers, + unsigned int num_buffers) +{ + return qbman_swp_release_ptr(s, d, buffers, num_buffers); +} + +/** + * qbman_swp_pull() - Issue the pull dequeue command + * @s: the software portal object + * @d: the software portal descriptor which has been configured with + * the set of qbman_pull_desc_set_*() calls + * + * Return 0 for success, and -EBUSY if the software portal is not ready + * to do pull dequeue. + */ +static inline int qbman_swp_pull(struct qbman_swp *s, + struct qbman_pull_desc *d) +{ + return qbman_swp_pull_ptr(s, d); +} + +/** + * qbman_swp_dqrr_next() - Get an valid DQRR entry + * @s: the software portal object + * + * Return NULL if there are no unconsumed DQRR entries. 
Return a DQRR entry + * only once, so repeated calls can return a sequence of DQRR entries, without + * requiring they be consumed immediately or in any particular order. + */ +static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) +{ + return qbman_swp_dqrr_next_ptr(s); +} + #endif /* __FSL_QBMAN_PORTAL_H */ From 3b2abda7d28c69f564c1157b9b9c21ef40092ee9 Mon Sep 17 00:00:00 2001 From: Youri Querry Date: Thu, 12 Dec 2019 17:01:18 +0000 Subject: [PATCH 18/70] soc: fsl: dpio: Replace QMAN array mode with ring mode enqueue This change of algorithm will enable faster bulk enqueue. This will greatly benefit XDP bulk enqueue. Signed-off-by: Youri Querry Acked-by: Roy Pledge Signed-off-by: Li Yang --- drivers/soc/fsl/dpio/qbman-portal.c | 410 +++++++++++++++++++++------- drivers/soc/fsl/dpio/qbman-portal.h | 13 + 2 files changed, 328 insertions(+), 95 deletions(-) diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index 0ffe018afb17..740ee0d19582 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "qbman-portal.h" @@ -21,6 +22,7 @@ /* CINH register offsets */ #define QBMAN_CINH_SWP_EQCR_PI 0x800 +#define QBMAN_CINH_SWP_EQCR_CI 0x840 #define QBMAN_CINH_SWP_EQAR 0x8c0 #define QBMAN_CINH_SWP_CR_RT 0x900 #define QBMAN_CINH_SWP_VDQCR_RT 0x940 @@ -44,6 +46,8 @@ #define QBMAN_CENA_SWP_CR 0x600 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) #define QBMAN_CENA_SWP_VDQCR 0x780 +#define QBMAN_CENA_SWP_EQCR_CI 0x840 +#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840 /* CENA register offsets in memory-backed mode */ #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6)) @@ -71,6 +75,12 @@ /* opaque token for static dequeues */ #define QMAN_SDQCR_TOKEN 0xbb +#define QBMAN_EQCR_DCA_IDXMASK 0x0f +#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31) + +#define EQ_DESC_SIZE_WITHOUT_FD 29 +#define EQ_DESC_SIZE_FD_START 32 + enum qbman_sdqcr_dct { qbman_sdqcr_dct_null = 0, qbman_sdqcr_dct_prio_ics, @@ -215,6 +225,15 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm, #define QMAN_RT_MODE 0x00000100 +static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) +{ + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + else + return (2 * ringsize) - (first - last); +} + /** * qbman_swp_init() - Create a functional object representing the given * QBMan portal descriptor. 
@@ -227,6 +246,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) { struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL); u32 reg; + u32 mask_size; + u32 eqcr_pi; + + spin_lock_init(&p->access_spinlock); if (!p) return NULL; @@ -255,25 +278,38 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) p->addr_cena = d->cena_bar; p->addr_cinh = d->cinh_bar; - if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) - memset(p->addr_cena, 0, 64 * 1024); + if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { - reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, - 1, /* Writes Non-cacheable */ - 0, /* EQCR_CI stashing threshold */ - 3, /* RPM: Valid bit mode, RCR in array mode */ - 2, /* DCM: Discrete consumption ack mode */ - 3, /* EPM: Valid bit mode, EQCR in array mode */ - 1, /* mem stashing drop enable == TRUE */ - 1, /* mem stashing priority == TRUE */ - 1, /* mem stashing enable == TRUE */ - 1, /* dequeue stashing priority == TRUE */ - 0, /* dequeue stashing enable == FALSE */ - 0); /* EQCR_CI stashing priority == FALSE */ - if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) + reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, + 1, /* Writes Non-cacheable */ + 0, /* EQCR_CI stashing threshold */ + 3, /* RPM: RCR in array mode */ + 2, /* DCM: Discrete consumption ack */ + 2, /* EPM: EQCR in ring mode */ + 1, /* mem stashing drop enable enable */ + 1, /* mem stashing priority enable */ + 1, /* mem stashing enable */ + 1, /* dequeue stashing priority enable */ + 0, /* dequeue stashing enable enable */ + 0); /* EQCR_CI stashing priority enable */ + } else { + memset(p->addr_cena, 0, 64 * 1024); + reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, + 1, /* Writes Non-cacheable */ + 1, /* EQCR_CI stashing threshold */ + 3, /* RPM: RCR in array mode */ + 2, /* DCM: Discrete consumption ack */ + 0, /* EPM: EQCR in ring mode */ + 1, /* mem stashing drop enable */ + 1, /* mem stashing priority enable */ + 1, /* mem stashing enable */ + 1, /* dequeue stashing priority enable */ + 0, /* dequeue stashing enable */ + 0); /* EQCR_CI stashing priority enable */ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */ + } qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg); reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); @@ -295,7 +331,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) */ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0); + p->eqcr.pi_ring_size = 8; if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) { + p->eqcr.pi_ring_size = 32; qbman_swp_enqueue_ptr = qbman_swp_enqueue_mem_back; qbman_swp_enqueue_multiple_ptr = @@ -307,6 +345,15 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) qbman_swp_release_ptr = qbman_swp_release_mem_back; } + for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1) + p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1; + eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI); + p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask; + p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT; + p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI) + & p->eqcr.pi_ci_mask; + p->eqcr.available = p->eqcr.pi_ring_size; + return p; } @@ -460,6 +507,7 @@ enum qb_enqueue_commands { #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4 +#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7 /** * qbman_eq_desc_clear() - Clear the 
contents of a descriptor to @@ -535,6 +583,7 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p, QMAN_RT_MODE); } +#define QB_RT_BIT ((u32)0x100) /** * qbman_swp_enqueue_direct() - Issue an enqueue command * @s: the software portal used for enqueue @@ -546,34 +595,19 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p, * * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. */ -int qbman_swp_enqueue_direct(struct qbman_swp *s, const struct qbman_eq_desc *d, - const struct dpaa2_fd *fd) +static +int qbman_swp_enqueue_direct(struct qbman_swp *s, + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd) { - struct qbman_eq_desc_with_fd *p; - u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR); + int flags = 0; + int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1); - if (!EQAR_SUCCESS(eqar)) - return -EBUSY; - - p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); - /* This is mapped as DEVICE type memory, writes are - * with address alignment: - * desc.dca address alignment = 1 - * desc.seqnum address alignment = 2 - * desc.orpid address alignment = 4 - * desc.tgtid address alignment = 8 - */ - p->desc.dca = d->dca; - p->desc.seqnum = d->seqnum; - p->desc.orpid = d->orpid; - memcpy(&p->desc.tgtid, &d->tgtid, 24); - memcpy(&p->fd, fd, sizeof(*fd)); - - /* Set the verb byte, have to substitute in the valid-bit */ - dma_wmb(); - p->desc.verb = d->verb | EQAR_VB(eqar); - - return 0; + if (ret >= 0) + ret = 0; + else + ret = -EBUSY; + return ret; } /** @@ -587,35 +621,19 @@ int qbman_swp_enqueue_direct(struct qbman_swp *s, const struct qbman_eq_desc *d, * * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. */ +static int qbman_swp_enqueue_mem_back(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd) { - struct qbman_eq_desc_with_fd *p; - u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR); + int flags = 0; + int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1); - if (!EQAR_SUCCESS(eqar)) - return -EBUSY; - - p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); - /* This is mapped as DEVICE type memory, writes are - * with address alignment: - * desc.dca address alignment = 1 - * desc.seqnum address alignment = 2 - * desc.orpid address alignment = 4 - * desc.tgtid address alignment = 8 - */ - p->desc.dca = d->dca; - p->desc.seqnum = d->seqnum; - p->desc.orpid = d->orpid; - memcpy(&p->desc.tgtid, &d->tgtid, 24); - memcpy(&p->fd, fd, sizeof(*fd)); - - p->desc.verb = d->verb | EQAR_VB(eqar); - dma_wmb(); - qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar)); - - return 0; + if (ret >= 0) + ret = 0; + else + ret = -EBUSY; + return ret; } /** @@ -624,26 +642,82 @@ int qbman_swp_enqueue_mem_back(struct qbman_swp *s, * @s: the software portal used for enqueue * @d: the enqueue descriptor * @fd: table pointer of frame descriptor table to be enqueued - * @flags: table pointer of flags, not used for the moment + * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL * @num_frames: number of fd to be enqueued * * Return the number of fd enqueued, or a negative error number. 
*/ +static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd, uint32_t *flags, int num_frames) { - int count = 0; + uint32_t *p = NULL; + const uint32_t *cl = (uint32_t *)d; + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; + int i, num_enqueued = 0; + uint64_t addr_cena; - while (count < num_frames) { - if (qbman_swp_enqueue_direct(s, d, fd) != 0) - break; - count++; + spin_lock(&s->access_spinlock); + half_mask = (s->eqcr.pi_ci_mask>>1); + full_mask = s->eqcr.pi_ci_mask; + + if (!s->eqcr.available) { + eqcr_ci = s->eqcr.ci; + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI; + s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI); + + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, + eqcr_ci, s->eqcr.ci); + if (!s->eqcr.available) { + spin_unlock(&s->access_spinlock); + return 0; + } } - return count; + eqcr_pi = s->eqcr.pi; + num_enqueued = (s->eqcr.available < num_frames) ? + s->eqcr.available : num_frames; + s->eqcr.available -= num_enqueued; + /* Fill in the EQCR ring */ + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + /* Skip copying the verb */ + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], + &fd[i], sizeof(*fd)); + eqcr_pi++; + } + + dma_wmb(); + + /* Set the verb byte, have to substitute in the valid-bit */ + eqcr_pi = s->eqcr.pi; + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + p[0] = cl[0] | s->eqcr.pi_vb; + if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { + struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + + d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); + } + eqcr_pi++; + if (!(eqcr_pi & half_mask)) + s->eqcr.pi_vb ^= QB_VALID_BIT; + } + + /* Flush all the cacheline without load/store in between */ + eqcr_pi = s->eqcr.pi; + addr_cena = (size_t)s->addr_cena; + for (i = 0; i < num_enqueued; i++) + eqcr_pi++; + s->eqcr.pi = eqcr_pi & full_mask; + spin_unlock(&s->access_spinlock); + + return num_enqueued; } /** @@ -652,26 +726,80 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, * @s: the software portal used for enqueue * @d: the enqueue descriptor * @fd: table pointer of frame descriptor table to be enqueued - * @flags: table pointer of flags, not used for the moment + * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL * @num_frames: number of fd to be enqueued * * Return the number of fd enqueued, or a negative error number. 
*/ +static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, - const struct qbman_eq_desc *d, - const struct dpaa2_fd *fd, - uint32_t *flags, - int num_frames) + const struct qbman_eq_desc *d, + const struct dpaa2_fd *fd, + uint32_t *flags, + int num_frames) { - int count = 0; + uint32_t *p = NULL; + const uint32_t *cl = (uint32_t *)(d); + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; + int i, num_enqueued = 0; + unsigned long irq_flags; - while (count < num_frames) { - if (qbman_swp_enqueue_mem_back(s, d, fd) != 0) - break; - count++; + spin_lock(&s->access_spinlock); + local_irq_save(irq_flags); + + half_mask = (s->eqcr.pi_ci_mask>>1); + full_mask = s->eqcr.pi_ci_mask; + if (!s->eqcr.available) { + eqcr_ci = s->eqcr.ci; + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK; + s->eqcr.ci = __raw_readl(p) & full_mask; + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, + eqcr_ci, s->eqcr.ci); + if (!s->eqcr.available) { + local_irq_restore(irq_flags); + spin_unlock(&s->access_spinlock); + return 0; + } } - return count; + eqcr_pi = s->eqcr.pi; + num_enqueued = (s->eqcr.available < num_frames) ? + s->eqcr.available : num_frames; + s->eqcr.available -= num_enqueued; + /* Fill in the EQCR ring */ + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + /* Skip copying the verb */ + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], + &fd[i], sizeof(*fd)); + eqcr_pi++; + } + + /* Set the verb byte, have to substitute in the valid-bit */ + eqcr_pi = s->eqcr.pi; + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + p[0] = cl[0] | s->eqcr.pi_vb; + if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { + struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + + d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); + } + eqcr_pi++; + if (!(eqcr_pi & half_mask)) + s->eqcr.pi_vb ^= QB_VALID_BIT; + } + s->eqcr.pi = eqcr_pi & full_mask; + + dma_wmb(); + qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI, + (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb); + local_irq_restore(irq_flags); + spin_unlock(&s->access_spinlock); + + return num_enqueued; } /** @@ -684,20 +812,66 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, * * Return the number of fd enqueued, or a negative error number. */ +static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd, int num_frames) { - int count = 0; + uint32_t *p; + const uint32_t *cl; + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; + int i, num_enqueued = 0; + uint64_t addr_cena; - while (count < num_frames) { - if (qbman_swp_enqueue_direct(s, &(d[count]), fd) != 0) - break; - count++; + half_mask = (s->eqcr.pi_ci_mask>>1); + full_mask = s->eqcr.pi_ci_mask; + if (!s->eqcr.available) { + eqcr_ci = s->eqcr.ci; + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI; + s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI); + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, + eqcr_ci, s->eqcr.ci); + if (!s->eqcr.available) + return 0; } - return count; + eqcr_pi = s->eqcr.pi; + num_enqueued = (s->eqcr.available < num_frames) ? 
+ s->eqcr.available : num_frames; + s->eqcr.available -= num_enqueued; + /* Fill in the EQCR ring */ + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + cl = (uint32_t *)(&d[i]); + /* Skip copying the verb */ + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], + &fd[i], sizeof(*fd)); + eqcr_pi++; + } + + dma_wmb(); + + /* Set the verb byte, have to substitute in the valid-bit */ + eqcr_pi = s->eqcr.pi; + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + cl = (uint32_t *)(&d[i]); + p[0] = cl[0] | s->eqcr.pi_vb; + eqcr_pi++; + if (!(eqcr_pi & half_mask)) + s->eqcr.pi_vb ^= QB_VALID_BIT; + } + + /* Flush all the cacheline without load/store in between */ + eqcr_pi = s->eqcr.pi; + addr_cena = (uint64_t)s->addr_cena; + for (i = 0; i < num_enqueued; i++) + eqcr_pi++; + s->eqcr.pi = eqcr_pi & full_mask; + + return num_enqueued; } /** @@ -710,20 +884,62 @@ int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, * * Return the number of fd enqueued, or a negative error number. */ +static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, const struct qbman_eq_desc *d, const struct dpaa2_fd *fd, int num_frames) { - int count = 0; + uint32_t *p; + const uint32_t *cl; + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; + int i, num_enqueued = 0; - while (count < num_frames) { - if (qbman_swp_enqueue_mem_back(s, &(d[count]), fd) != 0) - break; - count++; + half_mask = (s->eqcr.pi_ci_mask>>1); + full_mask = s->eqcr.pi_ci_mask; + if (!s->eqcr.available) { + eqcr_ci = s->eqcr.ci; + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK; + s->eqcr.ci = __raw_readl(p) & full_mask; + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, + eqcr_ci, s->eqcr.ci); + if (!s->eqcr.available) + return 0; } - return count; + eqcr_pi = s->eqcr.pi; + num_enqueued = (s->eqcr.available < num_frames) ? + s->eqcr.available : num_frames; + s->eqcr.available -= num_enqueued; + /* Fill in the EQCR ring */ + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + cl = (uint32_t *)(&d[i]); + /* Skip copying the verb */ + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], + &fd[i], sizeof(*fd)); + eqcr_pi++; + } + + /* Set the verb byte, have to substitute in the valid-bit */ + eqcr_pi = s->eqcr.pi; + for (i = 0; i < num_enqueued; i++) { + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); + cl = (uint32_t *)(&d[i]); + p[0] = cl[0] | s->eqcr.pi_vb; + eqcr_pi++; + if (!(eqcr_pi & half_mask)) + s->eqcr.pi_vb ^= QB_VALID_BIT; + } + + s->eqcr.pi = eqcr_pi & full_mask; + + dma_wmb(); + qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI, + (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb); + + return num_enqueued; } /* Static (push) dequeue */ @@ -889,6 +1105,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid, * Return 0 for success, and -EBUSY if the software portal is not ready * to do pull dequeue. */ +static int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d) { struct qbman_pull_desc *p; @@ -924,6 +1141,7 @@ int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d) * Return 0 for success, and -EBUSY if the software portal is not ready * to do pull dequeue. 
*/ +static int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d) { struct qbman_pull_desc *p; @@ -942,6 +1160,8 @@ int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d) p->dq_src = d->dq_src; p->rsp_addr = d->rsp_addr; p->rsp_addr_virt = d->rsp_addr_virt; + + /* Set the verb byte, have to substitute in the valid-bit */ p->verb = d->verb | s->vdq.valid_bit; s->vdq.valid_bit ^= QB_VALID_BIT; dma_wmb(); diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h index 3b3fb86d180b..c7c2225b7d91 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.h +++ b/drivers/soc/fsl/dpio/qbman-portal.h @@ -143,6 +143,19 @@ struct qbman_swp { u8 dqrr_size; int reset_bug; /* indicates dqrr reset workaround is needed */ } dqrr; + + struct { + u32 pi; + u32 pi_vb; + u32 pi_ring_size; + u32 pi_ci_mask; + u32 ci; + int available; + u32 pend; + u32 no_pfdr; + } eqcr; + + spinlock_t access_spinlock; }; /* Function pointers */ From 4ddfb4af07c82e7f63c28347e463046aa14c1837 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 11 Feb 2020 17:10:45 -0600 Subject: [PATCH 19/70] firmware: arm_scmi: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertenly introduced[3] to the codebase from now on. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Replace the zero-length member "msg_payload" in scmi_shared_mem structure with flexible-array. Link: https://lore.kernel.org/r/20200211231045.GA13956@embeddedor Signed-off-by: Gustavo A. R. Silva [ rebased the change as files are moved around ] Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/shmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index ca0ffd302ea2..e1e816e0018c 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -26,7 +26,7 @@ struct scmi_shared_mem { #define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) __le32 length; __le32 msg_header; - u8 msg_payload[0]; + u8 msg_payload[]; }; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, From 8694548ae1f245ad164d90d7aeafc7ab3bee71a4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 11 Feb 2020 17:12:52 -0600 Subject: [PATCH 20/70] firmware: arm_scmi/perf: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertenly introduced[3] to the codebase from now on. 
This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Replace the zero-length member "opp" in scmi_msg_resp_perf_describe_levels structure with flexible-array. Link: https://lore.kernel.org/r/20200211231252.GA14830@embeddedor Signed-off-by: Gustavo A. R. Silva Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/perf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index ec81e6f7e7a4..34f3a917dd8d 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -89,7 +89,7 @@ struct scmi_msg_resp_perf_describe_levels { __le32 power; __le16 transition_latency_us; __le16 reserved; - } opp[0]; + } opp[]; }; struct scmi_perf_get_fc_info { From 539db76293cd2e73fe798ed0f21ac852351fedd8 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 11 Feb 2020 17:16:04 -0600 Subject: [PATCH 21/70] firmware: arm_scpi: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertenly introduced[3] to the codebase from now on. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Replace the zero-length member "payload" in {legacy_,}scpi_shared_mem structures with flexible-array. Link: https://lore.kernel.org/r/20200211231604.GA17274@embeddedor Signed-off-by: Gustavo A. R. Silva Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scpi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index a80c331c3a6e..d0dee37ad522 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -262,12 +262,12 @@ struct scpi_drvinfo { struct scpi_shared_mem { __le32 command; __le32 status; - u8 payload[0]; + u8 payload[]; } __packed; struct legacy_scpi_shared_mem { __le32 status; - u8 payload[0]; + u8 payload[]; } __packed; struct scp_capabilities { From 57c45d4d8a9d78c962e9ddfc4fb2d3210f34f966 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 11 Feb 2020 15:10:10 -0600 Subject: [PATCH 22/70] misc: vexpress: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertenly introduced[3] to the codebase from now on. This issue was found with the help of Coccinelle. 
[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Replace the zero-length member "template" in vexpress_syscfg_func structure with flexible-array. Link: https://lore.kernel.org/r/20200211211010.GA32239@embeddedor Signed-off-by: Gustavo A. R. Silva Signed-off-by: Sudeep Holla --- drivers/misc/vexpress-syscfg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c index 058fcd7f9f01..a431787c0898 100644 --- a/drivers/misc/vexpress-syscfg.c +++ b/drivers/misc/vexpress-syscfg.c @@ -42,7 +42,7 @@ struct vexpress_syscfg_func { struct vexpress_syscfg *syscfg; struct regmap *regmap; int num_templates; - u32 template[0]; /* Keep it last! */ + u32 template[]; /* Keep it last! */ }; From 4ff27112282e5482d618ea93837fe416af671bc2 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 18 Feb 2020 12:24:49 +0100 Subject: [PATCH 23/70] soc: renesas: Remove ARCH_R8A7795 Replace the final user of CONFIG_ARCH_R8A7795 by CONFIG_ARCH_R8A77950 || CONFIG_ARCH_R8A77951, and remove the now unused CONFIG_ARCH_R8A7795 symbol definition. Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20200218112449.5723-1-geert+renesas@glider.be --- drivers/soc/renesas/Kconfig | 20 ++++++++++---------- drivers/soc/renesas/renesas-soc.c | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index ba2b8b51d2d9..7f154517b91c 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -193,19 +193,19 @@ config ARCH_R8A774C0 This enables support for the Renesas RZ/G2E SoC. config ARCH_R8A77950 - bool - -config ARCH_R8A77951 - bool - -config ARCH_R8A7795 - bool "Renesas R-Car H3 SoC Platform" - select ARCH_R8A77950 - select ARCH_R8A77951 + bool "Renesas R-Car H3 ES1.x SoC Platform" select ARCH_RCAR_GEN3 select SYSC_R8A7795 help - This enables support for the Renesas R-Car H3 SoC. + This enables support for the Renesas R-Car H3 SoC (revision 1.x). + +config ARCH_R8A77951 + bool "Renesas R-Car H3 ES2.0+ SoC Platform" + select ARCH_RCAR_GEN3 + select SYSC_R8A7795 + help + This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and + later). config ARCH_R8A77960 bool "Renesas R-Car M3-W SoC Platform" diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 850f5733dc88..35dba8b8814e 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -259,7 +259,7 @@ static const struct of_device_id renesas_socs[] __initconst = { #ifdef CONFIG_ARCH_R8A7794 { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 }, #endif -#ifdef CONFIG_ARCH_R8A7795 +#if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951) { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 }, #endif #ifdef CONFIG_ARCH_R8A77960 From 6a7f10c795743a7f81e89df99866d7532811c324 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 21 Feb 2020 23:11:43 +0000 Subject: [PATCH 24/70] soc: fsl: dpio: fix dereference of pointer p before null check Pointer p is currently being dereferenced before it is null checked on a memory allocation failure check. Fix this by checking if p is null before dereferencing it. 
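A condensed view of the corrected ordering, for illustration only (the allocation call itself is not part of the hunk below and is shown here just for context):

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)					/* check the allocation first */
		return NULL;
	spin_lock_init(&p->access_spinlock);	/* only then touch members */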
Addresses-Coverity: ("Dereference before null check") Fixes: 3b2abda7d28c ("soc: fsl: dpio: Replace QMAN array mode with ring mode enqueue") Signed-off-by: Colin Ian King Signed-off-by: Li Yang --- drivers/soc/fsl/dpio/qbman-portal.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index 740ee0d19582..d1f49caa5b13 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -249,10 +249,11 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) u32 mask_size; u32 eqcr_pi; - spin_lock_init(&p->access_spinlock); - if (!p) return NULL; + + spin_lock_init(&p->access_spinlock); + p->desc = d; p->mc.valid_bit = QB_VALID_BIT; p->sdq = 0; From 4f929d0877543df8a834afa5b8732d469c05cd84 Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Thu, 20 Feb 2020 17:56:49 +0200 Subject: [PATCH 25/70] firmware: imx: Remove IMX_SC_RPC_SVC_ABORT This is not used by linux and not supported as part of imx SCU api, it was added by mistake. The constant value "9" has since been reassigned in firmware to a different service. Signed-off-by: Leonard Crestez Signed-off-by: Shawn Guo --- include/linux/firmware/imx/ipc.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h index 6312c8cb084a..891057434858 100644 --- a/include/linux/firmware/imx/ipc.h +++ b/include/linux/firmware/imx/ipc.h @@ -25,7 +25,6 @@ enum imx_sc_rpc_svc { IMX_SC_RPC_SVC_PAD = 6, IMX_SC_RPC_SVC_MISC = 7, IMX_SC_RPC_SVC_IRQ = 8, - IMX_SC_RPC_SVC_ABORT = 9 }; struct imx_sc_rpc_msg { From cef766300353613aa273791f70b3125d1f0420ae Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 26 Feb 2020 10:02:19 -0800 Subject: [PATCH 26/70] drm/omap: Prepare DSS for probing without legacy platform data In order to probe display subsystem (DSS) components with ti-sysc interconnect target module without legacy platform data and using devicetree, we need to update dss probing a bit. In the device tree, we will be defining the data also for the interconnect target modules as DSS really is a private interconnect. There is some information about that in 4460 TRM in "Figure 10-3. DSS Integration" for example where it mentions "32-bit interconnect (SLX)". The changes we need to make are: 1. Parse also device tree subnodes for the compatible property fixup 2. 
Update the component code to consider device tree subnodes Cc: dri-devel@lists.freedesktop.org Cc: Jyri Sarha Cc: Laurent Pinchart Cc: Tomi Valkeinen Reviewed-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Reviewed-by: Tomi Valkeinen Signed-off-by: Tony Lindgren --- drivers/gpu/drm/omapdrm/dss/dss.c | 25 ++++++++++++++++--- .../gpu/drm/omapdrm/dss/omapdss-boot-init.c | 25 +++++++++++++------ 2 files changed, 39 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 225ec808b01a..44e8faecedc7 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1339,9 +1339,15 @@ static int dss_component_compare(struct device *dev, void *data) return dev == child; } +struct dss_component_match_data { + struct device *dev; + struct component_match **match; +}; + static int dss_add_child_component(struct device *dev, void *data) { - struct component_match **match = data; + struct dss_component_match_data *cmatch = data; + struct component_match **match = cmatch->match; /* * HACK @@ -1352,7 +1358,17 @@ static int dss_add_child_component(struct device *dev, void *data) if (strstr(dev_name(dev), "rfbi")) return 0; - component_match_add(dev->parent, match, dss_component_compare, dev); + /* + * Handle possible interconnect target modules defined within the DSS. + * The DSS components can be children of an interconnect target module + * after the device tree has been updated for the module data. + * See also omapdss_boot_init() for compatible fixup. + */ + if (strstr(dev_name(dev), "target-module")) + return device_for_each_child(dev, cmatch, + dss_add_child_component); + + component_match_add(cmatch->dev, match, dss_component_compare, dev); return 0; } @@ -1395,6 +1411,7 @@ static int dss_probe_hardware(struct dss_device *dss) static int dss_probe(struct platform_device *pdev) { const struct soc_device_attribute *soc; + struct dss_component_match_data cmatch; struct component_match *match = NULL; struct resource *dss_mem; struct dss_device *dss; @@ -1472,7 +1489,9 @@ static int dss_probe(struct platform_device *pdev) omapdss_gather_components(&pdev->dev); - device_for_each_child(&pdev->dev, &match, dss_add_child_component); + cmatch.dev = &pdev->dev; + cmatch.match = &match; + device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component); r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); if (r) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 31502857f013..09beda9f5023 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -183,9 +183,24 @@ static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = { {}, }; +static void __init omapdss_find_children(struct device_node *np) +{ + struct device_node *child; + + for_each_available_child_of_node(np, child) { + if (!of_find_property(child, "compatible", NULL)) + continue; + + omapdss_walk_device(child, true); + + if (of_device_is_compatible(child, "ti,sysc")) + omapdss_find_children(child); + } +} + static int __init omapdss_boot_init(void) { - struct device_node *dss, *child; + struct device_node *dss; INIT_LIST_HEAD(&dss_conv_list); @@ -195,13 +210,7 @@ static int __init omapdss_boot_init(void) return 0; omapdss_walk_device(dss, true); - - for_each_available_child_of_node(dss, child) { - if (!of_find_property(child, "compatible", NULL)) - continue; - - omapdss_walk_device(child, true); 
- } + omapdss_find_children(dss); while (!list_empty(&dss_conv_list)) { struct dss_conv_node *n; From e64c021fd92467e34b9d970a651bcaa8f326f3f2 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 27/70] bus: ti-sysc: Rename clk related quirks to pre_reset and post_reset quirks The clk_disable_quirk and clk_enable_quirk should really be called pre_reset_quirk and post_reset_quirk to avoid confusion like we had with hdq1w reset. Let's also rename the related functions so the code is easier to follow. Note that we also have reset_done_quirk that is needed in some cases after checking the separate register for reset done bit. Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 6113fc0a52ae..31dae22890d5 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -70,8 +70,8 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @child_needs_resume: runtime resume needed for child on resume from suspend * @disable_on_idle: status flag used for disabling modules with resets * @idle_work: work structure used to perform delayed idle on a module - * @clk_enable_quirk: module specific clock enable quirk - * @clk_disable_quirk: module specific clock disable quirk + * @pre_reset_quirk: module specific pre-reset quirk + * @post_reset_quirk: module specific post-reset quirk * @reset_done_quirk: module specific reset done quirk * @module_enable_quirk: module specific enable quirk * @module_disable_quirk: module specific disable quirk @@ -97,8 +97,8 @@ struct sysc { unsigned int needs_resume:1; unsigned int child_needs_resume:1; struct delayed_work idle_work; - void (*clk_enable_quirk)(struct sysc *sysc); - void (*clk_disable_quirk)(struct sysc *sysc); + void (*pre_reset_quirk)(struct sysc *sysc); + void (*post_reset_quirk)(struct sysc *sysc); void (*reset_done_quirk)(struct sysc *sysc); void (*module_enable_quirk)(struct sysc *sysc); void (*module_disable_quirk)(struct sysc *sysc); @@ -1418,7 +1418,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata) sysc_write(ddata, offset, 1); } -/* I2C needs extra enable bit toggling for reset */ +/* I2C needs to be disabled for reset */ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) { int offset; @@ -1439,16 +1439,16 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) sysc_write(ddata, offset, val); } -static void sysc_clk_enable_quirk_i2c(struct sysc *ddata) -{ - sysc_clk_quirk_i2c(ddata, true); -} - -static void sysc_clk_disable_quirk_i2c(struct sysc *ddata) +static void sysc_pre_reset_quirk_i2c(struct sysc *ddata) { sysc_clk_quirk_i2c(ddata, false); } +static void sysc_post_reset_quirk_i2c(struct sysc *ddata) +{ + sysc_clk_quirk_i2c(ddata, true); +} + /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ static void sysc_module_enable_quirk_sgx(struct sysc *ddata) { @@ -1488,14 +1488,14 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { - ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; + ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w; return; } if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) { - ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c; - ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c; + ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c; + ddata->post_reset_quirk = sysc_post_reset_quirk_i2c; 
return; } @@ -1583,8 +1583,8 @@ static int sysc_reset(struct sysc *ddata) else syss_done = ddata->cfg.syss_mask; - if (ddata->clk_disable_quirk) - ddata->clk_disable_quirk(ddata); + if (ddata->pre_reset_quirk) + ddata->pre_reset_quirk(ddata); sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; @@ -1594,8 +1594,8 @@ static int sysc_reset(struct sysc *ddata) usleep_range(ddata->cfg.srst_udelay, ddata->cfg.srst_udelay * 2); - if (ddata->clk_enable_quirk) - ddata->clk_enable_quirk(ddata); + if (ddata->post_reset_quirk) + ddata->post_reset_quirk(ddata); /* Poll on reset status */ if (syss_offset >= 0) { From ab4d309d8708035bd323b2e2446eb68cda5e61e5 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 28/70] bus: ti-sysc: Improve reset to work with modules with no sysconfig At least display susbsystem (DSS) has modules with no sysconfig registers and rely on custom function for module reset handling. Let's make reset work with that too. Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 31dae22890d5..f32ba6ec2ba1 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1571,7 +1571,7 @@ static int sysc_reset(struct sysc *ddata) sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; syss_offset = ddata->offsets[SYSC_SYSSTATUS]; - if (ddata->legacy_mode || sysc_offset < 0 || + if (ddata->legacy_mode || ddata->cap->regbits->srst_shift < 0 || ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) return 0; @@ -1586,9 +1586,11 @@ static int sysc_reset(struct sysc *ddata) if (ddata->pre_reset_quirk) ddata->pre_reset_quirk(ddata); - sysc_val = sysc_read_sysconfig(ddata); - sysc_val |= sysc_mask; - sysc_write(ddata, sysc_offset, sysc_val); + if (sysc_offset >= 0) { + sysc_val = sysc_read_sysconfig(ddata); + sysc_val |= sysc_mask; + sysc_write(ddata, sysc_offset, sysc_val); + } if (ddata->cfg.srst_udelay) usleep_range(ddata->cfg.srst_udelay, From 590e15c76f1231329d1543570a54058dba2e4ff6 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 29/70] bus: ti-sysc: Consider non-existing registers too when matching quirks We are currently setting -1 for non-existing sysconfig related registers for quirks, but setting -ENODEV elsewhere. And for matching the quirks, we're now just ignoring the non-existing registers. This will cause issues with misdetecting DSS registers as the hardware revision numbers can have duplicates. To avoid this, let's standardize on using -ENODEV also for the quirks instead of -1. That way we can always just test for a match without adding any more complicated logic. 
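To spell out the failure mode being avoided, a simplified sketch based on the matching code changed below: with the old '>= 0' guards, a quirk entry that used -1 for a register it does not have skipped that comparison entirely, so a module that merely shared the revision value could be matched by mistake. With -ENODEV used consistently on both sides, a missing register becomes part of the signature and must agree as well.

	/* old, simplified: comparison disabled when the entry used -1 */
	if (q->sysc_offset >= 0 &&
	    q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
		continue;

	/* new: both sides hold -ENODEV for a non-existing register, so the
	 * register layout always participates in the match */
	if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
		continue;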
Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 120 ++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f32ba6ec2ba1..c0298612e57c 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1216,16 +1216,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET), SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff, + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0), /* Some timers on omap4 and later */ - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0), - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), @@ -1238,18 +1238,18 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Quirks that need to be set based on the module address */ - SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff, + SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | SYSC_QUIRK_SWSUP_SIDLE), /* Quirks that need to be set based on detected module */ - SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, + SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, SYSC_MODULE_QUIRK_AESS), - SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff, + SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), - SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), - SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, SYSC_MODULE_QUIRK_HDQ1W), @@ -1263,12 +1263,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_I2C), SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, SYSC_MODULE_QUIRK_I2C), - SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), - SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, + SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), - SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, + SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 
SYSC_MODULE_QUIRK_WDT), @@ -1277,57 +1277,57 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), #ifdef DEBUG - SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0), - SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0), - SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0), - SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0), + SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0), + SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0), + SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 0xffff00f0, 0), - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), - SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), - SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), + SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), + SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), - SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), - SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0), - SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), + SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), + SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), - SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), - SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), - SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0), - SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0), + SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), + SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 0), + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0), + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0), + SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0), + SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0), SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0), - SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0), - SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0), + SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0), + SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0), SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), - SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0), - SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0), - SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0), - 
SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0), - SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0), - SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0), + SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0), SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0), SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0), - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0), - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0), - SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0), - SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0), - SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0), - SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0), + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0), + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), + SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), + SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), + SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, 0), + SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), - SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0), + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0), + SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), #endif }; @@ -1349,16 +1349,13 @@ static void sysc_init_early_quirks(struct sysc *ddata) if (q->base != ddata->module_pa) continue; - if (q->rev_offset >= 0 && - q->rev_offset != ddata->offsets[SYSC_REVISION]) + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) continue; - if (q->sysc_offset >= 0 && - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) continue; - if (q->syss_offset >= 0 && - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) continue; ddata->name = q->name; @@ -1378,16 +1375,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata) if (q->base && q->base != ddata->module_pa) continue; - if (q->rev_offset >= 0 && - q->rev_offset != ddata->offsets[SYSC_REVISION]) + if 
(q->rev_offset != ddata->offsets[SYSC_REVISION]) continue; - if (q->sysc_offset >= 0 && - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) continue; - if (q->syss_offset >= 0 && - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) continue; if (q->revision == ddata->revision || From 7320fd322fe9b1485372e6da294590c5bcabbe1c Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 30/70] bus: ti-sysc: Don't warn about legacy property for nested ti-sysc devices In some cases we can have nested ti-sysc instances that may still use the legacy "ti,hwmods" property. Let's not warn if that's the case. Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index c0298612e57c..4e87eb5e8ed7 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -624,7 +624,7 @@ static void sysc_check_one_child(struct sysc *ddata, const char *name; name = of_get_property(np, "ti,hwmods", NULL); - if (name) + if (name && !of_device_is_compatible(np, "ti,sysc")) dev_warn(ddata->dev, "really a child ti,hwmods property?"); sysc_check_quirk_stdout(ddata, np); From feaa8baee82ababa46af95b03cfc28680ad647a6 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 31/70] bus: ti-sysc: Implement SoC revision handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to know SoC type and features for cases where the same SoC may be installed in various versions on the same board and would need a separate dts file otherwise for the different variants. For example, am3703 is pin compatible with omap3630, but has sgx and iva accelerators disabled. We must not try to access the sgx or iva module registers on am3703, and need to set the unavailable devices disabled early. Let's also detect omap3430 as that is needed for display subsystem (DSS) reset later on, and GP vs EMU or HS devices. Further SoC specific disabled device detection can be added as needed, such as dra71x vs dra76x rtc and usb4. Cc: Adam Ford Cc: André Hentschel Cc: H. 
Nikolaus Schaller Cc: Keerthy Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/pdata-quirks.c | 6 + drivers/bus/ti-sysc.c | 194 +++++++++++++++++++++++++- include/linux/platform_data/ti-sysc.h | 1 + 3 files changed, 200 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index dbb7c2acef31..2a4fe3e68b82 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -397,10 +397,16 @@ static int ti_sysc_shutdown_module(struct device *dev, return omap_hwmod_shutdown(cookie->data); } +static bool ti_sysc_soc_type_gp(void) +{ + return omap_type() == OMAP2_DEVICE_TYPE_GP; +} + static struct of_dev_auxdata omap_auxdata_lookup[]; static struct ti_sysc_platform_data ti_sysc_pdata = { .auxdata = omap_auxdata_lookup, + .soc_type_gp = ti_sysc_soc_type_gp, .init_clockdomain = ti_sysc_clkdm_init, .clkdm_deny_idle = ti_sysc_clkdm_deny_idle, .clkdm_allow_idle = ti_sysc_clkdm_allow_idle, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4e87eb5e8ed7..4c377c576582 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -15,15 +16,47 @@ #include #include #include +#include #include #include #include +#define DIS_ISP BIT(2) +#define DIS_IVA BIT(1) +#define DIS_SGX BIT(0) + +#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), } + #define MAX_MODULE_SOFTRESET_WAIT 10000 -static const char * const reg_names[] = { "rev", "sysc", "syss", }; +enum sysc_soc { + SOC_UNKNOWN, + SOC_2420, + SOC_2430, + SOC_3430, + SOC_3630, + SOC_4430, + SOC_4460, + SOC_4470, + SOC_5430, + SOC_AM3, + SOC_AM4, + SOC_DRA7, +}; + +struct sysc_address { + unsigned long base; + struct list_head node; +}; + +struct sysc_soc_info { + unsigned long general_purpose:1; + enum sysc_soc soc; + struct mutex list_lock; /* disabled modules list lock */ + struct list_head disabled_modules; +}; enum sysc_clocks { SYSC_FCK, @@ -39,6 +72,8 @@ enum sysc_clocks { SYSC_MAX_CLOCKS, }; +static struct sysc_soc_info *sysc_soc; +static const char * const reg_names[] = { "rev", "sysc", "syss", }; static const char * const clock_names[SYSC_MAX_CLOCKS] = { "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4", "opt5", "opt6", "opt7", @@ -2382,6 +2417,154 @@ static void ti_sysc_idle(struct work_struct *work) pm_runtime_put_sync(ddata->dev); } +/* + * SoC model and features detection. Only needed for SoCs that need + * special handling for quirks, no need to list others. + */ +static const struct soc_device_attribute sysc_soc_match[] = { + SOC_FLAG("OMAP242*", SOC_2420), + SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("OMAP3[45]*", SOC_3430), + SOC_FLAG("OMAP3[67]*", SOC_3630), + SOC_FLAG("OMAP443*", SOC_4430), + SOC_FLAG("OMAP446*", SOC_4460), + SOC_FLAG("OMAP447*", SOC_4470), + SOC_FLAG("OMAP54*", SOC_5430), + SOC_FLAG("AM433", SOC_AM3), + SOC_FLAG("AM43*", SOC_AM4), + SOC_FLAG("DRA7*", SOC_DRA7), + + { /* sentinel */ }, +}; + +/* + * List of SoCs variants with disabled features. By default we assume all + * devices in the device tree are available so no need to list those SoCs. 
+ */ +static const struct soc_device_attribute sysc_soc_feat_match[] = { + /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */ + SOC_FLAG("AM3505", DIS_SGX), + SOC_FLAG("OMAP3525", DIS_SGX), + SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX), + SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX), + + /* OMAP3630/DM3730 variants with some accelerators disabled */ + SOC_FLAG("AM3703", DIS_IVA | DIS_SGX), + SOC_FLAG("DM3725", DIS_SGX), + SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX), + SOC_FLAG("OMAP3615/AM3715", DIS_IVA), + SOC_FLAG("OMAP3621", DIS_ISP), + + { /* sentinel */ }, +}; + +static int sysc_add_disabled(unsigned long base) +{ + struct sysc_address *disabled_module; + + disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL); + if (!disabled_module) + return -ENOMEM; + + disabled_module->base = base; + + mutex_lock(&sysc_soc->list_lock); + list_add(&disabled_module->node, &sysc_soc->disabled_modules); + mutex_unlock(&sysc_soc->list_lock); + + return 0; +} + +/* + * One time init to detect the booted SoC and disable unavailable features. + * Note that we initialize static data shared across all ti-sysc instances + * so ddata is only used for SoC type. This can be called from module_init + * once we no longer need to rely on platform data. + */ +static int sysc_init_soc(struct sysc *ddata) +{ + const struct soc_device_attribute *match; + struct ti_sysc_platform_data *pdata; + unsigned long features = 0; + + if (sysc_soc) + return 0; + + sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL); + if (!sysc_soc) + return -ENOMEM; + + mutex_init(&sysc_soc->list_lock); + INIT_LIST_HEAD(&sysc_soc->disabled_modules); + sysc_soc->general_purpose = true; + + pdata = dev_get_platdata(ddata->dev); + if (pdata && pdata->soc_type_gp) + sysc_soc->general_purpose = pdata->soc_type_gp(); + + match = soc_device_match(sysc_soc_match); + if (match && match->data) + sysc_soc->soc = (int)match->data; + + match = soc_device_match(sysc_soc_feat_match); + if (!match) + return 0; + + if (match->data) + features = (unsigned long)match->data; + + /* + * Add disabled devices to the list based on the module base. + * Note that this must be done before we attempt to access the + * device and have module revision checks working. 
+ */ + if (features & DIS_ISP) + sysc_add_disabled(0x480bd400); + if (features & DIS_IVA) + sysc_add_disabled(0x5d000000); + if (features & DIS_SGX) + sysc_add_disabled(0x50000000); + + return 0; +} + +static void sysc_cleanup_soc(void) +{ + struct sysc_address *disabled_module; + struct list_head *pos, *tmp; + + if (!sysc_soc) + return; + + mutex_lock(&sysc_soc->list_lock); + list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) { + disabled_module = list_entry(pos, struct sysc_address, node); + list_del(pos); + kfree(disabled_module); + } + mutex_unlock(&sysc_soc->list_lock); +} + +static int sysc_check_disabled_devices(struct sysc *ddata) +{ + struct sysc_address *disabled_module; + struct list_head *pos; + int error = 0; + + mutex_lock(&sysc_soc->list_lock); + list_for_each(pos, &sysc_soc->disabled_modules) { + disabled_module = list_entry(pos, struct sysc_address, node); + if (ddata->module_pa == disabled_module->base) { + dev_dbg(ddata->dev, "module disabled for this SoC\n"); + error = -ENODEV; + break; + } + } + mutex_unlock(&sysc_soc->list_lock); + + return error; +} + static const struct of_device_id sysc_match_table[] = { { .compatible = "simple-bus", }, { /* sentinel */ }, @@ -2400,6 +2583,10 @@ static int sysc_probe(struct platform_device *pdev) ddata->dev = &pdev->dev; platform_set_drvdata(pdev, ddata); + error = sysc_init_soc(ddata); + if (error) + return error; + error = sysc_init_match(ddata); if (error) return error; @@ -2430,6 +2617,10 @@ static int sysc_probe(struct platform_device *pdev) sysc_init_early_quirks(ddata); + error = sysc_check_disabled_devices(ddata); + if (error) + return error; + error = sysc_get_clocks(ddata); if (error) return error; @@ -2560,6 +2751,7 @@ static void __exit sysc_exit(void) { bus_unregister_notifier(&platform_bus_type, &sysc_nb); platform_driver_unregister(&sysc_driver); + sysc_cleanup_soc(); } module_exit(sysc_exit); diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 2cbde6542849..accab5325cf3 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -141,6 +141,7 @@ struct clk; struct ti_sysc_platform_data { struct of_dev_auxdata *auxdata; + bool (*soc_type_gp)(void); int (*init_clockdomain)(struct device *dev, struct clk *fck, struct clk *ick, struct ti_sysc_cookie *cookie); void (*clkdm_deny_idle)(struct device *dev, From e8639e1c986a8a9d0f94549170f6db579376c3ae Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 32/70] bus: ti-sysc: Handle module unlock quirk needed for some RTC The RTC modules on am3 and am4 need quirk handling to unlock and lock them for reset so let's add the quirk handling based on what we already have for legacy platform data. In later patches we will simply drop the RTC related platform data and the old quirk handling. 
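For reference, a condensed sketch of the unlock/lock sequence the quirk below implements (the function name here is illustrative; the 0x6c/0x70 kick register offsets and the magic values are the ones used in the patch, with the busy-wait and IRQ handling omitted):

	static void rtc_kick(struct sysc *ddata, bool lock)
	{
		/* writing the magic pair unlocks, writing zeroes re-locks */
		u32 kick0 = lock ? 0 : 0x83e70b13;
		u32 kick1 = lock ? 0 : 0x95a4f1e0;

		sysc_write(ddata, 0x6c, kick0);
		sysc_write(ddata, 0x70, kick1);
	}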
Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 74 ++++++++++++++++++++++++--- include/linux/platform_data/ti-sysc.h | 1 + 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4c377c576582..6caa2222091e 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -110,6 +110,8 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @reset_done_quirk: module specific reset done quirk * @module_enable_quirk: module specific enable quirk * @module_disable_quirk: module specific disable quirk + * @module_unlock_quirk: module specific sysconfig unlock quirk + * @module_lock_quirk: module specific sysconfig lock quirk */ struct sysc { struct device *dev; @@ -137,6 +139,8 @@ struct sysc { void (*reset_done_quirk)(struct sysc *sysc); void (*module_enable_quirk)(struct sysc *sysc); void (*module_disable_quirk)(struct sysc *sysc); + void (*module_unlock_quirk)(struct sysc *sysc); + void (*module_lock_quirk)(struct sysc *sysc); }; static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, @@ -896,6 +900,22 @@ static void sysc_show_registers(struct sysc *ddata) buf); } +/** + * sysc_write_sysconfig - handle sysconfig quirks for register write + * @ddata: device driver data + * @value: register value + */ +static void sysc_write_sysconfig(struct sysc *ddata, u32 value) +{ + if (ddata->module_unlock_quirk) + ddata->module_unlock_quirk(ddata); + + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); + + if (ddata->module_lock_quirk) + ddata->module_lock_quirk(ddata); +} + #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) #define SYSC_CLOCACT_ICK 2 @@ -942,7 +962,7 @@ static int sysc_enable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_midle: /* Set MIDLE mode */ @@ -961,14 +981,14 @@ static int sysc_enable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_autoidle: /* Autoidle bit must enabled separately if available */ if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); } if (ddata->module_enable_quirk) @@ -1026,7 +1046,7 @@ static int sysc_disable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_sidle: /* Set SIDLE mode */ @@ -1049,7 +1069,7 @@ static int sysc_disable_module(struct device *dev) if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); return 0; } @@ -1301,6 +1321,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, + SYSC_MODULE_QUIRK_RTC_UNLOCK), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 
0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, @@ -1356,7 +1378,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), - SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, 0), SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), @@ -1478,6 +1499,40 @@ static void sysc_post_reset_quirk_i2c(struct sysc *ddata) sysc_clk_quirk_i2c(ddata, true); } +/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ +static void sysc_quirk_rtc(struct sysc *ddata, bool lock) +{ + u32 val, kick0_val = 0, kick1_val = 0; + unsigned long flags; + int error; + + if (!lock) { + kick0_val = 0x83e70b13; + kick1_val = 0x95a4f1e0; + } + + local_irq_save(flags); + /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ + error = readl_poll_timeout(ddata->module_va + 0x44, val, + !(val & BIT(0)), 100, 50); + if (error) + dev_warn(ddata->dev, "rtc busy timeout\n"); + /* Now we have ~15 microseconds to read/write various registers */ + sysc_write(ddata, 0x6c, kick0_val); + sysc_write(ddata, 0x70, kick1_val); + local_irq_restore(flags); +} + +static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, false); +} + +static void sysc_module_lock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, true); +} + /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ static void sysc_module_enable_quirk_sgx(struct sysc *ddata) { @@ -1532,6 +1587,13 @@ static void sysc_init_module_quirks(struct sysc *ddata) if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { + ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; + ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; + + return; + } + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index accab5325cf3..0b33c3b7302f 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) From 77dfece2e6d8bedb6ecd4d61379ae3dc52f389bd Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 33/70] bus: ti-sysc: Detect display subsystem related devices In order to prepare probing display subsystem (DSS) with ti-sysc interconnect target module driver and device tree data, let's detect DSS related modules. We need to also add reset quirk handling for DSS, but until that's done, let's just enable the optional clock quirks for DSS and omap4 HDMI. The rest is just naming of modules if CONFIG_DEBUG is set. 
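To make the new table entries below easier to read, this is the field layout a SYSC_QUIRK() line encodes, as used throughout the existing table: name, module base address, revision register offset, sysconfig offset, sysstatus offset, expected revision value, revision mask, and quirk flags. For example, one of the DSS entries added below decodes as: module at 0x58000000, revision register at offset 0, no sysconfig register (-ENODEV), sysstatus at 0x14, revision 0x00000040 under a full mask, with the optional-clocks-in-reset quirk set.

	/*         name   base        rev  sysc     syss  revision    mask        quirks */
	SYSC_QUIRK("dss", 0x58000000, 0,   -ENODEV, 0x14, 0x00000040, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET),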
Cc: Jyri Sarha Cc: Laurent Pinchart Cc: Tomi Valkeinen Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 6caa2222091e..b29f4e451dc1 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1302,10 +1302,18 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_AESS), SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_NEEDED), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, SYSC_MODULE_QUIRK_HDQ1W), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, @@ -1342,13 +1350,21 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { 0xffff00f0, 0), SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0), SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 0), @@ -1366,6 +1382,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), + SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), @@ -1383,6 +1401,7 @@ static const struct 
sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0), + SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0), SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), #endif }; From 7324a7a0d5e232551eedad69fea3e4b91973d7c6 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: [PATCH 34/70] bus: ti-sysc: Implement display subsystem reset quirk The display subsystem (DSS) needs the child outputs disabled for reset. In order to prepare to probe DSS without legacy platform data, let's implement sysc_pre_reset_quirk_dss() similar to what we have for the platform data with omap_dss_reset(). Note that we cannot directly use the old omap_dss_reset() without platform data callbacks and updating omap_dss_reset() to understand struct device. And we will be dropping omap_dss_reset() anyways when all the SoCs are probing with device tree, so let's not mess with the legacy code at all. Cc: Jyri Sarha Cc: Laurent Pinchart Cc: Tomi Valkeinen Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 131 +++++++++++++++++++++++++- include/linux/platform_data/ti-sysc.h | 1 + 2 files changed, 129 insertions(+), 3 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index b29f4e451dc1..e30c97ca5579 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1303,11 +1303,11 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, - SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, - SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, - SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, @@ -1468,6 +1468,128 @@ static void sysc_init_revision_quirks(struct sysc *ddata) } } +/* + * DSS needs dispc outputs disabled to reset modules. Returns mask of + * enabled DSS interrupts. Eventually we may be able to do this on + * dispc init rather than top-level DSS init. + */ +static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, + bool disable) +{ + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; + const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); + int manager_count; + bool framedonetv_irq; + u32 val, irq_mask = 0; + + switch (sysc_soc->soc) { + case SOC_2420 ... SOC_3630: + manager_count = 2; + framedonetv_irq = false; + break; + case SOC_4430 ... 
SOC_4470: + manager_count = 3; + break; + case SOC_5430: + case SOC_DRA7: + manager_count = 4; + break; + case SOC_AM4: + manager_count = 1; + break; + case SOC_UNKNOWN: + default: + return 0; + }; + + /* Remap the whole module range to be able to reset dispc outputs */ + devm_iounmap(ddata->dev, ddata->module_va); + ddata->module_va = devm_ioremap(ddata->dev, + ddata->module_pa, + ddata->module_size); + if (!ddata->module_va) + return -EIO; + + /* DISP_CONTROL */ + val = sysc_read(ddata, dispc_offset + 0x40); + lcd_en = val & lcd_en_mask; + digit_en = val & digit_en_mask; + if (lcd_en) + irq_mask |= BIT(0); /* FRAMEDONE */ + if (digit_en) { + if (framedonetv_irq) + irq_mask |= BIT(24); /* FRAMEDONETV */ + else + irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ + } + if (disable & (lcd_en | digit_en)) + sysc_write(ddata, dispc_offset + 0x40, + val & ~(lcd_en_mask | digit_en_mask)); + + if (manager_count <= 2) + return irq_mask; + + /* DISPC_CONTROL2 */ + val = sysc_read(ddata, dispc_offset + 0x238); + lcd2_en = val & lcd_en_mask; + if (lcd2_en) + irq_mask |= BIT(22); /* FRAMEDONE2 */ + if (disable && lcd2_en) + sysc_write(ddata, dispc_offset + 0x238, + val & ~lcd_en_mask); + + if (manager_count <= 3) + return irq_mask; + + /* DISPC_CONTROL3 */ + val = sysc_read(ddata, dispc_offset + 0x848); + lcd3_en = val & lcd_en_mask; + if (lcd3_en) + irq_mask |= BIT(30); /* FRAMEDONE3 */ + if (disable && lcd3_en) + sysc_write(ddata, dispc_offset + 0x848, + val & ~lcd_en_mask); + + return irq_mask; +} + +/* DSS needs child outputs disabled and SDI registers cleared for reset */ +static void sysc_pre_reset_quirk_dss(struct sysc *ddata) +{ + const int dispc_offset = 0x1000; + int error; + u32 irq_mask, val; + + /* Get enabled outputs */ + irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false); + if (!irq_mask) + return; + + /* Clear IRQSTATUS */ + sysc_write(ddata, 0x1000 + 0x18, irq_mask); + + /* Disable outputs */ + val = sysc_quirk_dispc(ddata, dispc_offset, true); + + /* Poll IRQSTATUS */ + error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18, + val, val != irq_mask, 100, 50); + if (error) + dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", + __func__, val, irq_mask); + + if (sysc_soc->soc == SOC_3430) { + /* Clear DSS_SDI_CONTROL */ + sysc_write(ddata, dispc_offset + 0x44, 0); + + /* Clear DSS_PLL_CONTROL */ + sysc_write(ddata, dispc_offset + 0x48, 0); + } + + /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */ + sysc_write(ddata, dispc_offset + 0x40, 0); +} + /* 1-wire needs module's internal clocks enabled for reset */ static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) { @@ -1606,6 +1728,9 @@ static void sysc_init_module_quirks(struct sysc *ddata) if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET) + ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 0b33c3b7302f..ecd3a979a14d 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define 
SYSC_QUIRK_FORCE_MSTANDBY BIT(20) From a6dd255bdd7d00bbdbf78ba00bde9fc64f86c3a7 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 17 Jan 2020 02:48:34 +0800 Subject: [PATCH 35/70] bus: hisi_lpc: Fixup IO ports addresses to avoid use-after-free in host removal Some released ACPI FW for Huawei boards describes incorrect the port IO address range for child devices, in that it tells us the IO port max range is 0x3fff for each child device, which is not correct. The address range should be [e4:e8) or similar. With this incorrect upper range, the child device IO port resources overlap. As such, the kernel thinks that the LPC host serial device is a child of the IPMI device: root@(none)$ more /proc/ioports [...] 00ffc0e3-00ffffff : hisi-lpc-ipmi.0.auto 00ffc0e3-00ffc0e3 : ipmi_si 00ffc0e4-00ffc0e4 : ipmi_si 00ffc0e5-00ffc0e5 : ipmi_si 00ffc2f7-00ffffff : serial8250.1.auto 00ffc2f7-00ffc2fe : serial root@(none)$ They should both be siblings. Note that these are logical PIO addresses, which have a direct mapping from the FW IO port ranges. This shows up as a real issue when we enable CONFIG_KASAN and CONFIG_DEBUG_TEST_DRIVER_REMOVE - we see use-after-free warnings in the host removal path: ================================================================== BUG: KASAN: use-after-free in release_resource+0x38/0xc8 Read of size 8 at addr ffff0026accdbc38 by task swapper/0/1 CPU: 2 PID: 1 Comm: swapper/0 Not tainted 5.5.0-rc6-00001-g68e186e77b5c-dirty #1593 Hardware name: Huawei Taishan 2180 /D03, BIOS Hisilicon D03 IT20 Nemo 2.0 RC0 03/30/2018 Call trace: dump_backtrace+0x0/0x290 show_stack+0x14/0x20 dump_stack+0xf0/0x14c print_address_description.isra.9+0x6c/0x3b8 __kasan_report+0x12c/0x23c kasan_report+0xc/0x18 __asan_load8+0x94/0xb8 release_resource+0x38/0xc8 platform_device_del.part.10+0x80/0xe0 platform_device_unregister+0x20/0x38 hisi_lpc_acpi_remove_subdev+0x10/0x20 device_for_each_child+0xc8/0x128 hisi_lpc_acpi_remove+0x4c/0xa8 hisi_lpc_remove+0xbc/0xc0 platform_drv_remove+0x3c/0x68 really_probe+0x174/0x548 driver_probe_device+0x7c/0x148 device_driver_attach+0x94/0xa0 __driver_attach+0xa4/0x110 bus_for_each_dev+0xe8/0x158 driver_attach+0x30/0x40 bus_add_driver+0x234/0x2f0 driver_register+0xbc/0x1d0 __platform_driver_register+0x7c/0x88 hisi_lpc_driver_init+0x18/0x20 do_one_initcall+0xb4/0x258 kernel_init_freeable+0x248/0x2c0 kernel_init+0x10/0x118 ret_from_fork+0x10/0x1c ... The issue here is that the kernel created an incorrect parent-child resource dependency between two devices, and references the false parent node when deleting the second child device, when it had been deleted already. Fix up the child device resources from FW to create proper IO port resource relationships for broken FW. With this, the IO port layout looks more healthy: root@(none)$ more /proc/ioports [...] 00ffc0e3-00ffc0e7 : hisi-lpc-ipmi.0.auto 00ffc0e3-00ffc0e3 : ipmi_si 00ffc0e4-00ffc0e4 : ipmi_si 00ffc0e5-00ffc0e5 : ipmi_si 00ffc2f7-00ffc2ff : serial8250.1.auto 00ffc2f7-00ffc2fe : serial Signed-off-by: John Garry Signed-off-by: Wei Xu --- drivers/bus/hisi_lpc.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 8101df901830..378f5d62a991 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -357,6 +357,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, return 0; } +/* + * Released firmware describes the IO port max address as 0x3fff, which is + * the max host bus address. 
Fixup to a proper range. This will probably + * never be fixed in firmware. + */ +static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, + struct resource *r) +{ + if (r->end != 0x3fff) + return; + + if (r->start == 0xe4) + r->end = 0xe4 + 0x04 - 1; + else if (r->start == 0x2f8) + r->end = 0x2f8 + 0x08 - 1; + else + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", + r); +} + /* * hisi_lpc_acpi_set_io_res - set the resources for a child * @child: the device node to be updated the I/O resource @@ -418,8 +438,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, return -ENOMEM; } count = 0; - list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + list_for_each_entry(rentry, &resource_list, node) { + resources[count] = *rentry->res; + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); + count++; + } acpi_dev_free_resource_list(&resource_list); From 59a135f6fb669f4f79f43160c7b8c8d6bfb37f75 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:49 +0100 Subject: [PATCH 36/70] tee: remove linked list of struct tee_shm Removes list_shm from struct tee_context since the linked list isn't used any longer. Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 1 - drivers/tee/tee_shm.c | 12 +----------- include/linux/tee_drv.h | 3 --- 3 files changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 37d22e39fd8d..6aec502c495c 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev) kref_init(&ctx->refcount); ctx->teedev = teedev; - INIT_LIST_HEAD(&ctx->list_shm); rc = teedev->desc->ops->open(ctx); if (rc) goto err; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 937ac5aaa6d8..99f1c890ca3d 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -17,8 +17,6 @@ static void tee_shm_release(struct tee_shm *shm) mutex_lock(&teedev->mutex); idr_remove(&teedev->idr, shm->id); - if (shm->ctx) - list_del(&shm->link); mutex_unlock(&teedev->mutex); if (shm->flags & TEE_SHM_POOL) { @@ -168,12 +166,8 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, } } - if (ctx) { + if (ctx) teedev_ctx_get(ctx); - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); - } return shm; err_rem: @@ -301,10 +295,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, } } - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); - return shm; err: if (shm) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 7a03f68fb982..cbddb883a7f8 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -49,7 +49,6 @@ struct tee_shm_pool; */ struct tee_context { struct tee_device *teedev; - struct list_head list_shm; void *data; struct kref refcount; bool releasing; @@ -170,7 +169,6 @@ void tee_device_unregister(struct tee_device *teedev); * struct tee_shm - shared memory object * @teedev: device used to allocate the object * @ctx: context using the object, if NULL the context is gone - * @link link element * @paddr: physical address of the shared memory * @kaddr: virtual address of the shared memory * @size: size of shared memory @@ -187,7 +185,6 @@ void tee_device_unregister(struct tee_device *teedev); struct tee_shm { struct tee_device *teedev; struct tee_context *ctx; - struct list_head link; 
phys_addr_t paddr; void *kaddr; size_t size; From c180f9bbe29a403459dd76422f435382aec6adaa Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:52 +0100 Subject: [PATCH 37/70] tee: remove unused tee_shm_priv_alloc() tee_shm_priv_alloc() isn't useful in the current state and it's also not not used so remove it. Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 33 ++------------------------------- include/linux/tee_drv.h | 12 ------------ 2 files changed, 2 insertions(+), 43 deletions(-) diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 99f1c890ca3d..b666854c2491 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -89,20 +89,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = { .mmap = tee_shm_op_mmap, }; -static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, - struct tee_device *teedev, - size_t size, u32 flags) +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) { + struct tee_device *teedev = ctx->teedev; struct tee_shm_pool_mgr *poolm = NULL; struct tee_shm *shm; void *ret; int rc; - if (ctx && ctx->teedev != teedev) { - dev_err(teedev->dev.parent, "ctx and teedev mismatch\n"); - return ERR_PTR(-EINVAL); - } - if (!(flags & TEE_SHM_MAPPED)) { dev_err(teedev->dev.parent, "only mapped allocations supported\n"); @@ -182,31 +176,8 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, tee_device_put(teedev); return ret; } - -/** - * tee_shm_alloc() - Allocate shared memory - * @ctx: Context that allocates the shared memory - * @size: Requested size of shared memory - * @flags: Flags setting properties for the requested shared memory. - * - * Memory allocated as global shared memory is automatically freed when the - * TEE file pointer is closed. The @flags field uses the bits defined by - * TEE_SHM_* in . TEE_SHM_MAPPED must currently always be - * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and - * associated with a dma-buf handle, else driver private memory. - */ -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) -{ - return __tee_shm_alloc(ctx, ctx->teedev, size, flags); -} EXPORT_SYMBOL_GPL(tee_shm_alloc); -struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size) -{ - return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED); -} -EXPORT_SYMBOL_GPL(tee_shm_priv_alloc); - struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index cbddb883a7f8..42687f6c546d 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -315,18 +315,6 @@ void *tee_get_drvdata(struct tee_device *teedev); */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); -/** - * tee_shm_priv_alloc() - Allocate shared memory privately - * @dev: Device that allocates the shared memory - * @size: Requested size of shared memory - * - * Allocates shared memory buffer that is not associated with any client - * context. Such buffers are owned by TEE driver and used for internal calls. 
- * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); - /** * tee_shm_register() - Register shared memory buffer * @ctx: Context that registers the shared memory From f1bbacedb0af640a93e47799203e556be2825da3 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:56 +0100 Subject: [PATCH 38/70] tee: don't assign shm id for private shms Private shared memory object must not be referenced from user space. To guarantee that, don't assign an id to shared memory objects which are driver private. Signed-off-by: Jens Wiklander --- drivers/tee/tee_private.h | 3 ++- drivers/tee/tee_shm.c | 31 ++++++++++++++++++------------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index f797171f0434..e55204df31ce 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -37,7 +37,8 @@ struct tee_shm_pool { * @num_users: number of active users of this device * @c_no_user: completion used when unregistering the device * @mutex: mutex protecting @num_users and @idr - * @idr: register of shared memory object allocated on this device + * @idr: register of user space shared memory objects allocated or + * registered on this device * @pool: shared memory pool */ struct tee_device { diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index b666854c2491..02210f179ae3 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -15,9 +15,11 @@ static void tee_shm_release(struct tee_shm *shm) { struct tee_device *teedev = shm->teedev; - mutex_lock(&teedev->mutex); - idr_remove(&teedev->idr, shm->id); - mutex_unlock(&teedev->mutex); + if (shm->flags & TEE_SHM_DMA_BUF) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); + } if (shm->flags & TEE_SHM_POOL) { struct tee_shm_pool_mgr *poolm; @@ -137,17 +139,18 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) goto err_kfree; } - mutex_lock(&teedev->mutex); - shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); - mutex_unlock(&teedev->mutex); - if (shm->id < 0) { - ret = ERR_PTR(shm->id); - goto err_pool_free; - } if (flags & TEE_SHM_DMA_BUF) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err_pool_free; + } + exp_info.ops = &tee_shm_dma_buf_ops; exp_info.size = shm->size; exp_info.flags = O_RDWR; @@ -165,9 +168,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) return shm; err_rem: - mutex_lock(&teedev->mutex); - idr_remove(&teedev->idr, shm->id); - mutex_unlock(&teedev->mutex); + if (flags & TEE_SHM_DMA_BUF) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); + } err_pool_free: poolm->ops->free(poolm, shm); err_kfree: From 5271b2011e448f1be7433554e4684e91951476fa Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:59 +0100 Subject: [PATCH 39/70] tee: remove redundant teedev in struct tee_shm The ctx element in struct tee_shm is always valid. So remove the now redundant teedev element. 
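Since ctx now always stays valid for the lifetime of the shared memory object, the device is reached through the context instead of a cached pointer. A minimal sketch of the resulting access pattern (the helper name example_shm_teardown is illustrative only; the real code doing this is tee_shm_release() in the hunk below):

	static void example_shm_teardown(struct tee_shm *shm)
	{
		/* shm->ctx is always valid, so no separate shm->teedev copy is needed */
		struct tee_device *teedev = shm->ctx->teedev;

		/* only dma-buf backed objects carry an id, per the previous patch */
		if (shm->flags & TEE_SHM_DMA_BUF) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
	}
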
Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 7 ++----- include/linux/tee_drv.h | 4 +--- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 02210f179ae3..b01d2b7eea71 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -13,7 +13,7 @@ static void tee_shm_release(struct tee_shm *shm) { - struct tee_device *teedev = shm->teedev; + struct tee_device *teedev = shm->ctx->teedev; if (shm->flags & TEE_SHM_DMA_BUF) { mutex_lock(&teedev->mutex); @@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm) kfree(shm->pages); } - if (shm->ctx) - teedev_ctx_put(shm->ctx); + teedev_ctx_put(shm->ctx); kfree(shm); @@ -126,7 +125,6 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) } shm->flags = flags | TEE_SHM_POOL; - shm->teedev = teedev; shm->ctx = ctx; if (flags & TEE_SHM_DMA_BUF) poolm = teedev->pool->dma_buf_mgr; @@ -215,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, } shm->flags = flags | TEE_SHM_REGISTER; - shm->teedev = teedev; shm->ctx = ctx; shm->id = -1; addr = untagged_addr(addr); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 42687f6c546d..1412e9cc79ce 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -167,8 +167,7 @@ void tee_device_unregister(struct tee_device *teedev); /** * struct tee_shm - shared memory object - * @teedev: device used to allocate the object - * @ctx: context using the object, if NULL the context is gone + * @ctx: context using the object * @paddr: physical address of the shared memory * @kaddr: virtual address of the shared memory * @size: size of shared memory @@ -183,7 +182,6 @@ void tee_device_unregister(struct tee_device *teedev); * subsystem and from drivers that implements their own shm pool manager. */ struct tee_shm { - struct tee_device *teedev; struct tee_context *ctx; phys_addr_t paddr; void *kaddr; From 758ecf13a41a9dc4f019c1381566132ef46c08ee Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:43:02 +0100 Subject: [PATCH 40/70] tee: tee_shm_op_mmap(): use TEE_SHM_USER_MAPPED tee_shm_op_mmap() uses the TEE_SHM_USER_MAPPED flag instead of the TEE_SHM_REGISTER flag to tell if a shared memory object is originating from registered user space memory. Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index b01d2b7eea71..bd679b72bd05 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -76,7 +76,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) size_t size = vma->vm_end - vma->vm_start; /* Refuse sharing shared memory provided by application */ - if (shm->flags & TEE_SHM_REGISTER) + if (shm->flags & TEE_SHM_USER_MAPPED) return -EINVAL; return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, From 9ef7a7920678b10431d88635f9a4a49bd23ca3a7 Mon Sep 17 00:00:00 2001 From: Jianxin Pan Date: Tue, 18 Feb 2020 00:12:13 +0800 Subject: [PATCH 41/70] soc: amlogic: fix compile failure with MESON_SECURE_PM_DOMAINS & !MESON_SM When MESON_SECURE_PM_DOMAINS & !MESON_SM, there will be compile failure: .../meson-secure-pwrc.o: In function `meson_secure_pwrc_on': .../meson-secure-pwrc.c:76: undefined reference to `meson_sm_call' Fix this by adding depends on MESON_SM for MESON_SECURE_PM_DOMAINS. 
Fixes: b3dde5013e13 ("soc: amlogic: Add support for Secure power domains controller") Reported-by: Stephen Rothwell Reported-by: patchwork-bot+linux-amlogic Reported-by: kbuild test robot Signed-off-by: Jianxin Pan Signed-off-by: Kevin Hilman Tested-by: Stephen Rothwell Link: https://lore.kernel.org/r/1581955933-69832-1-git-send-email-jianxin.pan@amlogic.com --- drivers/soc/amlogic/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig index 6cb06e7b5e63..321c5e26a268 100644 --- a/drivers/soc/amlogic/Kconfig +++ b/drivers/soc/amlogic/Kconfig @@ -50,7 +50,7 @@ config MESON_EE_PM_DOMAINS config MESON_SECURE_PM_DOMAINS bool "Amlogic Meson Secure Power Domains driver" - depends on ARCH_MESON || COMPILE_TEST + depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM depends on PM && OF depends on HAVE_ARM_SMCCC default ARCH_MESON From d85eed038ef4919933b7f4f9d3b4f49ede4092aa Mon Sep 17 00:00:00 2001 From: Jianxin Pan Date: Mon, 2 Mar 2020 23:54:08 +0800 Subject: [PATCH 42/70] dt-bindings: power: Fix dt_binding_check error Missing ';' in the end of secure-monitor example node. Fixes: 165b5fb294e8 ("dt-bindings: power: add Amlogic secure power domains bindings") Reported-by: Rob Herring Signed-off-by: Jianxin Pan Reviewed-by: Neil Armstrong Acked-by: Rob Herring Signed-off-by: Kevin Hilman Link: https://lore.kernel.org/r/1583164448-83438-1-git-send-email-jianxin.pan@amlogic.com --- .../devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml index af32209218bb..bc4e037f3f73 100644 --- a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml +++ b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml @@ -36,5 +36,5 @@ examples: compatible = "amlogic,meson-a1-pwrc"; #power-domain-cells = <1>; }; - } + }; From 69e60903aaf5aa56548656897d2b0fbe4431a7fe Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 3 Mar 2020 07:17:43 -0800 Subject: [PATCH 43/70] bus: ti-sysc: Fix wrong offset for display subsystem reset quirk Commit 7324a7a0d5e2 ("bus: ti-sysc: Implement display subsystem reset quirk") added support for DSS reset, but is using dispc offset also for DSS also registers as reported by Tomi Valkeinen . Also, we're not using dispc_offset for dispc IRQSTATUS register so let's fix that too. 
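Put differently, only the DISPC registers sit behind the 0x1000 dispc_offset within the target module, while DSS_SDI_CONTROL, DSS_PLL_CONTROL and DSS_CONTROL live at the DSS module base. A short sketch of the corrected accesses, using the same offsets as the hunks below:

	/* DISPC register, behind the 0x1000 DISPC offset */
	sysc_write(ddata, dispc_offset + 0x18, irq_mask);	/* DISPC_IRQSTATUS */

	/* DSS top-level registers, at the module base */
	sysc_write(ddata, 0x44, 0);				/* DSS_SDI_CONTROL */
	sysc_write(ddata, 0x48, 0);				/* DSS_PLL_CONTROL */
	sysc_write(ddata, 0x40, 0);				/* DSS_CONTROL */
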
Fixes: 7324a7a0d5e2 ("bus: ti-sysc: Implement display subsystem reset quirk") Reported-by: Tomi Valkeinen Reviewed-by: Tomi Valkeinen Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index e30c97ca5579..46b25fa4237f 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1566,7 +1566,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata) return; /* Clear IRQSTATUS */ - sysc_write(ddata, 0x1000 + 0x18, irq_mask); + sysc_write(ddata, dispc_offset + 0x18, irq_mask); /* Disable outputs */ val = sysc_quirk_dispc(ddata, dispc_offset, true); @@ -1580,14 +1580,14 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata) if (sysc_soc->soc == SOC_3430) { /* Clear DSS_SDI_CONTROL */ - sysc_write(ddata, dispc_offset + 0x44, 0); + sysc_write(ddata, 0x44, 0); /* Clear DSS_PLL_CONTROL */ - sysc_write(ddata, dispc_offset + 0x48, 0); + sysc_write(ddata, 0x48, 0); } /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */ - sysc_write(ddata, dispc_offset + 0x40, 0); + sysc_write(ddata, 0x40, 0); } /* 1-wire needs module's internal clocks enabled for reset */ From 226bbb937efbe903258f51feed37a8586b087f5f Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:54 +0530 Subject: [PATCH 44/70] soc: qcom: Do not depend on ARCH_QCOM for QMI helpers QMI helpers are not always used by Qualcomm platforms. One of the exceptions is the external modems available in near future. Cc: Andy Gross Cc: Bjorn Andersson Cc: linux-arm-msm@vger.kernel.org Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20200220095854.4804-17-manivannan.sadhasivam@linaro.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index d0a73e76d563..80aa8b6c56e0 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -88,7 +88,6 @@ config QCOM_PM config QCOM_QMI_HELPERS tristate - depends on ARCH_QCOM || COMPILE_TEST depends on NET config QCOM_RMTFS_MEM From 25bfaaa73c7d26a6e897559c510d7daff5e9d22d Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 3 Mar 2020 09:31:00 -0800 Subject: [PATCH 45/70] bus: ti-sysc: Detect EDMA and set quirk flags for tptc In order to probe EDMA with ti-sysc interconnect target module and with device tree data, we need to properly detect EDMA and set the flags for SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY for tptc. We have these flags currently set for am4 and dra7, but not for am335x. Let's set them for all the SoCs as the tptc module should behave the same for all of them. It's likely that am335x was never tested to idle EDMA tptc. 
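Detection is keyed purely on the module revision register, so no per-SoC platform data is required. The two tptc rows added below, repeated here with a hedged reading of the fields (name, IO base, revision/sysc/syss register offsets where -ENODEV appears to mark a register the module does not expose, revision value, revision mask, quirk flags):

	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
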
Cc: Peter Ujfalusi Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 46b25fa4237f..bc1c52f87046 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1331,6 +1331,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_SGX), SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, SYSC_MODULE_QUIRK_RTC_UNLOCK), + SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, @@ -1397,6 +1401,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), + SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), From 4d0dd3802ee1b6b14b7c46621cd581eb7c1ade79 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Thu, 27 Feb 2020 16:28:33 -0600 Subject: [PATCH 46/70] dt-bindings: bus: ti-sysc: Add support for PRUSS SYSC type The PRUSS module has a SYSCFG which is unique. The SYSCFG has two additional unique fields called STANDBY_INIT and SUB_MWAIT in addition to regular IDLE_MODE and STANDBY_MODE fields. Add the bindings for this new sysc type. Cc: Rob Herring Signed-off-by: Roger Quadros Signed-off-by: Suman Anna Acked-by: Rob Herring Signed-off-by: Tony Lindgren --- Documentation/devicetree/bindings/bus/ti-sysc.txt | 1 + include/dt-bindings/bus/ti-sysc.h | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/Documentation/devicetree/bindings/bus/ti-sysc.txt b/Documentation/devicetree/bindings/bus/ti-sysc.txt index 233eb8294204..c984143d08d2 100644 --- a/Documentation/devicetree/bindings/bus/ti-sysc.txt +++ b/Documentation/devicetree/bindings/bus/ti-sysc.txt @@ -38,6 +38,7 @@ Required standard properties: "ti,sysc-dra7-mcasp" "ti,sysc-usb-host-fs" "ti,sysc-dra7-mcan" + "ti,sysc-pruss" - reg shall have register areas implemented for the interconnect target module in question such as revision, sysc and syss diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h index babd08a1d226..76b07826ed05 100644 --- a/include/dt-bindings/bus/ti-sysc.h +++ b/include/dt-bindings/bus/ti-sysc.h @@ -18,6 +18,10 @@ #define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4) +/* PRUSS sysc found on AM33xx/AM43xx/AM57xx */ +#define SYSC_PRUSS_SUB_MWAIT (1 << 5) +#define SYSC_PRUSS_STANDBY_INIT (1 << 4) + /* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ #define SYSC_IDLE_FORCE 0 #define SYSC_IDLE_NO 1 From b2745d92bb015cc4454d4195c4ce6e2852db397e Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Thu, 27 Feb 2020 16:28:34 -0600 Subject: [PATCH 47/70] bus: ti-sysc: Add support for PRUSS SYSC type The PRU-ICSS present on AM33xx/AM43xx/AM57xx has a very unique SYSCFG register. 
The register follows the OMAP4-style SYSC_TYPE3 for Master Standby and Slave Idle, but also has two additional unique fields - STANDBY_INIT and SUB_MWAIT. The STANDBY_INIT is a control bit that is used to initiate a Standby sequence (when set) and trigger a MStandby request to the SoC's PRCM module. This same bit is also used to enable the OCP master ports (when cleared) to allow the PRU cores to access any peripherals or memory beyond the PRU subsystem. The SUB_MWAIT is a ready status field for the external access. Add support for this SYSC type. The STANDBY_INIT has to be set during suspend, without which it results in a hang in the resume sequence on AM33xx/AM43xx boards and requires a board reset to come out of the hang. Any PRU applications requiring external access are supposed to clear the STANDBY_INIT bit. Note that the PRUSS context is lost during a suspend sequence because the PRUSS module is reset and/or disabled. Signed-off-by: Suman Anna Signed-off-by: Roger Quadros [tony@atomide.com: updated quirk define number and to use -ENODEV] Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 27 +++++++++++++++++++++++++++ include/linux/platform_data/ti-sysc.h | 2 ++ 2 files changed, 29 insertions(+) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index bc1c52f87046..86ac61fa5bc6 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1341,6 +1341,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT), + /* PRUSS on am3, am4 and am5 */ + SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000, + SYSC_MODULE_QUIRK_PRUSS), /* Watchdog on am3 and am4 */ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), @@ -1712,6 +1715,16 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata) dev_warn(ddata->dev, "wdt disable step2 failed\n"); } +/* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */ +static void sysc_module_disable_quirk_pruss(struct sysc *ddata) +{ + u32 reg; + + reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + reg |= SYSC_PRUSS_STANDBY_INIT; + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); +} + static void sysc_init_module_quirks(struct sysc *ddata) { if (ddata->legacy_mode || !ddata->name) @@ -1750,6 +1763,9 @@ static void sysc_init_module_quirks(struct sysc *ddata) ddata->reset_done_quirk = sysc_reset_done_quirk_wdt; ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; } + + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) + ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; } static int sysc_clockdomain_init(struct sysc *ddata) @@ -2555,6 +2571,16 @@ static const struct sysc_capabilities sysc_dra7_mcan = { .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED, }; +/* + * PRUSS found on some AM33xx, AM437x and AM57xx SoCs + */ +static const struct sysc_capabilities sysc_pruss = { + .type = TI_SYSC_PRUSS, + .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT, + .regbits = &sysc_regbits_omap4_simple, + .mod_quirks = SYSC_MODULE_QUIRK_PRUSS, +}; + static int sysc_init_pdata(struct sysc *ddata) { struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); @@ -2936,6 +2962,7 @@ static const struct of_device_id sysc_match[] = { { .compatible = "ti,sysc-usb-host-fs", .data = &sysc_omap4_usb_host_fs, }, { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, }, + { 
.compatible = "ti,sysc-pruss", .data = &sysc_pruss, }, { }, }; MODULE_DEVICE_TABLE(of, sysc_match); diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index ecd3a979a14d..c59999ce044e 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -17,6 +17,7 @@ enum ti_sysc_module_type { TI_SYSC_OMAP4_MCASP, TI_SYSC_OMAP4_USB_HOST_FS, TI_SYSC_DRA7_MCAN, + TI_SYSC_PRUSS, }; struct ti_sysc_cookie { @@ -49,6 +50,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_PRUSS BIT(24) #define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) From efde2659b0fe835732047357b2902cca14f054d9 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 14 Jan 2020 17:37:51 -0800 Subject: [PATCH 48/70] drivers: qcom: rpmh-rsc: Use rcuidle tracepoints for rpmh This tracepoint is hit now that we call into the rpmh code from the cpu idle path. Let's move this to be an rcuidle tracepoint so that we avoid the RCU idle splat below ============================= WARNING: suspicious RCU usage 5.4.10 #68 Tainted: G S ----------------------------- drivers/soc/qcom/trace-rpmh.h:72 suspicious rcu_dereference_check() usage! other info that might help us debug this: RCU used illegally from idle CPU! rcu_scheduler_active = 2, debug_locks = 1 RCU used illegally from extended quiescent state! 5 locks held by swapper/2/0: #0: ffffff81745d6ee8 (&(&genpd->slock)->rlock){+.+.}, at: genpd_lock_spin+0x1c/0x2c #1: ffffff81745da6e8 (&(&genpd->slock)->rlock/1){....}, at: genpd_lock_nested_spin+0x24/0x34 #2: ffffff8174f2ca20 (&(&genpd->slock)->rlock/2){....}, at: genpd_lock_nested_spin+0x24/0x34 #3: ffffff8174f2c300 (&(&drv->client.cache_lock)->rlock){....}, at: rpmh_flush+0x48/0x24c #4: ffffff8174f2c150 (&(&tcs->lock)->rlock){+.+.}, at: rpmh_rsc_write_ctrl_data+0x74/0x270 stack backtrace: CPU: 2 PID: 0 Comm: swapper/2 Tainted: G S 5.4.10 #68 Call trace: dump_backtrace+0x0/0x174 show_stack+0x20/0x2c dump_stack+0xc8/0x124 lockdep_rcu_suspicious+0xe4/0x104 __tcs_buffer_write+0x230/0x2d0 rpmh_rsc_write_ctrl_data+0x210/0x270 rpmh_flush+0x84/0x24c rpmh_domain_power_off+0x78/0x98 _genpd_power_off+0x40/0xc0 genpd_power_off+0x168/0x208 genpd_power_off+0x1e0/0x208 genpd_power_off+0x1e0/0x208 genpd_runtime_suspend+0x1ac/0x220 __rpm_callback+0x70/0xfc rpm_callback+0x34/0x8c rpm_suspend+0x218/0x4a4 __pm_runtime_suspend+0x88/0xac psci_enter_domain_idle_state+0x3c/0xb4 cpuidle_enter_state+0xb8/0x284 cpuidle_enter+0x38/0x4c call_cpuidle+0x3c/0x68 do_idle+0x194/0x260 cpu_startup_entry+0x24/0x28 secondary_start_kernel+0x150/0x15c Acked-by: Ulf Hansson Reviewed-by: Douglas Anderson Tested-by: Sai Prakash Ranjan Fixes: a65a397f2451 ("cpuidle: psci: Add support for PM domains by using genpd") Reported-by: Sai Prakash Ranjan Cc: Ulf Hansson Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20200115013751.249588-1-swboyd@chromium.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/rpmh-rsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index e278fc11fe5c..b71822131f59 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -277,7 +277,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid); write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr); write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, 
cmd->data); - trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd); + trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd); } write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete); From cc41a5273d69391995cbf104cac326fac9780dd1 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 9 Mar 2020 11:51:23 -0700 Subject: [PATCH 49/70] soc: qcom: socinfo: Use seq_putc() if possible This is a single character that we're printing out. Use seq_putc() for that to simplify the code. Cc: Vaishali Thakkar Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20200309185123.65265-1-swboyd@chromium.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/socinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 7864b75ce569..ebb49aee179b 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -277,7 +277,7 @@ static int show_image_##type(struct seq_file *seq, void *p) \ { \ struct smem_image_version *image_version = seq->private; \ seq_puts(seq, image_version->type); \ - seq_puts(seq, "\n"); \ + seq_putc(seq, '\n'); \ return 0; \ } \ static int open_image_##type(struct inode *inode, struct file *file) \ From 705dcca91d0a75f5657f3d5465f7dbd31c3efa8b Mon Sep 17 00:00:00 2001 From: Joakim Zhang Date: Mon, 17 Feb 2020 11:19:15 +0800 Subject: [PATCH 50/70] firmware: imx: scu-pd: add power domain for I2C and INTMUX in CM40 SS Add power domain for I2C and INTMUX in CM40 SS. Signed-off-by: Joakim Zhang Signed-off-by: Shawn Guo --- drivers/firmware/imx/scu-pd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index 09cfa268c6bd..f3b4246ff200 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -165,6 +165,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { /* DC SS */ { "dc0", IMX_SC_R_DC_0, 1, false, 0 }, { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 }, + + /* CM40 SS */ + { "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 }, + { "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 }, }; static const struct imx_sc_pd_soc imx8qxp_scu_pd = { From 2243af41115d0e36e6414df6dd2a0386e022d9f8 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 25 Feb 2020 02:58:34 +0300 Subject: [PATCH 51/70] memory: tegra: Correct debugfs clk rate-range on Tegra20 Correctly set clk rate-range if number of available timings is zero. This fixes noisy "invalid range [4294967295, 0]" error messages during boot. 
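The "invalid range [4294967295, 0]" splat happens because the min/max trackers are never updated when the timings loop has nothing to iterate, leaving clk_set_rate_range() with an inverted range. A sketch of the fallback (same shape as the hunk below), which pins the debugfs range to the currently programmed EMC rate when no timings are available:

	/* no timing table: min_rate/max_rate were never updated by the loop */
	if (!emc->num_timings) {
		emc->debugfs.min_rate = clk_get_rate(emc->clk);
		emc->debugfs.max_rate = emc->debugfs.min_rate;
	}

	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
				 emc->debugfs.max_rate);
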
Fixes: 8209eefa3d37 ("memory: tegra: Implement EMC debugfs interface on Tegra20") Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/memory/tegra/tegra20-emc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c index 8ae474d9bfb9..b16715e9515d 100644 --- a/drivers/memory/tegra/tegra20-emc.c +++ b/drivers/memory/tegra/tegra20-emc.c @@ -628,6 +628,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc) emc->debugfs.max_rate = emc->timings[i].rate; } + if (!emc->num_timings) { + emc->debugfs.min_rate = clk_get_rate(emc->clk); + emc->debugfs.max_rate = emc->debugfs.min_rate; + } + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, emc->debugfs.max_rate); if (err < 0) { From a53670e1a734ba56fac84cf2b93b838bd4a6b835 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 25 Feb 2020 02:58:35 +0300 Subject: [PATCH 52/70] memory: tegra: Correct debugfs clk rate-range on Tegra30 Correctly set clk rate-range if number of available timings is zero. This fixes noisy "invalid range [4294967295, 0]" error messages during boot. Fixes: 8cee32b40040 ("memory: tegra: Implement EMC debugfs interface on Tegra30") Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/memory/tegra/tegra30-emc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c index e3efd9529506..b42bdb667e85 100644 --- a/drivers/memory/tegra/tegra30-emc.c +++ b/drivers/memory/tegra/tegra30-emc.c @@ -1256,6 +1256,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc) emc->debugfs.max_rate = emc->timings[i].rate; } + if (!emc->num_timings) { + emc->debugfs.min_rate = clk_get_rate(emc->clk); + emc->debugfs.max_rate = emc->debugfs.min_rate; + } + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, emc->debugfs.max_rate); if (err < 0) { From 141267bffd1dc19a76e4d50e3e4829f85a806875 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 25 Feb 2020 02:58:36 +0300 Subject: [PATCH 53/70] memory: tegra: Correct debugfs clk rate-range on Tegra124 Correctly set clk rate-range if number of available timings is zero. This fixes noisy "invalid range [4294967295, 0]" error messages during boot. 
Fixes: 6b9acd935546 ("memory: tegra: Refashion EMC debugfs interface on Tegra124") Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/memory/tegra/tegra124-emc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c index 21f05240682b..33b8216bac30 100644 --- a/drivers/memory/tegra/tegra124-emc.c +++ b/drivers/memory/tegra/tegra124-emc.c @@ -1158,6 +1158,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc) emc->debugfs.max_rate = emc->timings[i].rate; } + if (!emc->num_timings) { + emc->debugfs.min_rate = clk_get_rate(emc->clk); + emc->debugfs.max_rate = emc->debugfs.min_rate; + } + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, emc->debugfs.max_rate); if (err < 0) { From 04fc94be4a81d0fa9eaf0917a308c02c2b031e82 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Wed, 11 Mar 2020 14:09:18 +0100 Subject: [PATCH 54/70] devicetree: bindings: firmware: add ipq806x to qcom_scm Add ipq806x to compatible list in qcom_scm Documentation Signed-off-by: Ansuel Smith Link: https://lore.kernel.org/r/20200311130918.753-2-ansuelsmth@gmail.com Signed-off-by: Bjorn Andersson --- Documentation/devicetree/bindings/firmware/qcom,scm.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt index 3f29ea04b5fe..354b448fc0c3 100644 --- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt +++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt @@ -10,6 +10,7 @@ Required properties: * "qcom,scm-apq8064" * "qcom,scm-apq8084" * "qcom,scm-ipq4019" + * "qcom,scm-ipq806x" * "qcom,scm-msm8660" * "qcom,scm-msm8916" * "qcom,scm-msm8960" From fbe639b44a82755d639df1c5d147c93f02ac5a0f Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Thu, 12 Mar 2020 17:38:40 +0530 Subject: [PATCH 55/70] soc: qcom: Introduce Protection Domain Restart helpers Qualcomm SoCs (starting with MSM8998) allow for multiple protection domains to run on the same Q6 sub-system. This allows for services like ATH10K WLAN FW to have their own separate address space and crash/recover without disrupting the modem and other PDs running on the same sub-system. The PDR helpers introduces an abstraction that allows for tracking/controlling the life cycle of protection domains running on various Q6 sub-systems. Signed-off-by: Sibi Sankar Link: https://lore.kernel.org/r/20200312120842.21991-2-sibis@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/Kconfig | 4 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/pdr_interface.c | 757 +++++++++++++++++++++++++++++++ drivers/soc/qcom/pdr_internal.h | 379 ++++++++++++++++ include/linux/soc/qcom/pdr.h | 29 ++ include/linux/soc/qcom/qmi.h | 1 + 6 files changed, 1171 insertions(+) create mode 100644 drivers/soc/qcom/pdr_interface.c create mode 100644 drivers/soc/qcom/pdr_internal.h create mode 100644 include/linux/soc/qcom/pdr.h diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 80aa8b6c56e0..48501f0245b0 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -76,6 +76,10 @@ config QCOM_OCMEM requirements. This is typically used by the GPU, camera/video, and audio components on some Snapdragon SoCs. 
+config QCOM_PDR_HELPERS + tristate + select QCOM_QMI_HELPERS + config QCOM_PM bool "Qualcomm Power Management" depends on ARCH_QCOM && !ARM64 diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 9fb35c8a495e..5d6b83dc58e8 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_OCMEM) += ocmem.o +obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o obj-$(CONFIG_QCOM_PM) += spm.o obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o qmi_helpers-y += qmi_encdec.o qmi_interface.o diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c new file mode 100644 index 000000000000..7ee088b9cc7c --- /dev/null +++ b/drivers/soc/qcom/pdr_interface.c @@ -0,0 +1,757 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include + +#include "pdr_internal.h" + +struct pdr_service { + char service_name[SERVREG_NAME_LENGTH + 1]; + char service_path[SERVREG_NAME_LENGTH + 1]; + + struct sockaddr_qrtr addr; + + unsigned int instance; + unsigned int service; + u8 service_data_valid; + u32 service_data; + int state; + + bool need_notifier_register; + bool need_notifier_remove; + bool need_locator_lookup; + bool service_connected; + + struct list_head node; +}; + +struct pdr_handle { + struct qmi_handle locator_hdl; + struct qmi_handle notifier_hdl; + + struct sockaddr_qrtr locator_addr; + + struct list_head lookups; + struct list_head indack_list; + + /* control access to pdr lookup/indack lists */ + struct mutex list_lock; + + /* serialize pd status invocation */ + struct mutex status_lock; + + /* control access to the locator state */ + struct mutex lock; + + bool locator_init_complete; + + struct work_struct locator_work; + struct work_struct notifier_work; + struct work_struct indack_work; + + struct workqueue_struct *notifier_wq; + struct workqueue_struct *indack_wq; + + void (*status)(int state, char *service_path, void *priv); + void *priv; +}; + +struct pdr_list_node { + enum servreg_service_state curr_state; + u16 transaction_id; + struct pdr_service *pds; + struct list_head node; +}; + +static int pdr_locator_new_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + locator_hdl); + struct pdr_service *pds; + + /* Create a local client port for QMI communication */ + pdr->locator_addr.sq_family = AF_QIPCRTR; + pdr->locator_addr.sq_node = svc->node; + pdr->locator_addr.sq_port = svc->port; + + mutex_lock(&pdr->lock); + pdr->locator_init_complete = true; + mutex_unlock(&pdr->lock); + + /* Service pending lookup requests */ + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->need_locator_lookup) + schedule_work(&pdr->locator_work); + } + mutex_unlock(&pdr->list_lock); + + return 0; +} + +static void pdr_locator_del_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + locator_hdl); + + mutex_lock(&pdr->lock); + pdr->locator_init_complete = false; + mutex_unlock(&pdr->lock); + + pdr->locator_addr.sq_node = 0; + pdr->locator_addr.sq_port = 0; +} + +static struct qmi_ops pdr_locator_ops = { + .new_server = pdr_locator_new_server, + .del_server = pdr_locator_del_server, +}; + +static int pdr_register_listener(struct pdr_handle 
*pdr, + struct pdr_service *pds, + bool enable) +{ + struct servreg_register_listener_resp resp; + struct servreg_register_listener_req req; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, + servreg_register_listener_resp_ei, + &resp); + if (ret < 0) + return ret; + + req.enable = enable; + strcpy(req.service_path, pds->service_path); + + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, + &txn, SERVREG_REGISTER_LISTENER_REQ, + SERVREG_REGISTER_LISTENER_REQ_LEN, + servreg_register_listener_req_ei, + &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s register listener txn wait failed: %d\n", + pds->service_path, ret); + return ret; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s register listener failed: 0x%x\n", + pds->service_path, resp.resp.error); + return ret; + } + + if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX) + pr_err("PDR: %s notification state invalid: 0x%x\n", + pds->service_path, resp.curr_state); + + pds->state = resp.curr_state; + + return 0; +} + +static void pdr_notifier_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + notifier_work); + struct pdr_service *pds; + int ret; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service_connected) { + if (!pds->need_notifier_register) + continue; + + pds->need_notifier_register = false; + ret = pdr_register_listener(pdr, pds, true); + if (ret < 0) + pds->state = SERVREG_SERVICE_STATE_DOWN; + } else { + if (!pds->need_notifier_remove) + continue; + + pds->need_notifier_remove = false; + pds->state = SERVREG_SERVICE_STATE_DOWN; + } + + mutex_lock(&pdr->status_lock); + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + } + mutex_unlock(&pdr->list_lock); +} + +static int pdr_notifier_new_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + struct pdr_service *pds; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service == svc->service && + pds->instance == svc->instance) { + pds->service_connected = true; + pds->need_notifier_register = true; + pds->addr.sq_family = AF_QIPCRTR; + pds->addr.sq_node = svc->node; + pds->addr.sq_port = svc->port; + queue_work(pdr->notifier_wq, &pdr->notifier_work); + } + } + mutex_unlock(&pdr->list_lock); + + return 0; +} + +static void pdr_notifier_del_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + struct pdr_service *pds; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service == svc->service && + pds->instance == svc->instance) { + pds->service_connected = false; + pds->need_notifier_remove = true; + pds->addr.sq_node = 0; + pds->addr.sq_port = 0; + queue_work(pdr->notifier_wq, &pdr->notifier_work); + } + } + mutex_unlock(&pdr->list_lock); +} + +static struct qmi_ops pdr_notifier_ops = { + .new_server = pdr_notifier_new_server, + .del_server = pdr_notifier_del_server, +}; + +static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds, + u16 tid) +{ + struct servreg_set_ack_resp resp; + struct servreg_set_ack_req req; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, 
servreg_set_ack_resp_ei, + &resp); + if (ret < 0) + return ret; + + req.transaction_id = tid; + strcpy(req.service_path, pds->service_path); + + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, + &txn, SERVREG_SET_ACK_REQ, + SERVREG_SET_ACK_REQ_LEN, + servreg_set_ack_req_ei, + &req); + + /* Skip waiting for response */ + qmi_txn_cancel(&txn); + return ret; +} + +static void pdr_indack_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + indack_work); + struct pdr_list_node *ind, *tmp; + struct pdr_service *pds; + + list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) { + pds = ind->pds; + pdr_send_indack_msg(pdr, pds, ind->transaction_id); + + mutex_lock(&pdr->status_lock); + pds->state = ind->curr_state; + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + + mutex_lock(&pdr->list_lock); + list_del(&ind->node); + mutex_unlock(&pdr->list_lock); + + kfree(ind); + } +} + +static void pdr_indication_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + const struct servreg_state_updated_ind *ind_msg = data; + struct pdr_list_node *ind; + struct pdr_service *pds; + bool found; + + if (!ind_msg || !ind_msg->service_path[0] || + strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH) + return; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (strcmp(pds->service_path, ind_msg->service_path)) + continue; + + found = true; + break; + } + mutex_unlock(&pdr->list_lock); + + if (!found) + return; + + pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n", + ind_msg->service_path, ind_msg->curr_state, + ind_msg->transaction_id); + + ind = kzalloc(sizeof(*ind), GFP_KERNEL); + if (!ind) + return; + + ind->transaction_id = ind_msg->transaction_id; + ind->curr_state = ind_msg->curr_state; + ind->pds = pds; + + mutex_lock(&pdr->list_lock); + list_add_tail(&ind->node, &pdr->indack_list); + mutex_unlock(&pdr->list_lock); + + queue_work(pdr->indack_wq, &pdr->indack_work); +} + +static struct qmi_msg_handler qmi_indication_handler[] = { + { + .type = QMI_INDICATION, + .msg_id = SERVREG_STATE_UPDATED_IND_ID, + .ei = servreg_state_updated_ind_ei, + .decoded_size = sizeof(struct servreg_state_updated_ind), + .fn = pdr_indication_cb, + }, + {} +}; + +static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, + struct servreg_get_domain_list_resp *resp, + struct pdr_handle *pdr) +{ + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->locator_hdl, &txn, + servreg_get_domain_list_resp_ei, resp); + if (ret < 0) + return ret; + + ret = qmi_send_request(&pdr->locator_hdl, + &pdr->locator_addr, + &txn, SERVREG_GET_DOMAIN_LIST_REQ, + SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, + servreg_get_domain_list_req_ei, + req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s get domain list txn wait failed: %d\n", + req->service_name, ret); + return ret; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s get domain list failed: 0x%x\n", + req->service_name, resp->resp.error); + return -EREMOTEIO; + } + + return 0; +} + +static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) +{ + struct servreg_get_domain_list_resp *resp; + struct servreg_get_domain_list_req req; + struct servreg_location_entry *entry; + int 
domains_read = 0; + int ret, i; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + /* Prepare req message */ + strcpy(req.service_name, pds->service_name); + req.domain_offset_valid = true; + req.domain_offset = 0; + + do { + req.domain_offset = domains_read; + ret = pdr_get_domain_list(&req, resp, pdr); + if (ret < 0) + goto out; + + for (i = domains_read; i < resp->domain_list_len; i++) { + entry = &resp->domain_list[i]; + + if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) + continue; + + if (!strcmp(entry->name, pds->service_path)) { + pds->service_data_valid = entry->service_data_valid; + pds->service_data = entry->service_data; + pds->instance = entry->instance; + goto out; + } + } + + /* Update ret to indicate that the service is not yet found */ + ret = -ENXIO; + + /* Always read total_domains from the response msg */ + if (resp->domain_list_len > resp->total_domains) + resp->domain_list_len = resp->total_domains; + + domains_read += resp->domain_list_len; + } while (domains_read < resp->total_domains); +out: + kfree(resp); + return ret; +} + +static void pdr_notify_lookup_failure(struct pdr_handle *pdr, + struct pdr_service *pds, + int err) +{ + pr_err("PDR: service lookup for %s failed: %d\n", + pds->service_name, err); + + if (err == -ENXIO) + return; + + list_del(&pds->node); + pds->state = SERVREG_LOCATOR_ERR; + mutex_lock(&pdr->status_lock); + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + kfree(pds); +} + +static void pdr_locator_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + locator_work); + struct pdr_service *pds, *tmp; + int ret = 0; + + /* Bail out early if the SERVREG LOCATOR QMI service is not up */ + mutex_lock(&pdr->lock); + if (!pdr->locator_init_complete) { + mutex_unlock(&pdr->lock); + pr_debug("PDR: SERVICE LOCATOR service not available\n"); + return; + } + mutex_unlock(&pdr->lock); + + mutex_lock(&pdr->list_lock); + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { + if (!pds->need_locator_lookup) + continue; + + ret = pdr_locate_service(pdr, pds); + if (ret < 0) { + pdr_notify_lookup_failure(pdr, pds, ret); + continue; + } + + ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1, + pds->instance); + if (ret < 0) { + pdr_notify_lookup_failure(pdr, pds, ret); + continue; + } + + pds->need_locator_lookup = false; + } + mutex_unlock(&pdr->list_lock); +} + +/** + * pdr_add_lookup() - register a tracking request for a PD + * @pdr: PDR client handle + * @service_name: service name of the tracking request + * @service_path: service path of the tracking request + * + * Registering a pdr lookup allows for tracking the life cycle of the PD. + * + * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is + * returned if a lookup is already in progress for the given service path. 
+ */ +struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, + const char *service_name, + const char *service_path) +{ + struct pdr_service *pds, *tmp; + int ret; + + if (IS_ERR_OR_NULL(pdr)) + return ERR_PTR(-EINVAL); + + if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH || + !service_path || strlen(service_path) > SERVREG_NAME_LENGTH) + return ERR_PTR(-EINVAL); + + pds = kzalloc(sizeof(*pds), GFP_KERNEL); + if (!pds) + return ERR_PTR(-ENOMEM); + + pds->service = SERVREG_NOTIFIER_SERVICE; + strcpy(pds->service_name, service_name); + strcpy(pds->service_path, service_path); + pds->need_locator_lookup = true; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(tmp, &pdr->lookups, node) { + if (strcmp(tmp->service_path, service_path)) + continue; + + mutex_unlock(&pdr->list_lock); + ret = -EALREADY; + goto err; + } + + list_add(&pds->node, &pdr->lookups); + mutex_unlock(&pdr->list_lock); + + schedule_work(&pdr->locator_work); + + return pds; +err: + kfree(pds); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pdr_add_lookup); + +/** + * pdr_restart_pd() - restart PD + * @pdr: PDR client handle + * @pds: PD service handle + * + * Restarts the PD tracked by the PDR client handle for a given service path. + * + * Return: 0 on success, negative errno on failure. + */ +int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds) +{ + struct servreg_restart_pd_resp resp; + struct servreg_restart_pd_req req; + struct sockaddr_qrtr addr; + struct pdr_service *tmp; + struct qmi_txn txn; + int ret; + + if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds)) + return -EINVAL; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(tmp, &pdr->lookups, node) { + if (tmp != pds) + continue; + + if (!pds->service_connected) + break; + + /* Prepare req message */ + strcpy(req.service_path, pds->service_path); + addr = pds->addr; + break; + } + mutex_unlock(&pdr->list_lock); + + if (!req.service_path[0]) + return -EINVAL; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, + servreg_restart_pd_resp_ei, + &resp); + if (ret < 0) + return ret; + + ret = qmi_send_request(&pdr->notifier_hdl, &addr, + &txn, SERVREG_RESTART_PD_REQ, + SERVREG_RESTART_PD_REQ_MAX_LEN, + servreg_restart_pd_req_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s PD restart txn wait failed: %d\n", + req.service_path, ret); + return ret; + } + + /* Check response if PDR is disabled */ + if (resp.resp.result == QMI_RESULT_FAILURE_V01 && + resp.resp.error == QMI_ERR_DISABLED_V01) { + pr_err("PDR: %s PD restart is disabled: 0x%x\n", + req.service_path, resp.resp.error); + return -EOPNOTSUPP; + } + + /* Check the response for other error case*/ + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s request for PD restart failed: 0x%x\n", + req.service_path, resp.resp.error); + return -EREMOTEIO; + } + + return 0; +} +EXPORT_SYMBOL(pdr_restart_pd); + +/** + * pdr_handle_alloc() - initialize the PDR client handle + * @status: function to be called on PD state change + * @priv: handle for client's use + * + * Initializes the PDR client handle to allow for tracking/restart of PDs. + * + * Return: pdr_handle object on success, ERR_PTR on failure. 
+ */ +struct pdr_handle *pdr_handle_alloc(void (*status)(int state, + char *service_path, + void *priv), void *priv) +{ + struct pdr_handle *pdr; + int ret; + + if (!status) + return ERR_PTR(-EINVAL); + + pdr = kzalloc(sizeof(*pdr), GFP_KERNEL); + if (!pdr) + return ERR_PTR(-ENOMEM); + + pdr->status = status; + pdr->priv = priv; + + mutex_init(&pdr->status_lock); + mutex_init(&pdr->list_lock); + mutex_init(&pdr->lock); + + INIT_LIST_HEAD(&pdr->lookups); + INIT_LIST_HEAD(&pdr->indack_list); + + INIT_WORK(&pdr->locator_work, pdr_locator_work); + INIT_WORK(&pdr->notifier_work, pdr_notifier_work); + INIT_WORK(&pdr->indack_work, pdr_indack_work); + + pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq"); + if (!pdr->notifier_wq) { + ret = -ENOMEM; + goto free_pdr_handle; + } + + pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI); + if (!pdr->indack_wq) { + ret = -ENOMEM; + goto destroy_notifier; + } + + ret = qmi_handle_init(&pdr->locator_hdl, + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, + &pdr_locator_ops, NULL); + if (ret < 0) + goto destroy_indack; + + ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1); + if (ret < 0) + goto release_qmi_handle; + + ret = qmi_handle_init(&pdr->notifier_hdl, + SERVREG_STATE_UPDATED_IND_MAX_LEN, + &pdr_notifier_ops, + qmi_indication_handler); + if (ret < 0) + goto release_qmi_handle; + + return pdr; + +release_qmi_handle: + qmi_handle_release(&pdr->locator_hdl); +destroy_indack: + destroy_workqueue(pdr->indack_wq); +destroy_notifier: + destroy_workqueue(pdr->notifier_wq); +free_pdr_handle: + kfree(pdr); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pdr_handle_alloc); + +/** + * pdr_handle_release() - release the PDR client handle + * @pdr: PDR client handle + * + * Cleans up pending tracking requests and releases the underlying qmi handles. 
+ */ +void pdr_handle_release(struct pdr_handle *pdr) +{ + struct pdr_service *pds, *tmp; + + if (IS_ERR_OR_NULL(pdr)) + return; + + mutex_lock(&pdr->list_lock); + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { + list_del(&pds->node); + kfree(pds); + } + mutex_unlock(&pdr->list_lock); + + cancel_work_sync(&pdr->locator_work); + cancel_work_sync(&pdr->notifier_work); + cancel_work_sync(&pdr->indack_work); + + destroy_workqueue(pdr->notifier_wq); + destroy_workqueue(pdr->indack_wq); + + qmi_handle_release(&pdr->locator_hdl); + qmi_handle_release(&pdr->notifier_hdl); + + kfree(pdr); +} +EXPORT_SYMBOL(pdr_handle_release); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers"); diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h new file mode 100644 index 000000000000..15b5002e4127 --- /dev/null +++ b/drivers/soc/qcom/pdr_internal.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PDR_HELPER_INTERNAL__ +#define __QCOM_PDR_HELPER_INTERNAL__ + +#include + +#define SERVREG_LOCATOR_SERVICE 0x40 +#define SERVREG_NOTIFIER_SERVICE 0x42 + +#define SERVREG_REGISTER_LISTENER_REQ 0x20 +#define SERVREG_GET_DOMAIN_LIST_REQ 0x21 +#define SERVREG_STATE_UPDATED_IND_ID 0x22 +#define SERVREG_SET_ACK_REQ 0x23 +#define SERVREG_RESTART_PD_REQ 0x24 + +#define SERVREG_DOMAIN_LIST_LENGTH 32 +#define SERVREG_RESTART_PD_REQ_MAX_LEN 67 +#define SERVREG_REGISTER_LISTENER_REQ_LEN 71 +#define SERVREG_SET_ACK_REQ_LEN 72 +#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 +#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 +#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 + +struct servreg_location_entry { + char name[SERVREG_NAME_LENGTH + 1]; + u8 service_data_valid; + u32 service_data; + u32 instance; +}; + +struct qmi_elem_info servreg_location_entry_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + name), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + instance), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data), + }, + {} +}; + +struct servreg_get_domain_list_req { + char service_name[SERVREG_NAME_LENGTH + 1]; + u8 domain_offset_valid; + u32 domain_offset; +}; + +struct qmi_elem_info servreg_get_domain_list_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_get_domain_list_req, + service_name), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset), + }, + {} +}; + +struct 
servreg_get_domain_list_resp { + struct qmi_response_type_v01 resp; + u8 total_domains_valid; + u16 total_domains; + u8 db_rev_count_valid; + u16 db_rev_count; + u8 domain_list_valid; + u32 domain_list_len; + struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; +}; + +struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_get_domain_list_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, + .elem_size = sizeof(struct servreg_location_entry), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list), + .ei_array = servreg_location_entry_ei, + }, + {} +}; + +struct servreg_register_listener_req { + u8 enable; + char service_path[SERVREG_NAME_LENGTH + 1]; +}; + +struct qmi_elem_info servreg_register_listener_req_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_register_listener_req, + enable), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_req, + service_path), + }, + {} +}; + +struct servreg_register_listener_resp { + struct qmi_response_type_v01 resp; + u8 curr_state_valid; + enum servreg_service_state curr_state; +}; + +struct qmi_elem_info servreg_register_listener_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum 
servreg_service_state), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state), + }, + {} +}; + +struct servreg_restart_pd_req { + char service_path[SERVREG_NAME_LENGTH + 1]; +}; + +struct qmi_elem_info servreg_restart_pd_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_restart_pd_req, + service_path), + }, + {} +}; + +struct servreg_restart_pd_resp { + struct qmi_response_type_v01 resp; +}; + +struct qmi_elem_info servreg_restart_pd_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_restart_pd_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +struct servreg_state_updated_ind { + enum servreg_service_state curr_state; + char service_path[SERVREG_NAME_LENGTH + 1]; + u16 transaction_id; +}; + +struct qmi_elem_info servreg_state_updated_ind_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_state_updated_ind, + curr_state), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_state_updated_ind, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct servreg_state_updated_ind, + transaction_id), + }, + {} +}; + +struct servreg_set_ack_req { + char service_path[SERVREG_NAME_LENGTH + 1]; + u16 transaction_id; +}; + +struct qmi_elem_info servreg_set_ack_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_set_ack_req, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_req, + transaction_id), + }, + {} +}; + +struct servreg_set_ack_resp { + struct qmi_response_type_v01 resp; +}; + +struct qmi_elem_info servreg_set_ack_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +#endif diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h new file mode 100644 index 000000000000..83a8ea612e69 --- /dev/null +++ b/include/linux/soc/qcom/pdr.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PDR_HELPER__ +#define __QCOM_PDR_HELPER__ + +#include + +#define SERVREG_NAME_LENGTH 64 + +struct pdr_service; +struct pdr_handle; + +enum servreg_service_state { + SERVREG_LOCATOR_ERR = 0x1, + SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF, + SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF, + SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF, + SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF, +}; + +struct pdr_handle *pdr_handle_alloc(void (*status)(int state, + char *service_path, + void *priv), void *priv); +struct pdr_service *pdr_add_lookup(struct 
pdr_handle *pdr, + const char *service_name, + const char *service_path); +int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds); +void pdr_handle_release(struct pdr_handle *pdr); + +#endif diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index 5efa2b67fa55..e712f94b89fc 100644 --- a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -88,6 +88,7 @@ struct qmi_elem_info { #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 #define QMI_ERR_INVALID_ID_V01 41 #define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_DISABLED_V01 69 #define QMI_ERR_INCOMPATIBLE_STATE_V01 90 #define QMI_ERR_NOT_SUPPORTED_V01 94 From a03a5b6313c210565605a5dbf5d3e463c1fd5d30 Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Thu, 12 Mar 2020 17:38:41 +0530 Subject: [PATCH 56/70] dt-bindings: soc: qcom: apr: Add protection domain bindings Qualcomm SoCs (starting with MSM8998) allow for multiple protection domains (PDs) to run on the same Q6 sub-system. This allows for services like AVS AUDIO to have their own separate address space and crash/recover without disrupting the other PDs running on the same Q6 ADSP. Add "qcom,protection-domain" bindings to capture the dependencies between the APR service and the PD on which the apr service runs. Reviewed-by: Bjorn Andersson Reviewed-by: Rob Herring Signed-off-by: Sibi Sankar Link: https://lore.kernel.org/r/20200312120842.21991-3-sibis@codeaurora.org Signed-off-by: Bjorn Andersson --- .../devicetree/bindings/soc/qcom/qcom,apr.txt | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt index db501269f47b..f8fa71f5d84b 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt @@ -45,6 +45,18 @@ by the individual bindings for the specific service 12 - Ultrasound stream manager. 13 - Listen stream manager. +- qcom,protection-domain + Usage: optional + Value type: + Definition: Must list the protection domain service name and path + that the particular apr service has a dependency on. + Possible values are : + "avs/audio", "msm/adsp/audio_pd". + "kernel/elf_loader", "msm/modem/wlan_pd". + "tms/servreg", "msm/adsp/audio_pd". + "tms/servreg", "msm/modem/wlan_pd". + "tms/servreg", "msm/slpi/sensor_pd". + = EXAMPLE The following example represents a QDSP based sound card on a MSM8996 device which uses apr as communication between Apps and QDSP. @@ -82,3 +94,41 @@ which uses apr as communication between Apps and QDSP. ... }; }; + += EXAMPLE 2 +The following example represents a QDSP based sound card with protection domain +dependencies specified. Here some of the apr services are dependent on services +running on protection domain hosted on ADSP/SLPI remote processors while others +have no such dependency. + + apr { + compatible = "qcom,apr-v2"; + qcom,glink-channels = "apr_audio_svc"; + qcom,apr-domain = ; + + q6core { + compatible = "qcom,q6core"; + reg = ; + }; + + q6afe: q6afe { + compatible = "qcom,q6afe"; + reg = ; + qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd"; + ... + }; + + q6asm: q6asm { + compatible = "qcom,q6asm"; + reg = ; + qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd"; + ... + }; + + q6adm: q6adm { + compatible = "qcom,q6adm"; + reg = ; + qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd"; + ... 
+ }; + }; From 83473566260288c560e5443ea4cc40a458aa9e6a Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Thu, 12 Mar 2020 17:38:42 +0530 Subject: [PATCH 57/70] soc: qcom: apr: Add avs/audio tracking functionality Use PDR helper functions to track the protection domains that the apr services are dependent upon on SDM845 SoC, specifically the "avs/audio" service running on ADSP Q6. Reviewed-by: Bjorn Andersson Signed-off-by: Sibi Sankar Link: https://lore.kernel.org/r/20200312120842.21991-4-sibis@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/Kconfig | 1 + drivers/soc/qcom/apr.c | 125 ++++++++++++++++++++++++++++++++--- include/linux/soc/qcom/apr.h | 1 + 3 files changed, 117 insertions(+), 10 deletions(-) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 48501f0245b0..9ac6b0072e8c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -200,6 +200,7 @@ config QCOM_APR tristate "Qualcomm APR Bus (Asynchronous Packet Router)" depends on ARCH_QCOM || COMPILE_TEST depends on RPMSG + select QCOM_PDR_HELPERS help Enable APR IPC protocol support between application processor and QDSP6. APR is diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 4fcc32420c47..1f35b097c635 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -21,6 +22,7 @@ struct apr { spinlock_t rx_lock; struct idr svcs_idr; int dest_domain_id; + struct pdr_handle *pdr; struct workqueue_struct *rxwq; struct work_struct rx_work; struct list_head rx_list; @@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np, id->svc_id + 1, GFP_ATOMIC); spin_unlock(&apr->svcs_lock); + of_property_read_string_index(np, "qcom,protection-domain", + 1, &adev->service_path); + dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev)); ret = device_register(&adev->dev); @@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np, return ret; } -static void of_register_apr_devices(struct device *dev) +static int of_apr_add_pd_lookups(struct device *dev) +{ + const char *service_name, *service_path; + struct apr *apr = dev_get_drvdata(dev); + struct device_node *node; + struct pdr_service *pds; + int ret; + + for_each_child_of_node(dev->of_node, node) { + ret = of_property_read_string_index(node, "qcom,protection-domain", + 0, &service_name); + if (ret < 0) + continue; + + ret = of_property_read_string_index(node, "qcom,protection-domain", + 1, &service_path); + if (ret < 0) { + dev_err(dev, "pdr service path missing: %d\n", ret); + return ret; + } + + pds = pdr_add_lookup(apr->pdr, service_name, service_path); + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { + dev_err(dev, "pdr add lookup failed: %d\n", ret); + return PTR_ERR(pds); + } + } + + return 0; +} + +static void of_register_apr_devices(struct device *dev, const char *svc_path) { struct apr *apr = dev_get_drvdata(dev); struct device_node *node; + const char *service_path; + int ret; for_each_child_of_node(dev->of_node, node) { struct apr_device_id id = { {0} }; + /* + * This function is called with svc_path NULL during + * apr_probe(), in which case we register any apr devices + * without a qcom,protection-domain specified. + * + * Then as the protection domains becomes available + * (if applicable) this function is again called, but with + * svc_path representing the service becoming available. 
In + * this case we register any apr devices with a matching + * qcom,protection-domain. + */ + + ret = of_property_read_string_index(node, "qcom,protection-domain", + 1, &service_path); + if (svc_path) { + /* skip APR services that are PD independent */ + if (ret) + continue; + + /* skip APR services whose PD paths don't match */ + if (strcmp(service_path, svc_path)) + continue; + } else { + /* skip APR services whose PD lookups are registered */ + if (ret == 0) + continue; + } + if (of_property_read_u32(node, "reg", &id.svc_id)) continue; @@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev) } } +static int apr_remove_device(struct device *dev, void *svc_path) +{ + struct apr_device *adev = to_apr_device(dev); + + if (svc_path && adev->service_path) { + if (!strcmp(adev->service_path, (char *)svc_path)) + device_unregister(&adev->dev); + } else { + device_unregister(&adev->dev); + } + + return 0; +} + +static void apr_pd_status(int state, char *svc_path, void *priv) +{ + struct apr *apr = (struct apr *)priv; + + switch (state) { + case SERVREG_SERVICE_STATE_UP: + of_register_apr_devices(apr->dev, svc_path); + break; + case SERVREG_SERVICE_STATE_DOWN: + device_for_each_child(apr->dev, svc_path, apr_remove_device); + break; + } +} + static int apr_probe(struct rpmsg_device *rpdev) { struct device *dev = &rpdev->dev; @@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev) return -ENOMEM; } INIT_WORK(&apr->rx_work, apr_rxwq); + + apr->pdr = pdr_handle_alloc(apr_pd_status, apr); + if (IS_ERR(apr->pdr)) { + dev_err(dev, "Failed to init PDR handle\n"); + ret = PTR_ERR(apr->pdr); + goto destroy_wq; + } + INIT_LIST_HEAD(&apr->rx_list); spin_lock_init(&apr->rx_lock); spin_lock_init(&apr->svcs_lock); idr_init(&apr->svcs_idr); - of_register_apr_devices(dev); + + ret = of_apr_add_pd_lookups(dev); + if (ret) + goto handle_release; + + of_register_apr_devices(dev, NULL); return 0; -} -static int apr_remove_device(struct device *dev, void *null) -{ - struct apr_device *adev = to_apr_device(dev); - - device_unregister(&adev->dev); - - return 0; +handle_release: + pdr_handle_release(apr->pdr); +destroy_wq: + destroy_workqueue(apr->rxwq); + return ret; } static void apr_remove(struct rpmsg_device *rpdev) { struct apr *apr = dev_get_drvdata(&rpdev->dev); + pdr_handle_release(apr->pdr); device_for_each_child(&rpdev->dev, NULL, apr_remove_device); flush_workqueue(apr->rxwq); destroy_workqueue(apr->rxwq); diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index c5d52e2cb275..7f0bc3cf4d61 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -85,6 +85,7 @@ struct apr_device { uint16_t domain_id; uint32_t version; char name[APR_NAME_SIZE]; + const char *service_path; spinlock_t lock; struct list_head node; }; From fc40200ebf82fae3e40c4e88246496644edafe66 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Fri, 21 Feb 2020 10:32:19 +0800 Subject: [PATCH 58/70] soc: imx: increase build coverage for imx8m soc driver The soc-imx8.c driver is actually for i.MX8M family, so rename it to soc-imx8m.c. Use CONFIG_SOC_IMX8M as build gate, not CONFIG_ARCH_MXC, to control whether build this driver, also make it possible for compile test. 
Default set it to y for ARCH_MXC && ARM64 Signed-off-by: Peng Fan Reviewed-by: Leonard Crestez Signed-off-by: Shawn Guo --- drivers/soc/Makefile | 2 +- drivers/soc/imx/Kconfig | 9 +++++++++ drivers/soc/imx/Makefile | 2 +- drivers/soc/imx/{soc-imx8.c => soc-imx8m.c} | 0 4 files changed, 11 insertions(+), 2 deletions(-) rename drivers/soc/imx/{soc-imx8.c => soc-imx8m.c} (100%) diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 8b49d782a1ab..a39f17cea376 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ obj-$(CONFIG_ARCH_GEMINI) += gemini/ -obj-$(CONFIG_ARCH_MXC) += imx/ +obj-y += imx/ obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/ obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-y += mediatek/ diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 0281ef9a1800..70019cefa617 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -17,4 +17,13 @@ config IMX_SCU_SOC Controller Unit SoC info module, it will provide the SoC info like SoC family, ID and revision etc. +config SOC_IMX8M + bool "i.MX8M SoC family support" + depends on ARCH_MXC || COMPILE_TEST + default ARCH_MXC && ARM64 + help + If you say yes here you get support for the NXP i.MX8M family + support, it will provide the SoC info like SoC family, + ID and revision etc. + endmenu diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile index cf9ca42ff739..103e2c93c342 100644 --- a/drivers/soc/imx/Makefile +++ b/drivers/soc/imx/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o -obj-$(CONFIG_ARCH_MXC) += soc-imx8.o +obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8m.c similarity index 100% rename from drivers/soc/imx/soc-imx8.c rename to drivers/soc/imx/soc-imx8m.c From e0ea2d11f8a08ba7066ff897e16c5217215d1e68 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Fri, 13 Mar 2020 11:09:12 +0100 Subject: [PATCH 59/70] soc: imx: gpc: fix power up sequencing Currently we wait only until the PGC inverts the isolation setting before disabling the peripheral clocks. This doesn't ensure that the reset is properly propagated through the peripheral devices in the power domain. Wait until the PGC signals that the power up request is done and wait a bit for resets to propagate before disabling the clocks. 
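For reference, the request/poll/settle sequence described in the commit message above reduces to the following pattern. This is only an illustrative sketch: the register offset and bit name (EXAMPLE_CNTR, EXAMPLE_PUP_REQ) are placeholders, not the real GPC register layout, which is shown in the diff below.

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#define EXAMPLE_CNTR		0x0	/* hypothetical power controller register */
#define EXAMPLE_PUP_REQ		BIT(1)	/* hypothetical power-up request bit */

static int example_domain_power_up(struct regmap *map)
{
	u32 val;
	int ret;

	/* Ask the power controller to power up the domain */
	regmap_update_bits(map, EXAMPLE_CNTR, EXAMPLE_PUP_REQ, EXAMPLE_PUP_REQ);

	/* Poll until the controller clears the request bit (up to 50 us) */
	ret = regmap_read_poll_timeout(map, EXAMPLE_CNTR, val,
				       !(val & EXAMPLE_PUP_REQ), 1, 50);
	if (ret)
		return ret;

	/* Let the resets propagate through the domain before gating clocks */
	usleep_range(5, 10);

	return 0;
}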
Signed-off-by: Lucas Stach Signed-off-by: Shawn Guo --- drivers/soc/imx/gpc.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 98b9d9a902ae..90a8b2c0676f 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) { struct imx_pm_domain *pd = to_imx_pm_domain(genpd); - int i, ret, sw, sw2iso; - u32 val; + int i, ret; + u32 val, req; if (pd->supply) { ret = regulator_enable(pd->supply); @@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, 0x1, 0x1); - /* Read ISO and ISO2SW power up delays */ - regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); - sw = val & 0x3f; - sw2iso = (val >> 8) & 0x3f; - /* Request GPC to power up domain */ - val = BIT(pd->cntr_pdn_bit + 1); - regmap_update_bits(pd->regmap, GPC_CNTR, val, val); + req = BIT(pd->cntr_pdn_bit + 1); + regmap_update_bits(pd->regmap, GPC_CNTR, req, req); - /* Wait ISO + ISO2SW IPG clock cycles */ - udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); + /* Wait for the PGC to handle the request */ + ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), + 1, 50); + if (ret) + pr_err("powerup request on domain %s timed out\n", genpd->name); + + /* Wait for reset to propagate through peripherals */ + usleep_range(5, 10); /* Disable reset clocks for all devices in the domain */ for (i = 0; i < pd->num_clks; i++) @@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = { .rd_table = &access_table, .wr_table = &access_table, .max_register = 0x2ac, + .fast_io = true, }; static struct generic_pm_domain *imx_gpc_onecell_domains[] = { From 5b00b83754167ce060495aca3945a725b9190a15 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Sat, 7 Mar 2020 09:25:54 +0800 Subject: [PATCH 60/70] firmware: imx: add COMPILE_TEST for IMX_SCU driver Add COMPILE_TEST support to IMX_SCU driver for better compile testing coverage. Any driver depending on IMX_SCU shouldn't have COMPILE_TEST though. Signed-off-by: Anson Huang Signed-off-by: Shawn Guo --- drivers/firmware/imx/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 1d2e5b85d7ca..58e8c6c115a0 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -12,7 +12,7 @@ config IMX_DSP config IMX_SCU bool "IMX SCU Protocol driver" - depends on IMX_MBOX + depends on IMX_MBOX || COMPILE_TEST help The System Controller Firmware (SCFW) is a low-level system function which runs on a dedicated Cortex-M core to provide power, clock, and From 2a52651292ad9e642d205ce2ece2686d3b3395a7 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Mon, 16 Mar 2020 11:09:02 +0800 Subject: [PATCH 61/70] soc: imx: drop COMPILE_TEST for IMX_SCU_SOC With COMPILE_TEST, there will be build error, because IMX_SCU might be set to n, so drop COMPILE_TEST. 
Suggested-by: Shawn Guo Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/soc/imx/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 70019cefa617..67aa94b2481b 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -10,7 +10,7 @@ config IMX_GPCV2_PM_DOMAINS config IMX_SCU_SOC bool "i.MX System Controller Unit SoC info support" - depends on IMX_SCU || COMPILE_TEST + depends on IMX_SCU select SOC_BUS help If you say yes here you get support for the NXP i.MX System From e69b3bede1b2f754a7d04383cbbc5a8b7327af79 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 16 Mar 2020 13:48:55 -0700 Subject: [PATCH 62/70] soc: qcom: pdr: Avoid uninitialized use of found in pdr_indication_cb Clang warns: ../drivers/soc/qcom/pdr_interface.c:316:2: warning: variable 'found' is used uninitialized whenever 'for' loop exits because its condition is false [-Wsometimes-uninitialized] list_for_each_entry(pds, &pdr->lookups, node) { ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ../include/linux/list.h:624:7: note: expanded from macro 'list_for_each_entry' &pos->member != (head); ^~~~~~~~~~~~~~~~~~~~~~ ../drivers/soc/qcom/pdr_interface.c:325:7: note: uninitialized use occurs here if (!found) ^~~~~ ../drivers/soc/qcom/pdr_interface.c:316:2: note: remove the condition if it is always true list_for_each_entry(pds, &pdr->lookups, node) { ^ ../include/linux/list.h:624:7: note: expanded from macro 'list_for_each_entry' &pos->member != (head); ^ ../drivers/soc/qcom/pdr_interface.c:309:12: note: initialize the variable 'found' to silence this warning bool found; ^ = 0 1 warning generated. Initialize found to false to fix this warning. Reviewed-by: Nick Desaulniers Fixes: fbe639b44a82 ("soc: qcom: Introduce Protection Domain Restart helpers") Link: https://github.com/ClangBuiltLinux/linux/issues/933 Signed-off-by: Nathan Chancellor Link: https://lore.kernel.org/r/20200316204855.15611-1-natechancellor@gmail.com Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/pdr_interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index 7ee088b9cc7c..17ad3b8698e1 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -306,7 +306,7 @@ static void pdr_indication_cb(struct qmi_handle *qmi, const struct servreg_state_updated_ind *ind_msg = data; struct pdr_list_node *ind; struct pdr_service *pds; - bool found; + bool found = false; if (!ind_msg || !ind_msg->service_path[0] || strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH) From de722e410661b9c9173a82af4329d337d341f777 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 16 Mar 2020 15:03:53 -0700 Subject: [PATCH 63/70] soc: qcom: Fix QCOM_APR dependencies QCOM_APR selects QCOM_PDR_HELPERS, which in turn selects QCOM_QMI_HELPERS, which depends on NET. So ensure that APR's dependencies are met by making it depend on NET as well. 
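Since this series introduces the PDR helper API that QCOM_APR now pulls in, a minimal sketch of how a client could consume it may help. The callback body, function names and the "tms/servreg"/"msm/adsp/audio_pd" pair below are illustrative only (the pair is borrowed from the binding example earlier in this series); the pdr_* calls follow the prototypes declared in include/linux/soc/qcom/pdr.h.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/pdr.h>

/* Called by the PDR helpers whenever a tracked PD changes state */
static void example_pd_status(int state, char *service_path, void *priv)
{
	if (state == SERVREG_SERVICE_STATE_UP)
		pr_info("PD %s is up\n", service_path);
	else if (state == SERVREG_SERVICE_STATE_DOWN)
		pr_info("PD %s went down\n", service_path);
}

static int example_pdr_client_init(void)
{
	struct pdr_handle *pdr;
	struct pdr_service *pds;

	pdr = pdr_handle_alloc(example_pd_status, NULL);
	if (IS_ERR(pdr))
		return PTR_ERR(pdr);

	/* Track the "msm/adsp/audio_pd" domain of the "tms/servreg" service */
	pds = pdr_add_lookup(pdr, "tms/servreg", "msm/adsp/audio_pd");
	if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
		pdr_handle_release(pdr);
		return PTR_ERR(pds);
	}

	return 0;
}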
Fixes: 834735662602 ("soc: qcom: apr: Add avs/audio tracking functionality") Reported-by: Randy Dunlap Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 9ac6b0072e8c..bf42a17a45de 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -200,6 +200,7 @@ config QCOM_APR tristate "Qualcomm APR Bus (Asynchronous Packet Router)" depends on ARCH_QCOM || COMPILE_TEST depends on RPMSG + depends on NET select QCOM_PDR_HELPERS help Enable APR IPC protocol support between From 288014358e072130a7b0f10a8a8d445eae43359c Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 12 Mar 2020 15:28:49 -0500 Subject: [PATCH 64/70] soc: fsl: qe: fix sparse warnings for qe.c Fixes the following sparse warnings: drivers/soc/fsl/qe/qe.c:426:9: warning: cast to restricted __be32 drivers/soc/fsl/qe/qe.c:528:41: warning: incorrect type in assignment (different base types) drivers/soc/fsl/qe/qe.c:528:41: expected unsigned long long static [addressable] [toplevel] [usertype] extended_modes drivers/soc/fsl/qe/qe.c:528:41: got restricted __be64 const [usertype] extended_modes Signed-off-by: Li Yang --- drivers/soc/fsl/qe/qe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 96c2057d8d8e..447146861c2c 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -423,7 +423,7 @@ static void qe_upload_microcode(const void *base, qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); /* Set I-RAM Ready Register */ - qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready); + qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready); } /* @@ -525,7 +525,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware) */ memset(&qe_firmware_info, 0, sizeof(qe_firmware_info)); strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id)); - qe_firmware_info.extended_modes = firmware->extended_modes; + qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes); memcpy(qe_firmware_info.vtraps, firmware->vtraps, sizeof(firmware->vtraps)); From 41ed69bf6cb32ba02be03449b18fa7c698cea3e4 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 12 Mar 2020 15:41:43 -0500 Subject: [PATCH 65/70] soc: fsl: qe: fix sparse warning for qe_common.c Fixes the following sparse warning: drivers/soc/fsl/qe/qe_common.c:75:48: warning: incorrect type in argument 2 (different base types) drivers/soc/fsl/qe/qe_common.c:75:48: expected restricted __be32 const [usertype] *addr drivers/soc/fsl/qe/qe_common.c:75:48: got unsigned int * Signed-off-by: Li Yang Reviewed-by: Rasmus Villemoes --- drivers/soc/fsl/qe/qe_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index a81a1a79f1ca..75075591f630 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c @@ -46,7 +46,7 @@ int cpm_muram_init(void) { struct device_node *np; struct resource r; - u32 zero[OF_MAX_ADDR_CELLS] = {}; + __be32 zero[OF_MAX_ADDR_CELLS] = {}; resource_size_t max = 0; int i = 0; int ret = 0; From de66e4d87d98be7cab11d1a52a80cfb68ff20202 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 12 Mar 2020 15:45:04 -0500 Subject: [PATCH 66/70] soc: fsl: qe: fix sparse warnings for ucc.c Fixes the following sparse warnings: drivers/soc/fsl/qe/ucc.c:637:20: warning: incorrect type in assignment (different address spaces) drivers/soc/fsl/qe/ucc.c:637:20: 
expected struct qe_mux *qe_mux_reg drivers/soc/fsl/qe/ucc.c:637:20: got struct qe_mux [noderef] * drivers/soc/fsl/qe/ucc.c:652:9: warning: incorrect type in argument 1 (different address spaces) drivers/soc/fsl/qe/ucc.c:652:9: expected void const volatile [noderef] *addr drivers/soc/fsl/qe/ucc.c:652:9: got restricted __be32 * drivers/soc/fsl/qe/ucc.c:652:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc.c:652:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc.c:652:9: got restricted __be32 * Signed-off-by: Li Yang Reviewed-by: Rasmus Villemoes --- drivers/soc/fsl/qe/ucc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c index 90157acc5ba6..d6c93970df4d 100644 --- a/drivers/soc/fsl/qe/ucc.c +++ b/drivers/soc/fsl/qe/ucc.c @@ -632,7 +632,7 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock, { int source; u32 shift; - struct qe_mux *qe_mux_reg; + struct qe_mux __iomem *qe_mux_reg; qe_mux_reg = &qe_immr->qmx; From 45e044c7dc7534c390ea07ac326fa0b6a12bebfa Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 12 Mar 2020 16:03:39 -0500 Subject: [PATCH 67/70] soc: fsl: qe: fix sparse warnings for qe_ic.c Fixes the following sparse warnings: drivers/soc/fsl/qe/qe_ic.c:253:32: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:253:32: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:253:32: got unsigned int [noderef] [usertype] *regs drivers/soc/fsl/qe/qe_ic.c:254:26: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:254:26: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:254:26: got unsigned int [noderef] [usertype] *regs drivers/soc/fsl/qe/qe_ic.c:269:32: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:269:32: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:269:32: got unsigned int [noderef] [usertype] *regs drivers/soc/fsl/qe/qe_ic.c:270:26: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:270:26: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:270:26: got unsigned int [noderef] [usertype] *regs drivers/soc/fsl/qe/qe_ic.c:341:31: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:341:31: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:341:31: got unsigned int [noderef] [usertype] *regs drivers/soc/fsl/qe/qe_ic.c:357:31: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:357:31: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:357:31: got unsigned int [noderef] [usertype] *regs drivers/soc/fsl/qe/qe_ic.c:450:26: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/qe_ic.c:450:26: expected restricted __be32 [noderef] [usertype] *base drivers/soc/fsl/qe/qe_ic.c:450:26: got unsigned int [noderef] [usertype] *regs Signed-off-by: Li Yang Reviewed-by: Rasmus Villemoes --- drivers/soc/fsl/qe/qe_ic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 0dd5bdb04a14..0390af999900 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -44,7 +44,7 @@ struct qe_ic { /* Control registers offset */ - u32 __iomem *regs; + __be32 __iomem *regs; /* 
The remapper for this QEIC */ struct irq_domain *irqhost; From b1be4a228086d558ba0efc3f12a82ab6fbca63f3 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 12 Mar 2020 16:06:37 -0500 Subject: [PATCH 68/70] soc: fsl: qe: fix sparse warnings for ucc_fast.c Fixes the following sparse warnings: drivers/soc/fsl/qe/ucc_fast.c:218:22: warning: incorrect type in assignment (different base types) drivers/soc/fsl/qe/ucc_fast.c:218:22: expected unsigned int [noderef] [usertype] *p_ucce drivers/soc/fsl/qe/ucc_fast.c:218:22: got restricted __be32 [noderef] * drivers/soc/fsl/qe/ucc_fast.c:219:22: warning: incorrect type in assignment (different base types) drivers/soc/fsl/qe/ucc_fast.c:219:22: expected unsigned int [noderef] [usertype] *p_uccm drivers/soc/fsl/qe/ucc_fast.c:219:22: got restricted __be32 [noderef] * Signed-off-by: Li Yang Reviewed-by: Rasmus Villemoes --- include/soc/fsl/qe/ucc_fast.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h index ba0e838f962a..dc4e79468094 100644 --- a/include/soc/fsl/qe/ucc_fast.h +++ b/include/soc/fsl/qe/ucc_fast.h @@ -178,10 +178,10 @@ struct ucc_fast_info { struct ucc_fast_private { struct ucc_fast_info *uf_info; struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */ - u32 __iomem *p_ucce; /* a pointer to the event register in memory. */ - u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */ + __be32 __iomem *p_ucce; /* a pointer to the event register in memory. */ + __be32 __iomem *p_uccm; /* a pointer to the mask register in memory. */ #ifdef CONFIG_UGETH_TX_ON_DEMAND - u16 __iomem *p_utodr; /* pointer to the transmit on demand register */ + __be16 __iomem *p_utodr;/* pointer to the transmit on demand register */ #endif int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ From ae1f68652c8da79c31d8e491a76eba9b4683fbd7 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Tue, 24 Mar 2020 19:04:18 -0500 Subject: [PATCH 69/70] soc: fsl: qe: ucc_slow: remove 0 assignment for kzalloc'ed structure Not necessary to set to 0 for the kzalloc'ed area so remove these assignements. Signed-off-by: Li Yang --- drivers/soc/fsl/qe/ucc_slow.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c index 274d34449846..20fb9d3e1a35 100644 --- a/drivers/soc/fsl/qe/ucc_slow.c +++ b/drivers/soc/fsl/qe/ucc_slow.c @@ -168,16 +168,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc return -ENOMEM; } - uccs->saved_uccm = 0; - uccs->p_rx_frame = 0; us_regs = uccs->us_regs; uccs->p_ucce = (u16 *) & (us_regs->ucce); uccs->p_uccm = (u16 *) & (us_regs->uccm); -#ifdef STATISTICS - uccs->rx_frames = 0; - uccs->tx_frames = 0; - uccs->rx_discarded = 0; -#endif /* STATISTICS */ /* Get PRAM base */ uccs->us_pram_offset = From 461c3ac0dc46ba7fc09628aadf63c81253c4c3de Mon Sep 17 00:00:00 2001 From: Li Yang Date: Thu, 12 Mar 2020 16:08:55 -0500 Subject: [PATCH 70/70] soc: fsl: qe: fix sparse warnings for ucc_slow.c Fixes the following sparse warnings, some of these endian issues are real issues that need to be fixed. 
drivers/soc/fsl/qe/ucc_slow.c:78:17: warning: incorrect type in assignment (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:78:17: expected struct ucc_slow *us_regs drivers/soc/fsl/qe/ucc_slow.c:78:17: got struct ucc_slow [noderef] *us_regs drivers/soc/fsl/qe/ucc_slow.c:81:18: warning: incorrect type in argument 1 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:81:18: expected void const volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:81:18: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:90:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:90:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:90:9: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:99:17: warning: incorrect type in assignment (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:99:17: expected struct ucc_slow *us_regs drivers/soc/fsl/qe/ucc_slow.c:99:17: got struct ucc_slow [noderef] *us_regs drivers/soc/fsl/qe/ucc_slow.c:102:18: warning: incorrect type in argument 1 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:102:18: expected void const volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:102:18: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:111:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:111:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:111:9: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:172:28: warning: Using plain integer as NULL pointer drivers/soc/fsl/qe/ucc_slow.c:174:25: warning: cast removes address space '' of expression drivers/soc/fsl/qe/ucc_slow.c:175:25: warning: cast removes address space '' of expression drivers/soc/fsl/qe/ucc_slow.c:194:23: warning: incorrect type in assignment (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:194:23: expected struct ucc_slow_pram *us_pram drivers/soc/fsl/qe/ucc_slow.c:194:23: got void [noderef] * drivers/soc/fsl/qe/ucc_slow.c:204:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:204:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:204:9: got restricted __be16 * drivers/soc/fsl/qe/ucc_slow.c:229:41: warning: incorrect type in assignment (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:229:41: expected struct qe_bd *tx_bd drivers/soc/fsl/qe/ucc_slow.c:229:41: got void [noderef] * drivers/soc/fsl/qe/ucc_slow.c:232:17: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:232:17: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:232:17: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:234:17: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:234:17: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:234:17: got unsigned int [usertype] * drivers/soc/fsl/qe/ucc_slow.c:238:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:238:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:238:9: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:239:9: warning: cast from restricted __be32 drivers/soc/fsl/qe/ucc_slow.c:239:9: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/ucc_slow.c:239:9: expected unsigned int [usertype] val drivers/soc/fsl/qe/ucc_slow.c:239:9: got restricted __be32 [usertype] drivers/soc/fsl/qe/ucc_slow.c:239:9: warning: cast from restricted 
__be32 drivers/soc/fsl/qe/ucc_slow.c:239:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:239:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:239:9: got unsigned int [usertype] * drivers/soc/fsl/qe/ucc_slow.c:242:26: warning: incorrect type in assignment (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:242:26: expected struct qe_bd *rx_bd drivers/soc/fsl/qe/ucc_slow.c:242:26: got void [noderef] * drivers/soc/fsl/qe/ucc_slow.c:245:17: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:245:17: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:245:17: got unsigned int [usertype] * drivers/soc/fsl/qe/ucc_slow.c:247:17: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:247:17: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:247:17: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:251:9: warning: cast from restricted __be32 drivers/soc/fsl/qe/ucc_slow.c:251:9: warning: incorrect type in argument 1 (different base types) drivers/soc/fsl/qe/ucc_slow.c:251:9: expected unsigned int [usertype] val drivers/soc/fsl/qe/ucc_slow.c:251:9: got restricted __be32 [usertype] drivers/soc/fsl/qe/ucc_slow.c:251:9: warning: cast from restricted __be32 drivers/soc/fsl/qe/ucc_slow.c:251:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:251:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:251:9: got unsigned int [usertype] * drivers/soc/fsl/qe/ucc_slow.c:252:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:252:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:252:9: got restricted __be32 * drivers/soc/fsl/qe/ucc_slow.c:276:39: warning: mixing different enum types: drivers/soc/fsl/qe/ucc_slow.c:276:39: unsigned int enum ucc_slow_tx_oversampling_rate drivers/soc/fsl/qe/ucc_slow.c:276:39: unsigned int enum ucc_slow_rx_oversampling_rate drivers/soc/fsl/qe/ucc_slow.c:296:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:296:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:296:9: got restricted __be16 * drivers/soc/fsl/qe/ucc_slow.c:297:9: warning: incorrect type in argument 2 (different address spaces) drivers/soc/fsl/qe/ucc_slow.c:297:9: expected void volatile [noderef] *addr drivers/soc/fsl/qe/ucc_slow.c:297:9: got restricted __be16 * Signed-off-by: Li Yang --- drivers/soc/fsl/qe/ucc_slow.c | 26 +++++++++++++------------- include/soc/fsl/qe/ucc_slow.h | 13 ++++++------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c index 20fb9d3e1a35..7e11be41ab62 100644 --- a/drivers/soc/fsl/qe/ucc_slow.c +++ b/drivers/soc/fsl/qe/ucc_slow.c @@ -72,7 +72,7 @@ EXPORT_SYMBOL(ucc_slow_restart_tx); void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) { - struct ucc_slow *us_regs; + struct ucc_slow __iomem *us_regs; u32 gumr_l; us_regs = uccs->us_regs; @@ -93,7 +93,7 @@ EXPORT_SYMBOL(ucc_slow_enable); void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) { - struct ucc_slow *us_regs; + struct ucc_slow __iomem *us_regs; u32 gumr_l; us_regs = uccs->us_regs; @@ -122,7 +122,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc u32 i; struct ucc_slow __iomem *us_regs; u32 
gumr; - struct qe_bd *bd; + struct qe_bd __iomem *bd; u32 id; u32 command; int ret = 0; @@ -169,8 +169,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc } us_regs = uccs->us_regs; - uccs->p_ucce = (u16 *) & (us_regs->ucce); - uccs->p_uccm = (u16 *) & (us_regs->uccm); + uccs->p_ucce = &us_regs->ucce; + uccs->p_uccm = &us_regs->uccm; /* Get PRAM base */ uccs->us_pram_offset = @@ -224,24 +224,24 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc /* clear bd buffer */ qe_iowrite32be(0, &bd->buf); /* set bd status and length */ - qe_iowrite32be(0, (u32 *)bd); + qe_iowrite32be(0, (u32 __iomem *)bd); bd++; } /* for last BD set Wrap bit */ qe_iowrite32be(0, &bd->buf); - qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd); + qe_iowrite32be(T_W, (u32 __iomem *)bd); /* Init Rx bds */ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { /* set bd status and length */ - qe_iowrite32be(0, (u32 *)bd); + qe_iowrite32be(0, (u32 __iomem *)bd); /* clear bd buffer */ qe_iowrite32be(0, &bd->buf); bd++; } /* for last BD set Wrap bit */ - qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd); + qe_iowrite32be(R_W, (u32 __iomem *)bd); qe_iowrite32be(0, &bd->buf); /* Set GUMR (For more details see the hardware spec.). */ @@ -266,8 +266,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc qe_iowrite32be(gumr, &us_regs->gumr_h); /* gumr_l */ - gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | - us_info->diag | us_info->mode; + gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc | + (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode; if (us_info->tci) gumr |= UCC_SLOW_GUMR_L_TCI; if (us_info->rinv) @@ -282,8 +282,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc /* if the data is in cachable memory, the 'global' */ /* in the function code should be set. 
*/ - uccs->us_pram->tbmr = UCC_BMR_BO_BE; - uccs->us_pram->rbmr = UCC_BMR_BO_BE; + qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr); + qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr); /* rbase, tbase are offsets from MURAM base */ qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase); diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h index d187a6be83bc..11a216e4e919 100644 --- a/include/soc/fsl/qe/ucc_slow.h +++ b/include/soc/fsl/qe/ucc_slow.h @@ -184,7 +184,7 @@ struct ucc_slow_info { struct ucc_slow_private { struct ucc_slow_info *us_info; struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */ - struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */ + struct ucc_slow_pram __iomem *us_pram; /* a pointer to the parameter RAM */ s32 us_pram_offset; int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ @@ -196,13 +196,12 @@ struct ucc_slow_private { and length for first BD in a frame */ s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */ s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */ - struct qe_bd *confBd; /* next BD for confirm after Tx */ - struct qe_bd *tx_bd; /* next BD for new Tx request */ - struct qe_bd *rx_bd; /* next BD to collect after Rx */ + struct qe_bd __iomem *confBd; /* next BD for confirm after Tx */ + struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */ + struct qe_bd __iomem *rx_bd; /* next BD to collect after Rx */ void *p_rx_frame; /* accumulating receive frame */ - u16 *p_ucce; /* a pointer to the event register in memory. - */ - u16 *p_uccm; /* a pointer to the mask register in memory */ + __be16 __iomem *p_ucce; /* a pointer to the event register in memory */ + __be16 __iomem *p_uccm; /* a pointer to the mask register in memory */ u16 saved_uccm; /* a saved mask for the RX Interrupt bits */ #ifdef STATISTICS u32 tx_frames; /* Transmitted frames counters */