From dfa25e9f0f9a41bc7dae42e1f57e7bbab10d8cc0 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 16 Sep 2021 11:33:34 +0100 Subject: [PATCH 01/28] firmware: arm_scmi: Review some virtio log messages Be more verbose avoiding to use _once flavour of dev_info/_err/_notice. Remove usage of __func_ to identify which vqueue is referred in some error messages and explicitly name the TX/RX vqueue. Link: https://lore.kernel.org/r/20210916103336.7243-1-cristian.marussi@arm.com Cc: "Michael S. Tsirkin" Cc: Sudeep Holla Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/virtio.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 87039c5c03fd..c30f82cc59ac 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -95,7 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); if (rc) - dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); + dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc); else virtqueue_kick(vioch->vqueue); @@ -193,8 +193,8 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) static int virtio_link_supplier(struct device *dev) { if (!scmi_vdev) { - dev_notice_once(dev, - "Deferring probe after not finding a bound scmi-virtio device\n"); + dev_notice(dev, + "Deferring probe after not finding a bound scmi-virtio device\n"); return -EPROBE_DEFER; } @@ -334,9 +334,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); if (rc) { list_add(&msg->list, &vioch->free_list); - dev_err_once(vioch->cinfo->dev, - "%s() failed to add to virtqueue (%d)\n", __func__, - rc); + dev_err(vioch->cinfo->dev, + "failed to add to TX virtqueue (%d)\n", rc); } else { virtqueue_kick(vioch->vqueue); } @@ -427,10 +426,10 @@ static int scmi_vio_probe(struct virtio_device *vdev) sz /= DESCRIPTORS_PER_TX_MSG; if (sz > MSG_TOKEN_MAX) { - dev_info_once(dev, - "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", - channels[i].is_rx ? "rx" : "tx", - sz, MSG_TOKEN_MAX); + dev_info(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? "rx" : "tx", + sz, MSG_TOKEN_MAX); sz = MSG_TOKEN_MAX; } channels[i].max_msg = sz; From b7d2cf7c817b86e705b97f72c6be192a6760a14f Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Thu, 28 Oct 2021 16:00:08 +0200 Subject: [PATCH 02/28] dt-bindings: arm: Add OP-TEE transport for SCMI Introduce compatible "linaro,scmi-optee" for SCMI transport channel based on an OP-TEE service invocation. The compatible mandates a channel ID defined with property "linaro,optee-channel-id". 
Link: https://lore.kernel.org/r/20211028140009.23331-1-etienne.carriere@linaro.org Cc: devicetree@vger.kernel.org Cc: Rob Herring Reviewed-by: Rob Herring Reviewed-by: Cristian Marussi Signed-off-by: Etienne Carriere Signed-off-by: Sudeep Holla --- .../bindings/firmware/arm,scmi.yaml | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml index 5c4c6782e052..eae15df36eef 100644 --- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml +++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml @@ -38,6 +38,9 @@ properties: The virtio transport only supports a single device. items: - const: arm,scmi-virtio + - description: SCMI compliant firmware with OP-TEE transport + items: + - const: linaro,scmi-optee interrupts: description: @@ -83,6 +86,11 @@ properties: description: SMC id required when using smc or hvc transports + linaro,optee-channel-id: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Channel specifier required when using OP-TEE transport. + protocol@11: type: object properties: @@ -195,6 +203,12 @@ patternProperties: minItems: 1 maxItems: 2 + linaro,optee-channel-id: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Channel specifier required when using OP-TEE transport and + protocol has a dedicated communication channel. + required: - reg @@ -226,6 +240,16 @@ else: - arm,smc-id - shmem + else: + if: + properties: + compatible: + contains: + const: linaro,scmi-optee + then: + required: + - linaro,optee-channel-id + examples: - | firmware { @@ -340,7 +364,48 @@ examples: reg = <0x11>; #power-domain-cells = <1>; }; + }; + }; + - | + firmware { + scmi { + compatible = "linaro,scmi-optee"; + linaro,optee-channel-id = <0>; + + #address-cells = <1>; + #size-cells = <0>; + + scmi_dvfs1: protocol@13 { + reg = <0x13>; + linaro,optee-channel-id = <1>; + shmem = <&cpu_optee_lpri0>; + #clock-cells = <1>; + }; + + scmi_clk0: protocol@14 { + reg = <0x14>; + #clock-cells = <1>; + }; + }; + }; + + soc { + #address-cells = <2>; + #size-cells = <2>; + + sram@51000000 { + compatible = "mmio-sram"; + reg = <0x0 0x51000000 0x0 0x10000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x51000000 0x10000>; + + cpu_optee_lpri0: optee-sram-section@0 { + compatible = "arm,scmi-shmem"; + reg = <0x0 0x80>; + }; }; }; From 5f90f189a052f6fc46048f6ce29a37b709548b81 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Thu, 28 Oct 2021 16:00:09 +0200 Subject: [PATCH 03/28] firmware: arm_scmi: Add optee transport Add a new transport channel to the SCMI firmware interface driver for SCMI message exchange based on optee transport channel. The optee transport is realized by connecting and invoking OP-TEE SCMI service interface PTA. Optee transport support (CONFIG_ARM_SCMI_TRANSPORT_OPTEE) is default enabled when optee driver (CONFIG_OPTEE) is enabled. Effective optee transport is setup upon OP-TEE SCMI service discovery at optee device initialization. For this SCMI UUID is registered to the optee bus for probing. This is done from the link_supplier operator of the SCMI optee transport. The optee transport can use a statically defined shared memory in which case SCMI device tree node defines it using an "arm,scmi-shmem" compatible phandle through property shmem. Alternatively, optee transport allocates the shared memory buffer from the optee driver when no shmem property is defined. 
The protocol used to exchange SCMI message over that shared memory is negotiated between optee transport driver and the OP-TEE service through capabilities exchange. OP-TEE SCMI service is integrated in OP-TEE since its release tag 3.13.0. The service interface is published in [1]. Link: [1] https://github.com/OP-TEE/optee_os/blob/3.13.0/lib/libutee/include/pta_scmi_client.h Link: https://lore.kernel.org/r/20211028140009.23331-2-etienne.carriere@linaro.org Cc: Cristian Marussi Cc: Sudeep Holla Reviewed-by: Cristian Marussi Signed-off-by: Etienne Carriere Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 12 + drivers/firmware/arm_scmi/Makefile | 1 + drivers/firmware/arm_scmi/common.h | 3 + drivers/firmware/arm_scmi/driver.c | 3 + drivers/firmware/arm_scmi/optee.c | 581 +++++++++++++++++++++++++++++ 5 files changed, 600 insertions(+) create mode 100644 drivers/firmware/arm_scmi/optee.c diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 3d7081e84853..da1daa593204 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX If you want the ARM SCMI PROTOCOL stack to include support for a transport based on mailboxes, answer Y. +config ARM_SCMI_TRANSPORT_OPTEE + bool "SCMI transport based on OP-TEE service" + depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + This enables the OP-TEE service based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on OP-TEE SCMI service, answer Y. + config ARM_SCMI_TRANSPORT_SMC bool "SCMI transport based on SMC" depends on HAVE_ARM_SMCCC_DISCOVERY diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 1dcf123d64ab..ef66ec8ca917 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -6,6 +6,7 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index dea1bfbe1052..6438b5248c24 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -421,6 +421,9 @@ extern const struct scmi_desc scmi_smc_desc; #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO extern const struct scmi_desc scmi_virtio_desc; #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE +extern const struct scmi_desc scmi_optee_desc; +#endif void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b406b3f78f46..768926a77f5d 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1994,6 +1994,9 @@ static const struct of_device_id scmi_of_match[] = { #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE + { .compatible = 
"linaro,scmi-optee", .data = &scmi_optee_desc }, +#endif #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, #endif diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c new file mode 100644 index 000000000000..d9819b0197ec --- /dev/null +++ b/drivers/firmware/arm_scmi/optee.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define SCMI_OPTEE_MAX_MSG_SIZE 128 + +enum scmi_optee_pta_cmd { + /* + * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities + * + * [out] value[0].a: Capability bit mask (enum pta_scmi_caps) + * [out] value[0].b: Extended capabilities or 0 + */ + PTA_SCMI_CMD_CAPABILITIES = 0, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer + * + * [in] value[0].a: Channel handle + * + * Shared memory used for SCMI message/response exhange is expected + * already identified and bound to channel handle in both SCMI agent + * and SCMI server (OP-TEE) parts. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). + */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message + * + * [in] value[0].a: Channel handle + * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload) + * + * Shared memory used for SCMI message/response is a SMT buffer + * referenced by param[1]. It shall be 128 bytes large to fit response + * payload whatever message playload size. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). + */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2, + + /* + * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle + * + * SCMI shm information are 0 if agent expects to use OP-TEE regular SHM + * + * [in] value[0].a: Channel identifier + * [out] value[0].a: Returned channel handle + * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps) + */ + PTA_SCMI_CMD_GET_CHANNEL = 3, +}; + +/* + * OP-TEE SCMI service capabilities bit flags (32bit) + * + * PTA_SCMI_CAPS_SMT_HEADER + * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in + * shared memory buffers to carry SCMI protocol synchronisation information. 
+ */ +#define PTA_SCMI_CAPS_NONE 0 +#define PTA_SCMI_CAPS_SMT_HEADER BIT(0) + +/** + * struct scmi_optee_channel - Description of an OP-TEE SCMI channel + * + * @channel_id: OP-TEE channel ID used for this transport + * @tee_session: TEE session identifier + * @caps: OP-TEE SCMI channel capabilities + * @mu: Mutex protection on channel access + * @cinfo: SCMI channel information + * @shmem: Virtual base address of the shared memory + * @tee_shm: Reference to TEE shared memory or NULL if using static shmem + * @link: Reference in agent's channel list + */ +struct scmi_optee_channel { + u32 channel_id; + u32 tee_session; + u32 caps; + struct mutex mu; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; + struct tee_shm *tee_shm; + struct list_head link; +}; + +/** + * struct scmi_optee_agent - OP-TEE transport private data + * + * @dev: Device used for communication with TEE + * @tee_ctx: TEE context used for communication + * @caps: Supported channel capabilities + * @mu: Mutex for protection of @channel_list + * @channel_list: List of all created channels for the agent + */ +struct scmi_optee_agent { + struct device *dev; + struct tee_context *tee_ctx; + u32 caps; + struct mutex mu; + struct list_head channel_list; +}; + +/* There can be only 1 SCMI service in OP-TEE we connect to */ +static struct scmi_optee_agent *scmi_optee_private; + +/* Forward reference to scmi_optee transport initialization */ +static int scmi_optee_init(void); + +/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ +static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) +{ + struct device *dev = agent->dev; + struct tee_client_device *scmi_pta = to_tee_client_device(dev); + struct tee_ioctl_open_session_arg arg = { }; + int ret; + + memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN); + arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; + + ret = tee_client_open_session(agent->tee_ctx, &arg, NULL); + if (ret < 0 || arg.ret) { + dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + *tee_session = arg.session; + + return 0; +} + +static void close_session(struct scmi_optee_agent *agent, u32 tee_session) +{ + tee_client_close_session(agent->tee_ctx, tee_session); +} + +static int get_capabilities(struct scmi_optee_agent *agent) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + u32 caps; + u32 tee_session; + int ret; + + ret = open_session(agent, &tee_session); + if (ret) + return ret; + + arg.func = PTA_SCMI_CMD_CAPABILITIES; + arg.session = tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(agent->tee_ctx, &arg, param); + + close_session(agent, tee_session); + + if (ret < 0 || arg.ret) { + dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + caps = param[0].u.value.a; + + if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) { + dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n"); + return -EOPNOTSUPP; + } + + agent->caps = caps; + + return 0; +} + +static int get_channel(struct scmi_optee_channel *channel) +{ + struct device *dev = scmi_optee_private->dev; + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER; + int ret; + + arg.func = PTA_SCMI_CMD_GET_CHANNEL; + arg.session = channel->tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param[0].u.value.a 
= channel->channel_id; + param[0].u.value.b = caps; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + + if (ret || arg.ret) { + dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret); + return -EOPNOTSUPP; + } + + /* From now on use channel identifer provided by OP-TEE SCMI service */ + channel->channel_id = param[0].u.value.a; + channel->caps = caps; + + return 0; +} + +static int invoke_process_smt_channel(struct scmi_optee_channel *channel) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[2] = { }; + int ret; + + arg.session = channel->tee_session; + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + if (channel->tee_shm) { + param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; + param[1].u.memref.shm = channel->tee_shm; + param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + arg.num_params = 2; + arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE; + } else { + arg.num_params = 1; + arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL; + } + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + return 0; +} + +static int scmi_optee_link_supplier(struct device *dev) +{ + if (!scmi_optee_private) { + if (scmi_optee_init()) + dev_dbg(dev, "Optee bus not yet ready\n"); + + /* Wait for optee bus */ + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier optee device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool scmi_optee_chan_available(struct device *dev, int idx) +{ + u32 channel_id; + + return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id", + idx, &channel_id); +} + +static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + shmem_clear_channel(channel->shmem); +} + +static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) +{ + const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + + channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); + if (IS_ERR(channel->tee_shm)) { + dev_err(channel->cinfo->dev, "shmem allocation failed\n"); + return -ENOMEM; + } + + channel->shmem = (void *)tee_shm_get_va(channel->tee_shm, 0); + memset(channel->shmem, 0, msg_size); + shmem_clear_channel(channel->shmem); + + return 0; +} + +static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + struct device_node *np; + resource_size_t size; + struct resource res; + int ret; + + np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0); + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + ret = -ENXIO; + goto out; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(dev, "Failed to get SCMI Tx shared memory\n"); + goto out; + } + + size = resource_size(&res); + + channel->shmem = devm_ioremap(dev, res.start, size); + if (!channel->shmem) { + dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); + ret = -EADDRNOTAVAIL; + goto out; + } + + ret = 0; + +out: + of_node_put(np); + + return ret; +} + +static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + if (of_find_property(cinfo->dev->of_node, 
"shmem", NULL)) + return setup_static_shmem(dev, cinfo, channel); + else + return setup_dynamic_shmem(dev, channel); +} + +static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) +{ + struct scmi_optee_channel *channel; + uint32_t channel_id; + int ret; + + if (!tx) + return -ENODEV; + + channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id", + 0, &channel_id); + if (ret) + return ret; + + cinfo->transport_info = channel; + channel->cinfo = cinfo; + channel->channel_id = channel_id; + mutex_init(&channel->mu); + + ret = setup_shmem(dev, cinfo, channel); + if (ret) + return ret; + + ret = open_session(scmi_optee_private, &channel->tee_session); + if (ret) + goto err_free_shm; + + ret = get_channel(channel); + if (ret) + goto err_close_sess; + + mutex_lock(&scmi_optee_private->mu); + list_add(&channel->link, &scmi_optee_private->channel_list); + mutex_unlock(&scmi_optee_private->mu); + + return 0; + +err_close_sess: + close_session(scmi_optee_private, channel->tee_session); +err_free_shm: + if (channel->tee_shm) + tee_shm_free(channel->tee_shm); + + return ret; +} + +static int scmi_optee_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_lock(&scmi_optee_private->mu); + list_del(&channel->link); + mutex_unlock(&scmi_optee_private->mu); + + close_session(scmi_optee_private, channel->tee_session); + + if (channel->tee_shm) { + tee_shm_free(channel->tee_shm); + channel->tee_shm = NULL; + } + + cinfo->transport_info = NULL; + channel->cinfo = NULL; + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan, + struct scmi_xfer *xfer) +{ + if (!chan) + return NULL; + + return chan->shmem; +} + + +static int scmi_optee_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); + int ret; + + mutex_lock(&channel->mu); + shmem_tx_prepare(shmem, xfer); + + ret = invoke_process_smt_channel(channel); + + scmi_rx_callback(cinfo, shmem_read_header(shmem), NULL); + mutex_unlock(&channel->mu); + + return ret; +} + +static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); + + shmem_fetch_response(shmem, xfer); +} + +static bool scmi_optee_poll_done(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); + + return shmem_poll_done(shmem, xfer); +} + +static struct scmi_transport_ops scmi_optee_ops = { + .link_supplier = scmi_optee_link_supplier, + .chan_available = scmi_optee_chan_available, + .chan_setup = scmi_optee_chan_setup, + .chan_free = scmi_optee_chan_free, + .send_message = scmi_optee_send_message, + .fetch_response = scmi_optee_fetch_response, + .clear_channel = scmi_optee_clear_channel, + .poll_done = scmi_optee_poll_done, +}; + +static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) +{ + return ver->impl_id == TEE_IMPL_ID_OPTEE; +} + +static int scmi_optee_service_probe(struct device *dev) +{ + struct 
scmi_optee_agent *agent; + struct tee_context *tee_ctx; + int ret; + + /* Only one SCMI OP-TEE device allowed */ + if (scmi_optee_private) { + dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n"); + return -EBUSY; + } + + tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL); + if (IS_ERR(tee_ctx)) + return -ENODEV; + + agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL); + if (!agent) { + ret = -ENOMEM; + goto err; + } + + agent->dev = dev; + agent->tee_ctx = tee_ctx; + INIT_LIST_HEAD(&agent->channel_list); + + ret = get_capabilities(agent); + if (ret) + goto err; + + /* Ensure agent resources are all visible before scmi_optee_private is */ + smp_mb(); + scmi_optee_private = agent; + + return 0; + +err: + tee_client_close_context(tee_ctx); + + return ret; +} + +static int scmi_optee_service_remove(struct device *dev) +{ + struct scmi_optee_agent *agent = scmi_optee_private; + + if (!scmi_optee_private) + return -EINVAL; + + if (!list_empty(&scmi_optee_private->channel_list)) + return -EBUSY; + + /* Ensure cleared reference is visible before resources are released */ + smp_store_mb(scmi_optee_private, NULL); + + tee_client_close_context(agent->tee_ctx); + + return 0; +} + +static const struct tee_client_device_id scmi_optee_service_id[] = { + { + UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e, + 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99) + }, + { } +}; + +MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); + +static struct tee_client_driver scmi_optee_driver = { + .id_table = scmi_optee_service_id, + .driver = { + .name = "scmi-optee", + .bus = &tee_bus_type, + .probe = scmi_optee_service_probe, + .remove = scmi_optee_service_remove, + }, +}; + +static int scmi_optee_init(void) +{ + return driver_register(&scmi_optee_driver.driver); +} + +static void scmi_optee_exit(void) +{ + if (scmi_optee_private) + driver_unregister(&scmi_optee_driver.driver); +} + +const struct scmi_desc scmi_optee_desc = { + .transport_exit = scmi_optee_exit, + .ops = &scmi_optee_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, +}; From 530897ecdb3d69d71757c353a003e7138a791bcc Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 15 Nov 2021 10:29:10 +0000 Subject: [PATCH 04/28] firmware: arm_scmi: Make virtio Version_1 compliance optional Introduce a compilation option to disable strict enforcement of compliance against VirtIO Version_1 backends, so as to allow to support also Legacy VirtIO devices implementations. Link: https://lore.kernel.org/r/20211115102910.7639-1-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 15 +++++++++++++++ drivers/firmware/arm_scmi/virtio.c | 3 ++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index da1daa593204..638ecec89ff1 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -89,6 +89,21 @@ config ARM_SCMI_TRANSPORT_VIRTIO If you want the ARM SCMI PROTOCOL stack to include support for a transport based on VirtIO, answer Y. +config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + bool "SCMI VirtIO transport Version 1 compliance" + depends on ARM_SCMI_TRANSPORT_VIRTIO + default y + help + This enforces strict compliance with VirtIO Version 1 specification. 
+ + If you want the ARM SCMI VirtIO transport layer to refuse to work + with Legacy VirtIO backends and instead support only VirtIO Version 1 + devices (or above), answer Y. + + If you want instead to support also old Legacy VirtIO backends (like + the ones implemented by kvmtool) and let the core Kernel VirtIO layer + take care of the needed conversions, say N. + endif #ARM_SCMI_PROTOCOL config ARM_SCMI_POWER_DOMAIN diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index c30f82cc59ac..fd0f6f91fc0b 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -459,12 +459,13 @@ static void scmi_vio_remove(struct virtio_device *vdev) static int scmi_vio_validate(struct virtio_device *vdev) { +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { dev_err(&vdev->dev, "device does not comply with spec version 1.x\n"); return -EINVAL; } - +#endif return 0; } From 61bc76be367e9928c2c49fbde9783f4821446482 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 23 Nov 2021 08:36:20 +0000 Subject: [PATCH 05/28] firmware: arm_scmi: optee: Fix missing mutex_init() The driver allocates the mutex but not initialize it. Use mutex_init() on it to initialize it correctly. Link: https://lore.kernel.org/r/20211123083620.2366860-1-weiyongjun1@huawei.com Fixes: 5f90f189a052 ("firmware: arm_scmi: Add optee transport") Reported-by: Hulk Robot Reviewed-by: Etienne Carriere Signed-off-by: Wei Yongjun Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/optee.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index d9819b0197ec..901737c9f5f8 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -506,6 +506,7 @@ static int scmi_optee_service_probe(struct device *dev) agent->dev = dev; agent->tee_ctx = tee_ctx; INIT_LIST_HEAD(&agent->channel_list); + mutex_init(&agent->mu); ret = get_capabilities(agent); if (ret) From afc9c1e26bc7d3145bd1112d74bbe8d0152da934 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 25 Nov 2021 15:07:30 +0000 Subject: [PATCH 06/28] firmware: arm_scmi: optee: Drop the support for the OPTEE shared dynamic buffer The shared memory buffer allocated by the optee driver is normal cached memory and can't be used with IOMEM APIs used in shmem_*. We currently support only IO memory for shared memory and supporting normal cached memory needs more changes and needs to be thought through properly. So for now, let us drop the support for this OPTEE shared buffer. 
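The underlying issue is one of memory types: the shmem_*() helpers operate on an
ioremap()'d, __iomem-annotated mapping, while tee_shm_alloc_kernel_buf() returns
plain cached kernel memory. A minimal sketch of the mismatch (illustrative only,
not part of this patch):

  #include <linux/io.h>
  #include <linux/string.h>
  #include <linux/tee_drv.h>

  static void contrast_shmem_types(struct tee_context *ctx, phys_addr_t pa)
  {
          /* Static channel: device memory, what the shmem_*() helpers expect */
          void __iomem *mmio = ioremap(pa, 128);
          /* Dynamic OP-TEE buffer: normal cached kernel memory */
          struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, 128);
          void *va = tee_shm_get_va(shm, 0);

          /* (error handling omitted for brevity) */
          iowrite32(0, mmio);     /* valid only on an __iomem mapping */
          memset(va, 0, 128);     /* valid on normal cached memory */
          /* Passing 'va' to the __iomem based shmem_*() helpers is the unsupported mix */
  }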
Link: https://lore.kernel.org/r/20211125150730.188487-1-sudeep.holla@arm.com Cc: Cristian Marussi Cc: Etienne Carriere Reviewed-by: Etienne Carriere Reviewed-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/optee.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 901737c9f5f8..175b39bcd470 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -282,23 +282,6 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) shmem_clear_channel(channel->shmem); } -static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) -{ - const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; - - channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); - if (IS_ERR(channel->tee_shm)) { - dev_err(channel->cinfo->dev, "shmem allocation failed\n"); - return -ENOMEM; - } - - channel->shmem = (void *)tee_shm_get_va(channel->tee_shm, 0); - memset(channel->shmem, 0, msg_size); - shmem_clear_channel(channel->shmem); - - return 0; -} - static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel) { @@ -342,7 +325,7 @@ static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, if (of_find_property(cinfo->dev->of_node, "shmem", NULL)) return setup_static_shmem(dev, cinfo, channel); else - return setup_dynamic_shmem(dev, channel); + return -ENOMEM; } static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) From d211ddeb511af5998dbd3e555be0fbe6033459d9 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:41 +0000 Subject: [PATCH 07/28] firmware: arm_scmi: Perform earlier cinfo lookup call in do_xfer Lookup cinfo data early in do_xfer so as to avoid any further init work on xfer structure in case of error. No functional change. Link: https://lore.kernel.org/r/20211129191156.29322-2-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 768926a77f5d..3cf161f3bcc7 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -766,6 +766,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return -EINVAL; } + cinfo = idr_find(&info->tx_idr, pi->proto->id); + if (unlikely(!cinfo)) + return -EINVAL; + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -774,10 +778,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph, xfer->hdr.protocol_id = pi->proto->id; reinit_completion(&xfer->done); - cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id); - if (unlikely(!cinfo)) - return -EINVAL; - trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); From 582730b9cbcc534a39beaf3aa9078e2c431ff39f Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:42 +0000 Subject: [PATCH 08/28] firmware: arm_scmi: Set polling timeout to max_rx_timeout_ms Use transport specific transmission timeout (max_rx_timeout_ms) also for polling transactions. 
Initially when polling mode was added, it was intended to be used only in scheduler context and hence the choice of 100us for the polling timeout. However the only user for that was dropped for other SCMI concurrency issues, so it shouldn't cause any issue to increase this timeout value now. Link: https://lore.kernel.org/r/20211129191156.29322-3-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi [sudeep.holla: Updated commit message with historical facts about 100us timeout] Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 3cf161f3bcc7..568562121f64 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -724,8 +724,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph, __scmi_xfer_put(&info->tx_minfo, xfer); } -#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) - static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, ktime_t stop) { @@ -799,7 +797,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph, } if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); + ktime_t stop = ktime_add_ms(ktime_get(), + info->desc->max_rx_timeout_ms); spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); if (ktime_before(ktime_get(), stop)) { From 5a731aebd31bf840a93deae12bdfd831513e7211 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:43 +0000 Subject: [PATCH 09/28] firmware: arm_scmi: Refactor message response path Refactor code path waiting for message responses into a dedicated helper function. No functional change. Link: https://lore.kernel.org/r/20211129191156.29322-4-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 88 +++++++++++++++++++----------- 1 file changed, 56 insertions(+), 32 deletions(-) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 568562121f64..9a8d6bfd4ebb 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -738,6 +738,61 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, ktime_after(ktime_get(), stop); } +/** + * scmi_wait_for_message_response - An helper to group all the possible ways of + * waiting for a synchronous message response. + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being waited for. + * + * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on + * configuration flags like xfer->hdr.poll_completion. + * + * Return: 0 on Success, error otherwise. + */ +static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct device *dev = info->dev; + int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; + + if (xfer->hdr.poll_completion) { + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); + if (ktime_before(ktime_get(), stop)) { + unsigned long flags; + + /* + * Do not fetch_response if an out-of-order delayed + * response is being processed. 
+			 */
+ */ + spin_lock_irqsave(&xfer->lock, flags); + if (xfer->state == SCMI_XFER_SENT_OK) { + info->desc->ops->fetch_response(cinfo, xfer); + xfer->state = SCMI_XFER_RESP_OK; + } + spin_unlock_irqrestore(&xfer->lock, flags); + } else { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } else { + /* And we wait for the response. */ + if (!wait_for_completion_timeout(&xfer->done, + msecs_to_jiffies(timeout_ms))) { + dev_err(dev, "timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + return ret; +} + /** * do_xfer() - Do one transfer * @@ -752,7 +807,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct scmi_xfer *xfer) { int ret; - int timeout; const struct scmi_protocol_instance *pi = ph_to_pi(ph); struct scmi_info *info = handle_to_scmi_info(pi->handle); struct device *dev = info->dev; @@ -796,37 +850,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } - if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ms(ktime_get(), - info->desc->max_rx_timeout_ms); - - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - if (ktime_before(ktime_get(), stop)) { - unsigned long flags; - - /* - * Do not fetch_response if an out-of-order delayed - * response is being processed. - */ - spin_lock_irqsave(&xfer->lock, flags); - if (xfer->state == SCMI_XFER_SENT_OK) { - info->desc->ops->fetch_response(cinfo, xfer); - xfer->state = SCMI_XFER_RESP_OK; - } - spin_unlock_irqrestore(&xfer->lock, flags); - } else { - ret = -ETIMEDOUT; - } - } else { - /* And we wait for the response. */ - timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); - if (!wait_for_completion_timeout(&xfer->done, timeout)) { - dev_err(dev, "timed out in resp(caller: %pS)\n", - (void *)_RET_IP_); - ret = -ETIMEDOUT; - } - } - + ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); From 8b276b59ccf983517a0d40f8a3ac243a104d4c16 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:44 +0000 Subject: [PATCH 10/28] include: trace: Add new scmi_xfer_response_wait event Having a new step to trace SCMI stack while it waits for synchronous responses is useful to analyze system performance when changing waiting mode between polling and interrupt completion. 
Link: https://lore.kernel.org/r/20211129191156.29322-5-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- include/trace/events/scmi.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h index f3a4b4d60714..cee4b2b64ae4 100644 --- a/include/trace/events/scmi.h +++ b/include/trace/events/scmi.h @@ -33,6 +33,34 @@ TRACE_EVENT(scmi_xfer_begin, __entry->seq, __entry->poll) ); +TRACE_EVENT(scmi_xfer_response_wait, + TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, + u32 timeout, bool poll), + TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll), + + TP_STRUCT__entry( + __field(int, transfer_id) + __field(u8, msg_id) + __field(u8, protocol_id) + __field(u16, seq) + __field(u32, timeout) + __field(bool, poll) + ), + + TP_fast_assign( + __entry->transfer_id = transfer_id; + __entry->msg_id = msg_id; + __entry->protocol_id = protocol_id; + __entry->seq = seq; + __entry->timeout = timeout; + __entry->poll = poll; + ), + + TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u", + __entry->transfer_id, __entry->msg_id, __entry->protocol_id, + __entry->seq, __entry->timeout, __entry->poll) +); + TRACE_EVENT(scmi_xfer_end, TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, int status), From f872af09094c042ac46e64d030f223b63ead5967 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 29 Nov 2021 19:11:45 +0000 Subject: [PATCH 11/28] firmware: arm_scmi: Use new trace event scmi_xfer_response_wait Use new trace event to mark start of waiting for response section. Link: https://lore.kernel.org/r/20211129191156.29322-6-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 9a8d6bfd4ebb..476b91845e40 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -757,6 +757,11 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, struct device *dev = info->dev; int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; + trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + timeout_ms, + xfer->hdr.poll_completion); + if (xfer->hdr.poll_completion) { ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); From a690b7e6e774b7c43fed37d5bd3b6e037f3b3db9 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:36 +0000 Subject: [PATCH 12/28] firmware: arm_scmi: Add configurable polling mode for transports SCMI communications along TX channels can optionally be provided of a completion interrupt; when such interrupt is not available, command transactions should rely on polling, where the SCMI core takes care to repeatedly evaluate the transport-specific .poll_done() function, if available, to determine if and when a request was fully completed or timed out. Such mechanism is already present and working on a single transfer base: SCMI protocols can indeed enable hdr.poll_completion on specific commands ahead of each transfer and cause that transaction to be handled with polling. Introduce a couple of flags to be able to enforce such polling behaviour globally at will: - scmi_desc.force_polling: to statically switch the whole transport to polling mode. 
- scmi_chan_info.no_completion_irq: to switch a single channel dynamically to polling mode if, at runtime, is determined that no completion interrupt was available for such channel. Link: https://lore.kernel.org/r/20211220195646.44498-2-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 8 ++++++++ drivers/firmware/arm_scmi/driver.c | 33 ++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 6438b5248c24..652e5d95ee65 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel * @handle: Pointer to SCMI entity handle + * @no_completion_irq: Flag to indicate that this channel has no completion + * interrupt mechanism for synchronous commands. + * This can be dynamically set by transports at run-time + * inside their provided .chan_setup(). * @transport_info: Transport layer related information */ struct scmi_chan_info { struct device *dev; struct scmi_handle *handle; + bool no_completion_irq; void *transport_info; }; @@ -402,6 +407,8 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * be pending simultaneously in the system. May be overridden by the * get_max_msg op. * @max_msg_size: Maximum size of data per message that can be handled. + * @force_polling: Flag to force this whole transport to use SCMI core polling + * mechanism instead of completion interrupts even if available. */ struct scmi_desc { int (*transport_init)(void); @@ -410,6 +417,7 @@ struct scmi_desc { int max_rx_timeout_ms; int max_msg; int max_msg_size; + const bool force_polling; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 476b91845e40..7579f54b0047 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -609,6 +609,24 @@ static inline void scmi_clear_channel(struct scmi_info *info, info->desc->ops->clear_channel(cinfo); } +static inline bool is_polling_required(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return cinfo->no_completion_irq || info->desc->force_polling; +} + +static inline bool is_transport_polling_capable(struct scmi_info *info) +{ + return info->desc->ops->poll_done; +} + +static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return is_polling_required(cinfo, info) && + is_transport_polling_capable(info); +} + static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) { @@ -817,6 +835,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct device *dev = info->dev; struct scmi_chan_info *cinfo; + /* Check for polling request on custom command xfers at first */ if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); @@ -827,6 +846,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, if (unlikely(!cinfo)) return -EINVAL; + /* True ONLY if also supported by transport. 
*/ + if (is_polling_enabled(cinfo, info)) + xfer->hdr.poll_completion = true; + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -1527,6 +1550,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, if (ret) return ret; + if (tx && is_polling_required(cinfo, info)) { + if (is_transport_polling_capable(info)) + dev_info(dev, + "Enabled polling mode TX channel - prot_id:%d\n", + prot_id); + else + dev_warn(dev, + "Polling mode NOT supported by transport.\n"); + } + idr_alloc: ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); if (ret != prot_id) { From f716cbd33f038af87824c30e165b3b70e4c6be1e Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:37 +0000 Subject: [PATCH 13/28] firmware: arm_scmi: Make smc transport use common completions When a completion irq is available use it and delegate command completion handling to the core SCMI completion mechanism. If no completion irq is available revert to polling, using the core common polling machinery. Link: https://lore.kernel.org/r/20211220195646.44498-3-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/smc.c | 39 +++++++++++++++++---------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 4effecc3bb46..d6c6ad9f6bab 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -25,8 +25,6 @@ * @shmem: Transmit/Receive shared memory area * @shmem_lock: Lock to protect access to Tx/Rx shared memory area * @func_id: smc/hvc call function id - * @irq: Optional; employed when platforms indicates msg completion by intr. - * @tx_complete: Optional, employed only when irq is valid. 
*/ struct scmi_smc { @@ -34,15 +32,14 @@ struct scmi_smc { struct scmi_shared_mem __iomem *shmem; struct mutex shmem_lock; u32 func_id; - int irq; - struct completion tx_complete; }; static irqreturn_t smc_msg_done_isr(int irq, void *data) { struct scmi_smc *scmi_info = data; - complete(&scmi_info->tx_complete); + scmi_rx_callback(scmi_info->cinfo, + shmem_read_header(scmi_info->shmem), NULL); return IRQ_HANDLED; } @@ -111,8 +108,8 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, dev_err(dev, "failed to setup SCMI smc irq\n"); return ret; } - init_completion(&scmi_info->tx_complete); - scmi_info->irq = irq; + } else { + cinfo->no_completion_irq = true; } scmi_info->func_id = func_id; @@ -142,26 +139,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo, struct scmi_smc *scmi_info = cinfo->transport_info; struct arm_smccc_res res; + /* + * Channel lock will be released only once response has been + * surely fully retrieved, so after .mark_txdone() + */ mutex_lock(&scmi_info->shmem_lock); shmem_tx_prepare(scmi_info->shmem, xfer); - if (scmi_info->irq) - reinit_completion(&scmi_info->tx_complete); - arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); - if (scmi_info->irq) - wait_for_completion(&scmi_info->tx_complete); - - scmi_rx_callback(scmi_info->cinfo, - shmem_read_header(scmi_info->shmem), NULL); - - mutex_unlock(&scmi_info->shmem_lock); - /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ - if (res.a0) + if (res.a0) { + mutex_unlock(&scmi_info->shmem_lock); return -EOPNOTSUPP; + } + return 0; } @@ -173,6 +166,13 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(scmi_info->shmem, xfer); } +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + + mutex_unlock(&scmi_info->shmem_lock); +} + static bool smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) { @@ -186,6 +186,7 @@ static const struct scmi_transport_ops scmi_smc_ops = { .chan_setup = smc_chan_setup, .chan_free = smc_chan_free, .send_message = smc_send_message, + .mark_txdone = smc_mark_txdone, .fetch_response = smc_fetch_response, .poll_done = smc_poll_done, }; From 31d2f803c19c1a7ad8d05c20b6cd83e8a647fb5c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:38 +0000 Subject: [PATCH 14/28] firmware: arm_scmi: Add sync_cmds_completed_on_ret transport flag Add a flag to let the transport signal to the core if its handling of sync command implies that, after .send_message has returned successfully, the requested command can be assumed to be fully and completely executed on SCMI platform side so that any possible response value is already immediately available to be retrieved by a .fetch_response: in other words the polling phase can be skipped in such a case and the response values accessed straight away. Note that all of the above applies only when polling mode of operation was selected by the core: if instead a completion IRQ was found to be available the normal response processing path based on completions will still be followed. 
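When polling has been selected, the resulting wait logic can be summarised with
the following sketch (a condensed illustration of the intended behaviour, not the
literal driver code):

  static int wait_sketch(struct scmi_info *info, struct scmi_chan_info *cinfo,
                         struct scmi_xfer *xfer, ktime_t stop)
  {
          if (!info->desc->sync_cmds_completed_on_ret) {
                  /* Transport needs real polling through .poll_done() */
                  spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
                  if (ktime_after(ktime_get(), stop))
                          return -ETIMEDOUT;
          }

          /*
           * Either polling completed in time or the transport guaranteed that
           * the command was fully executed once .send_message() returned: the
           * response values can be fetched straight away.
           */
          info->desc->ops->fetch_response(cinfo, xfer);
          return 0;
  }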
Link: https://lore.kernel.org/r/20211220195646.44498-4-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 8 +++++++ drivers/firmware/arm_scmi/driver.c | 34 +++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 652e5d95ee65..24b1d1ac5f12 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -409,6 +409,13 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * @max_msg_size: Maximum size of data per message that can be handled. * @force_polling: Flag to force this whole transport to use SCMI core polling * mechanism instead of completion interrupts even if available. + * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures + * synchronous-command messages are atomically + * completed on .send_message: no need to poll + * actively waiting for a response. + * Used by core internally only when polling is + * selected as a waiting for reply method: i.e. + * if a completion irq was found use that anyway. */ struct scmi_desc { int (*transport_init)(void); @@ -418,6 +425,7 @@ struct scmi_desc { int max_msg; int max_msg_size; const bool force_polling; + const bool sync_cmds_completed_on_ret; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 7579f54b0047..a1c33d36800b 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -617,7 +617,8 @@ static inline bool is_polling_required(struct scmi_chan_info *cinfo, static inline bool is_transport_polling_capable(struct scmi_info *info) { - return info->desc->ops->poll_done; + return info->desc->ops->poll_done || + info->desc->sync_cmds_completed_on_ret; } static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, @@ -781,10 +782,28 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->hdr.poll_completion); if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + /* + * Real polling is needed only if transport has NOT declared + * itself to support synchronous commands replies. + */ + if (!info->desc->sync_cmds_completed_on_ret) { + /* + * Poll on xfer using transport provided .poll_done(); + * assumes no completion interrupt was available. + */ + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - if (ktime_before(ktime_get(), stop)) { + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, + xfer, stop)); + if (ktime_after(ktime_get(), stop)) { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + if (!ret) { unsigned long flags; /* @@ -797,11 +816,6 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->state = SCMI_XFER_RESP_OK; } spin_unlock_irqrestore(&xfer->lock, flags); - } else { - dev_err(dev, - "timed out in resp(caller: %pS) - polling\n", - (void *)_RET_IP_); - ret = -ETIMEDOUT; } } else { /* And we wait for the response. 
*/ @@ -836,7 +850,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct scmi_chan_info *cinfo; /* Check for polling request on custom command xfers at first */ - if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { + if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); return -EINVAL; From 117542b81fe7b12002afc0cfdcf6cdd3ebfc0f18 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:39 +0000 Subject: [PATCH 15/28] firmware: arm_scmi: Make smc support sync_cmds_completed_on_ret Enable sync_cmds_completed_on_ret in the SMC transport descriptor and remove SMC specific .poll_done callback support since polling is bypassed when sync_cmds_completed_on_ret is set. Link: https://lore.kernel.org/r/20211220195646.44498-5-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/smc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index d6c6ad9f6bab..df0defd9f8bb 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -173,14 +173,6 @@ static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) mutex_unlock(&scmi_info->shmem_lock); } -static bool -smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) -{ - struct scmi_smc *scmi_info = cinfo->transport_info; - - return shmem_poll_done(scmi_info->shmem, xfer); -} - static const struct scmi_transport_ops scmi_smc_ops = { .chan_available = smc_chan_available, .chan_setup = smc_chan_setup, @@ -188,7 +180,6 @@ static const struct scmi_transport_ops scmi_smc_ops = { .send_message = smc_send_message, .mark_txdone = smc_mark_txdone, .fetch_response = smc_fetch_response, - .poll_done = smc_poll_done, }; const struct scmi_desc scmi_smc_desc = { @@ -196,4 +187,13 @@ const struct scmi_desc scmi_smc_desc = { .max_rx_timeout_ms = 30, .max_msg = 20, .max_msg_size = 128, + /* + * Setting .sync_cmds_atomic_replies to true for SMC assumes that, + * once the SMC instruction has completed successfully, the issued + * SCMI command would have been already fully processed by the SCMI + * platform firmware and so any possible response value expected + * for the issued command will be immmediately ready to be fetched + * from the shared memory area. + */ + .sync_cmds_completed_on_ret = true, }; From bf322084fec30b92423911db0169a3610008fc15 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:40 +0000 Subject: [PATCH 16/28] firmware: arm_scmi: Make optee support sync_cmds_completed_on_ret Declare each OPTEE SCMI channel as not having a completion_irq so as to enable polling mode and then enable also .sync_cmds_completed_on_ret flag in the OPTEE transport descriptor so that real polling is itself effectively bypassed on the rx path: once the optee command invocation has successfully returned the core will directly fetch the response from the shared memory area. Remove OPTEE SCMI transport specific .poll_done callback support since real polling is effectively bypassed when .sync_cmds_completed_on_ret is set. Add OPTEE SCMI transport specific .mark_txdone callback support in order to properly handle channel locking along the tx path. 
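With this change the channel mutex taken in .send_message() is only dropped once
the core signals completion through .mark_txdone(). A simplified view of one
synchronous command over the OP-TEE channel (illustrative sketch, not the literal
core code):

  static int optee_tx_path_sketch(const struct scmi_transport_ops *ops,
                                  struct scmi_chan_info *cinfo,
                                  struct scmi_xfer *xfer)
  {
          int ret;

          ret = ops->send_message(cinfo, xfer);   /* locks channel->mu, invokes OP-TEE */
          if (ret)
                  return ret;     /* on failure .send_message() dropped the lock itself */

          /* .sync_cmds_completed_on_ret: the reply already sits in the SMT area */
          ops->fetch_response(cinfo, xfer);
          ops->mark_txdone(cinfo, ret);           /* releases channel->mu */

          return 0;
  }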
Link: https://lore.kernel.org/r/20211220195646.44498-6-cristian.marussi@arm.com Cc: Etienne Carriere Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/optee.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 175b39bcd470..f460e12be4ea 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -363,6 +363,9 @@ static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *de if (ret) goto err_close_sess; + /* Enable polling */ + cinfo->no_completion_irq = true; + mutex_lock(&scmi_optee_private->mu); list_add(&channel->link, &scmi_optee_private->channel_list); mutex_unlock(&scmi_optee_private->mu); @@ -423,9 +426,8 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo, shmem_tx_prepare(shmem, xfer); ret = invoke_process_smt_channel(channel); - - scmi_rx_callback(cinfo, shmem_read_header(shmem), NULL); - mutex_unlock(&channel->mu); + if (ret) + mutex_unlock(&channel->mu); return ret; } @@ -439,13 +441,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(shmem, xfer); } -static bool scmi_optee_poll_done(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret) { struct scmi_optee_channel *channel = cinfo->transport_info; - struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); - return shmem_poll_done(shmem, xfer); + mutex_unlock(&channel->mu); } static struct scmi_transport_ops scmi_optee_ops = { @@ -454,9 +454,9 @@ static struct scmi_transport_ops scmi_optee_ops = { .chan_setup = scmi_optee_chan_setup, .chan_free = scmi_optee_chan_free, .send_message = scmi_optee_send_message, + .mark_txdone = scmi_optee_mark_txdone, .fetch_response = scmi_optee_fetch_response, .clear_channel = scmi_optee_clear_channel, - .poll_done = scmi_optee_poll_done, }; static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) @@ -562,4 +562,5 @@ const struct scmi_desc scmi_optee_desc = { .max_rx_timeout_ms = 30, .max_msg = 20, .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, + .sync_cmds_completed_on_ret = true, }; From 69255e746890274e887ba36a403019380cde0b48 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:41 +0000 Subject: [PATCH 17/28] firmware: arm_scmi: Add support for atomic transports An SCMI transport can be configured as .atomic_enabled in order to signal to the SCMI core that all its TX path is executed in atomic context and that, when requested, polling mode should be used while waiting for command responses. When a specific platform configuration had properly configured such a transport as .atomic_enabled, the SCMI core will also take care not to sleep in the corresponding RX path while waiting for a response if that specific command transaction was requested as atomic using polling mode. Asynchronous commands should not be used in an atomic context and so a warning is emitted if polling was requested for an asynchronous command. Add also a method to check, from the SCMI drivers, if the underlying SCMI transport is currently configured to support atomic transactions: this will be used by upper layers to determine if atomic requests can be supported at all on this SCMI instance. 
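As an illustration, a hypothetical SCMI driver could query the new handle
operation at probe time to decide between sleeping and non-sleeping behaviour
(names below are made up for the example):

  #include <linux/scmi_protocol.h>

  static int example_scmi_probe(struct scmi_device *sdev)
  {
          const struct scmi_handle *handle = sdev->handle;

          if (handle->is_transport_atomic(handle))
                  dev_info(&sdev->dev,
                           "SCMI transactions will not sleep: atomic usage possible\n");
          /* e.g. a clock provider could then register non-sleeping clk_ops */

          return 0;
  }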
Link: https://lore.kernel.org/r/20211220195646.44498-7-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 4 +++ drivers/firmware/arm_scmi/driver.c | 51 ++++++++++++++++++++++++++++-- include/linux/scmi_protocol.h | 8 +++++ 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 24b1d1ac5f12..01d42c2069d4 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -416,6 +416,9 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * Used by core internally only when polling is * selected as a waiting for reply method: i.e. * if a completion irq was found use that anyway. + * @atomic_enabled: Flag to indicate that this transport, which is assured not + * to sleep anywhere on the TX path, can be used in atomic mode + * when requested. */ struct scmi_desc { int (*transport_init)(void); @@ -426,6 +429,7 @@ struct scmi_desc { int max_msg_size; const bool force_polling; const bool sync_cmds_completed_on_ret; + const bool atomic_enabled; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index a1c33d36800b..78924db59290 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -923,6 +923,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph, * @ph: Pointer to SCMI protocol handle * @xfer: Transfer to initiate and wait for response * + * Using asynchronous commands in atomic/polling mode should be avoided since + * it could cause long busy-waiting here, so ignore polling for the delayed + * response and WARN if it was requested for this command transaction since + * upper layers should refrain from issuing such kind of requests. + * + * The only other option would have been to refrain from using any asynchronous + * command even if made available, when an atomic transport is detected, and + * instead forcibly use the synchronous version (thing that can be easily + * attained at the protocol layer), but this would also have led to longer + * stalls of the channel for synchronous commands and possibly timeouts. + * (in other words there is usually a good reason if a platform provides an + * asynchronous version of a command and we should prefer to use it...just not + * when using atomic/polling mode) + * * Return: -ETIMEDOUT in case of no delayed response, if transmit error, * return corresponding error, else if all goes well, return 0. */ @@ -934,12 +948,24 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph, xfer->async_done = &async_response; + /* + * Delayed responses should not be polled, so an async command should + * not have been used when requiring an atomic/poll context; WARN and + * perform instead a sleeping wait. 
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer) + */ + WARN_ON_ONCE(xfer->hdr.poll_completion); + ret = do_xfer(ph, xfer); if (!ret) { - if (!wait_for_completion_timeout(xfer->async_done, timeout)) + if (!wait_for_completion_timeout(xfer->async_done, timeout)) { + dev_err(ph->dev, + "timed out in delayed resp(caller: %pS)\n", + (void *)_RET_IP_); ret = -ETIMEDOUT; - else if (xfer->hdr.status) + } else if (xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); + } } xfer->async_done = NULL; @@ -1373,6 +1399,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) WARN_ON(ret); } +/** + * scmi_is_transport_atomic - Method to check if underlying transport for an + * SCMI instance is configured as atomic. + * + * @handle: A reference to the SCMI platform instance. + * + * Return: True if transport is configured as atomic + */ +static bool scmi_is_transport_atomic(const struct scmi_handle *handle) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + return info->desc->atomic_enabled && + is_transport_polling_capable(info); +} + static inline struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) { @@ -1910,6 +1952,7 @@ static int scmi_probe(struct platform_device *pdev) handle->version = &info->version; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + handle->is_transport_atomic = scmi_is_transport_atomic; if (desc->ops->link_supplier) { ret = desc->ops->link_supplier(dev); @@ -1928,6 +1971,10 @@ static int scmi_probe(struct platform_device *pdev) if (scmi_notification_init(handle)) dev_err(dev, "SCMI Notifications NOT available.\n"); + if (info->desc->atomic_enabled && !is_transport_polling_capable(info)) + dev_err(dev, + "Transport is not polling capable. Atomic mode not supported.\n"); + /* * Trigger SCMI Base protocol initialization. * It's mandatory and won't be ever released/deinit until the diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 80e781c51ddc..9f895cb81818 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -612,6 +612,13 @@ struct scmi_notify_ops { * @devm_protocol_get: devres managed method to acquire a protocol and get specific * operations and a dedicated protocol handler * @devm_protocol_put: devres managed method to release a protocol + * @is_transport_atomic: method to check if the underlying transport for this + * instance handle is configured to support atomic + * transactions for commands. + * Some users of the SCMI stack in the upper layers could + * be interested to know if they can assume SCMI + * command transactions associated to this handle will + * never sleep and act accordingly. * @notify_ops: pointer to set of notifications related operations */ struct scmi_handle { @@ -622,6 +629,7 @@ struct scmi_handle { (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); + bool (*is_transport_atomic)(const struct scmi_handle *handle); const struct scmi_notify_ops *notify_ops; }; From 0bfdca8a8661aaa2433b3f7b74d83e3520aa56e5 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:42 +0000 Subject: [PATCH 18/28] firmware: arm_scmi: Add atomic mode support to smc transport Add a Kernel configuration option to enable SCMI SMC transport atomic mode operation for selected SCMI transactions and leave it as default disabled. 
Substitute mutex usages with busy-waiting and declare smc transport as .atomic_enabled if such Kernel configuration option is enabled. Link: https://lore.kernel.org/r/20211220195646.44498-8-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 14 ++++++++ drivers/firmware/arm_scmi/smc.c | 56 +++++++++++++++++++++++++++---- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 638ecec89ff1..d429326433d1 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -78,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC If you want the ARM SCMI PROTOCOL stack to include support for a transport based on SMC, answer Y. +config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE + bool "Enable atomic mode support for SCMI SMC transport" + depends on ARM_SCMI_TRANSPORT_SMC + help + Enable support of atomic operation for SCMI SMC based transport. + + If you want the SCMI SMC based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + config ARM_SCMI_TRANSPORT_VIRTIO bool "SCMI transport based on VirtIO" depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index df0defd9f8bb..6c7871a40611 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include +#include #include #include "common.h" @@ -23,14 +25,20 @@ * * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area - * @shmem_lock: Lock to protect access to Tx/Rx shared memory area + * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. + * Used when NOT operating in atomic mode. + * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. + * Used when operating in atomic mode. 
* @func_id: smc/hvc call function id */ struct scmi_smc { struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + /* Protect access to shmem area */ struct mutex shmem_lock; +#define INFLIGHT_NONE MSG_TOKEN_MAX + atomic_t inflight; u32 func_id; }; @@ -54,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx) return true; } +static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_init(&scmi_info->shmem_lock); +} + +static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) +{ + int ret; + + ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); + + return ret == INFLIGHT_NONE; +} + +static inline void +smc_channel_lock_acquire(struct scmi_smc *scmi_info, + struct scmi_xfer *xfer __maybe_unused) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); + else + mutex_lock(&scmi_info->shmem_lock); +} + +static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_unlock(&scmi_info->shmem_lock); +} + static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { @@ -114,7 +157,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, scmi_info->func_id = func_id; scmi_info->cinfo = cinfo; - mutex_init(&scmi_info->shmem_lock); + smc_channel_lock_init(scmi_info); cinfo->transport_info = scmi_info; return 0; @@ -140,10 +183,10 @@ static int smc_send_message(struct scmi_chan_info *cinfo, struct arm_smccc_res res; /* - * Channel lock will be released only once response has been + * Channel will be released only once response has been * surely fully retrieved, so after .mark_txdone() */ - mutex_lock(&scmi_info->shmem_lock); + smc_channel_lock_acquire(scmi_info, xfer); shmem_tx_prepare(scmi_info->shmem, xfer); @@ -151,7 +194,7 @@ static int smc_send_message(struct scmi_chan_info *cinfo, /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ if (res.a0) { - mutex_unlock(&scmi_info->shmem_lock); + smc_channel_lock_release(scmi_info); return -EOPNOTSUPP; } @@ -170,7 +213,7 @@ static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) { struct scmi_smc *scmi_info = cinfo->transport_info; - mutex_unlock(&scmi_info->shmem_lock); + smc_channel_lock_release(scmi_info); } static const struct scmi_transport_ops scmi_smc_ops = { @@ -196,4 +239,5 @@ const struct scmi_desc scmi_smc_desc = { * from the shared memory area. */ .sync_cmds_completed_on_ret = true, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), }; From 94d0cd1da14af0042c8ee7c2cf401dfc321c575c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:43 +0000 Subject: [PATCH 19/28] firmware: arm_scmi: Add new parameter to mark_txdone Add a new xfer parameter to mark_txdone transport operation which enables the SCMI core to optionally pass back into the transport layer a reference to the xfer descriptor that is being handled. 
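The resulting operation prototype and a minimal user are sketched below
(the transport name, its private data and the release helper are
hypothetical; transports not interested in the extra argument can simply
ignore it, as the existing ones do):

  void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
                      struct scmi_xfer *xfer);

  static void foo_mark_txdone(struct scmi_chan_info *cinfo, int ret,
                              struct scmi_xfer *xfer)
  {
          struct foo_channel *foo_chan = cinfo->transport_info;

          /* Per-xfer cleanup is now possible on the tx-done path. */
          foo_channel_release_xfer(foo_chan, xfer);
  }
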
Link: https://lore.kernel.org/r/20211220195646.44498-9-cristian.marussi@arm.com Reviewed-by: Florian Fainelli Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/common.h | 3 ++- drivers/firmware/arm_scmi/driver.c | 2 +- drivers/firmware/arm_scmi/mailbox.c | 3 ++- drivers/firmware/arm_scmi/optee.c | 3 ++- drivers/firmware/arm_scmi/smc.c | 3 ++- 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 01d42c2069d4..4fda84bfab42 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -378,7 +378,8 @@ struct scmi_transport_ops { unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo); int (*send_message)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); - void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); + void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer); void (*fetch_response)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); void (*fetch_notification)(struct scmi_chan_info *cinfo, diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 78924db59290..c2e7897ff56e 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -897,7 +897,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = scmi_to_linux_errno(xfer->hdr.status); if (info->desc->ops->mark_txdone) - info->desc->ops->mark_txdone(cinfo, ret); + info->desc->ops->mark_txdone(cinfo, ret, xfer); trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, ret); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index e09eb12bf421..08ff4d110beb 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo, return ret; } -static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_mailbox *smbox = cinfo->transport_info; diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index f460e12be4ea..734f1eeee161 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -441,7 +441,8 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(shmem, xfer); } -static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_optee_channel *channel = cinfo->transport_info; diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 6c7871a40611..745acfdd0b3d 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -209,7 +209,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(scmi_info->shmem, xfer); } -static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_smc *scmi_info = cinfo->transport_info; From cdf157faaafe36c0823148587a78147200898e87 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 3 Feb 2022 09:22:01 +0100 Subject: [PATCH 20/28] firmware: arm_scmi: Disable ftrace for Clang Thumb2 builds The SMC calling convention 
designates R0-R7 as input registers in AArch32 mode, and this conflicts with the compiler's use of R7 as a frame pointer when building in Thumb2 mode. Generally, we don't enable the frame pointer, and GCC happily enables the -pg profiling hooks without them. However, Clang refuses, and errors out with the message below: drivers/firmware/arm_scmi/smc.c:152:2: error: write to reserved register 'R7' arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); ^ include/linux/arm-smccc.h:550:4: note: expanded from macro 'arm_smccc_1_1_invoke' arm_smccc_1_1_smc(__VA_ARGS__); \ ^ Let's just disable ftrace for the compilation unit when building this configuration. Link: https://lore.kernel.org/r/20220203082204.1176734-11-ardb@kernel.org Reviewed-by: Nick Desaulniers Signed-off-by: Ard Biesheuvel Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index ef66ec8ca917..8d4afadda38c 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -12,3 +12,10 @@ scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o + +ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) +# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame +# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling +# hooks are inserted via the -pg switch. +CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) +endif From 42e90eb53bf3f6fc43c667dd377779e3ebeb748a Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:27 +0000 Subject: [PATCH 21/28] firmware: arm_scmi: Add a virtio channel refcount Currently SCMI VirtIO channels are marked with a ready flag and related lock to track channel lifetime and support proper synchronization at shutdown when virtqueues have to be stopped. This leads to some extended spinlocked sections with IRQs off on the RX path to keep hold of the ready flag and does not scale well especially when SCMI VirtIO polling mode will be introduced. Add an SCMI VirtIO channel dedicated refcount to track active users on both the TX and the RX path and properly enforce synchronization and cleanup at shutdown, inhibiting further usage of the channel once freed. Link: https://lore.kernel.org/r/20220217131234.50328-2-cristian.marussi@arm.com Cc: "Michael S. Tsirkin" Cc: Igor Skalkin Cc: Peter Hilber Cc: virtualization@lists.linux-foundation.org Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/virtio.c | 143 +++++++++++++++++++---------- 1 file changed, 92 insertions(+), 51 deletions(-) diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 97d7cf53b774..71b016d1a655 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -17,7 +17,9 @@ * virtqueue. Access to each virtqueue is protected by spinlocks. */ +#include #include +#include #include #include #include @@ -27,6 +29,7 @@ #include "common.h" +#define VIRTIO_MAX_RX_TIMEOUT_MS 60000 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. 
*/ #define VIRTIO_SCMI_MAX_PDU_SIZE \ (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) @@ -39,23 +42,21 @@ * @cinfo: SCMI Tx or Rx channel * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only * @is_rx: Whether channel is an Rx channel - * @ready: Whether transport user is ready to hear about channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except ready. - * @ready_lock: Protects access to ready. If required, it must be taken before - * lock. + * @lock: Protects access to all members except users. + * @shutdown_done: A reference to a completion used when freeing this channel. + * @users: A reference count to currently active users of this channel. */ struct scmi_vio_channel { struct virtqueue *vqueue; struct scmi_chan_info *cinfo; struct list_head free_list; bool is_rx; - bool ready; unsigned int max_msg; - /* lock to protect access to all members except ready. */ + /* lock to protect access to all members except users. */ spinlock_t lock; - /* lock to rotects access to ready flag. */ - spinlock_t ready_lock; + struct completion *shutdown_done; + refcount_t users; }; /** @@ -76,6 +77,63 @@ struct scmi_vio_msg { /* Only one SCMI VirtIO device can possibly exist */ static struct virtio_device *scmi_vdev; +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch, + struct scmi_chan_info *cinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + refcount_set(&vioch->users, 1); +} + +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) +{ + return refcount_inc_not_zero(&vioch->users); +} + +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) +{ + if (refcount_dec_and_test(&vioch->users)) { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + if (vioch->shutdown_done) { + vioch->cinfo = NULL; + complete(vioch->shutdown_done); + } + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + + /* + * Prepare to wait for the last release if not already released + * or in progress. 
+ */ + spin_lock_irqsave(&vioch->lock, flags); + if (!vioch->cinfo || vioch->shutdown_done) { + spin_unlock_irqrestore(&vioch->lock, flags); + return; + } + vioch->shutdown_done = &vioch_shutdown_done; + virtio_break_device(vioch->vqueue->vdev); + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + /* Let any possibly concurrent RX path release the channel */ + wait_for_completion(vioch->shutdown_done); +} + static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); @@ -119,7 +177,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, static void scmi_vio_complete_cb(struct virtqueue *vqueue) { - unsigned long ready_flags; + unsigned long flags; unsigned int length; struct scmi_vio_channel *vioch; struct scmi_vio_msg *msg; @@ -130,27 +188,24 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; for (;;) { - spin_lock_irqsave(&vioch->ready_lock, ready_flags); + if (!scmi_vio_channel_acquire(vioch)) + return; - if (!vioch->ready) { - if (!cb_enabled) - (void)virtqueue_enable_cb(vqueue); - goto unlock_ready_out; - } - - /* IRQs already disabled here no need to irqsave */ - spin_lock(&vioch->lock); + spin_lock_irqsave(&vioch->lock, flags); if (cb_enabled) { virtqueue_disable_cb(vqueue); cb_enabled = false; } msg = virtqueue_get_buf(vqueue, &length); if (!msg) { - if (virtqueue_enable_cb(vqueue)) - goto unlock_out; + if (virtqueue_enable_cb(vqueue)) { + spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return; + } cb_enabled = true; } - spin_unlock(&vioch->lock); + spin_unlock_irqrestore(&vioch->lock, flags); if (msg) { msg->rx_len = length; @@ -161,19 +216,14 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) } /* - * Release ready_lock and re-enable IRQs between loop iterations - * to allow virtio_chan_free() to possibly kick in and set the - * flag vioch->ready to false even in between processing of - * messages, so as to force outstanding messages to be ignored - * when system is shutting down. + * Release vio channel between loop iterations to allow + * virtio_chan_free() to eventually fully release it when + * shutting down; in such a case, any outstanding message will + * be ignored since this loop will bail out at the next + * iteration. 
*/ - spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); + scmi_vio_channel_release(vioch); } - -unlock_out: - spin_unlock(&vioch->lock); -unlock_ready_out: - spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); } static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; @@ -273,35 +323,20 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, } } - spin_lock_irqsave(&vioch->lock, flags); - cinfo->transport_info = vioch; - /* Indirectly setting channel not available any more */ - vioch->cinfo = cinfo; - spin_unlock_irqrestore(&vioch->lock, flags); - - spin_lock_irqsave(&vioch->ready_lock, flags); - vioch->ready = true; - spin_unlock_irqrestore(&vioch->ready_lock, flags); + scmi_vio_channel_ready(vioch, cinfo); return 0; } static int virtio_chan_free(int id, void *p, void *data) { - unsigned long flags; struct scmi_chan_info *cinfo = p; struct scmi_vio_channel *vioch = cinfo->transport_info; - spin_lock_irqsave(&vioch->ready_lock, flags); - vioch->ready = false; - spin_unlock_irqrestore(&vioch->ready_lock, flags); + scmi_vio_channel_cleanup_sync(vioch); scmi_free_channel(cinfo, data, id); - spin_lock_irqsave(&vioch->lock, flags); - vioch->cinfo = NULL; - spin_unlock_irqrestore(&vioch->lock, flags); - return 0; } @@ -316,10 +351,14 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, int rc; struct scmi_vio_msg *msg; + if (!scmi_vio_channel_acquire(vioch)) + return -EINVAL; + spin_lock_irqsave(&vioch->lock, flags); if (list_empty(&vioch->free_list)) { spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); return -EBUSY; } @@ -342,6 +381,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return rc; } @@ -416,7 +457,6 @@ static int scmi_vio_probe(struct virtio_device *vdev) unsigned int sz; spin_lock_init(&channels[i].lock); - spin_lock_init(&channels[i].ready_lock); INIT_LIST_HEAD(&channels[i].free_list); channels[i].vqueue = vqs[i]; @@ -503,7 +543,8 @@ const struct scmi_desc scmi_virtio_desc = { .transport_init = virtio_scmi_init, .transport_exit = virtio_scmi_exit, .ops = &scmi_virtio_ops, - .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */ + /* for non-realtime virtio devices */ + .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, .max_msg = 0, /* overridden by virtio_get_max_msg() */ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, }; From 9a1699bda095e1ee2f5d0aa43a91d1fccca8b69c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:28 +0000 Subject: [PATCH 22/28] firmware: arm_scmi: Review virtio free_list handling Add a new spinlock dedicated to the access of the TX free list and a couple of helpers to get and put messages back and forth from the free_list. Link: https://lore.kernel.org/r/20220217131234.50328-3-cristian.marussi@arm.com Cc: "Michael S. Tsirkin" Cc: Igor Skalkin Cc: Peter Hilber Cc: virtualization@lists.linux-foundation.org Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/virtio.c | 88 +++++++++++++++++++----------- 1 file changed, 57 insertions(+), 31 deletions(-) diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 71b016d1a655..483b192fcc2d 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -40,20 +40,23 @@ * * @vqueue: Associated virtqueue * @cinfo: SCMI Tx or Rx channel + * @free_lock: Protects access to the @free_list. 
* @free_list: List of unused scmi_vio_msg, maintained for Tx channels only * @is_rx: Whether channel is an Rx channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except users. + * @lock: Protects access to all members except users, free_list. * @shutdown_done: A reference to a completion used when freeing this channel. * @users: A reference count to currently active users of this channel. */ struct scmi_vio_channel { struct virtqueue *vqueue; struct scmi_chan_info *cinfo; + /* lock to protect access to the free list. */ + spinlock_t free_lock; struct list_head free_list; bool is_rx; unsigned int max_msg; - /* lock to protect access to all members except users. */ + /* lock to protect access to all members except users, free_list */ spinlock_t lock; struct completion *shutdown_done; refcount_t users; @@ -134,18 +137,49 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) wait_for_completion(vioch->shutdown_done); } +/* Assumes to be called with vio channel acquired already */ +static struct scmi_vio_msg * +scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->free_lock, flags); + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->free_lock, flags); + return NULL; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del_init(&msg->list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + + return msg; +} + +/* Assumes to be called with vio channel acquired already */ +static void scmi_virtio_put_free_msg(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); +} + static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); } static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg, - struct device *dev) + struct scmi_vio_msg *msg) { struct scatterlist sg_in; int rc; unsigned long flags; + struct device *dev = &vioch->vqueue->vdev->dev; sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); @@ -162,17 +196,17 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, return rc; } +/* + * Assume to be called with channel already acquired or not ready at all; + * vioch->lock MUST NOT have been already acquired. + */ static void scmi_finalize_message(struct scmi_vio_channel *vioch, struct scmi_vio_msg *msg) { - if (vioch->is_rx) { - scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev); - } else { - /* Here IRQs are assumed to be already disabled by the caller */ - spin_lock(&vioch->lock); - list_add(&msg->list, &vioch->free_list); - spin_unlock(&vioch->lock); - } + if (vioch->is_rx) + scmi_vio_feed_vq_rx(vioch, msg); + else + scmi_virtio_put_free_msg(vioch, msg); } static void scmi_vio_complete_cb(struct virtqueue *vqueue) @@ -284,7 +318,6 @@ static bool virtio_chan_available(struct device *dev, int idx) static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { - unsigned long flags; struct scmi_vio_channel *vioch; int index = tx ? 
VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; int i; @@ -314,13 +347,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!msg->input) return -ENOMEM; - if (tx) { - spin_lock_irqsave(&vioch->lock, flags); - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->lock, flags); - } else { - scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev); - } + scmi_finalize_message(vioch, msg); } scmi_vio_channel_ready(vioch, cinfo); @@ -354,33 +381,31 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, if (!scmi_vio_channel_acquire(vioch)) return -EINVAL; - spin_lock_irqsave(&vioch->lock, flags); - - if (list_empty(&vioch->free_list)) { - spin_unlock_irqrestore(&vioch->lock, flags); + msg = scmi_virtio_get_free_msg(vioch); + if (!msg) { scmi_vio_channel_release(vioch); return -EBUSY; } - msg = list_first_entry(&vioch->free_list, typeof(*msg), list); - list_del(&msg->list); - msg_tx_prepare(msg->request, xfer); sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + spin_lock_irqsave(&vioch->lock, flags); + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); - if (rc) { - list_add(&msg->list, &vioch->free_list); + if (rc) dev_err(vioch->cinfo->dev, "failed to add to TX virtqueue (%d)\n", rc); - } else { + else virtqueue_kick(vioch->vqueue); - } spin_unlock_irqrestore(&vioch->lock, flags); + if (rc) + scmi_virtio_put_free_msg(vioch, msg); + scmi_vio_channel_release(vioch); return rc; @@ -457,6 +482,7 @@ static int scmi_vio_probe(struct virtio_device *vdev) unsigned int sz; spin_lock_init(&channels[i].lock); + spin_lock_init(&channels[i].free_lock); INIT_LIST_HEAD(&channels[i].free_list); channels[i].vqueue = vqs[i]; From 5a3b7185c47c061330bdd71233126181d55ed3d5 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:29 +0000 Subject: [PATCH 23/28] firmware: arm_scmi: Add atomic mode support to virtio transport Add support for .mark_txdone and .poll_done transport operations to SCMI VirtIO transport as pre-requisites to enable atomic operations. Add a Kernel configuration option to enable SCMI VirtIO transport polling and atomic mode for selected SCMI transactions while leaving it default disabled. Link: https://lore.kernel.org/r/20220217131234.50328-4-cristian.marussi@arm.com Cc: "Michael S. Tsirkin" Cc: Igor Skalkin Cc: Peter Hilber Cc: virtualization@lists.linux-foundation.org Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Kconfig | 15 ++ drivers/firmware/arm_scmi/driver.c | 6 +- drivers/firmware/arm_scmi/virtio.c | 392 +++++++++++++++++++++++++++-- 3 files changed, 392 insertions(+), 21 deletions(-) diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index d429326433d1..7794bd41eaa0 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -118,6 +118,21 @@ config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE the ones implemented by kvmtool) and let the core Kernel VirtIO layer take care of the needed conversions, say N. +config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE + bool "Enable atomic mode for SCMI VirtIO transport" + depends on ARM_SCMI_TRANSPORT_VIRTIO + help + Enable support of atomic operation for SCMI VirtIO based transport. + + If you want the SCMI VirtIO based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. 
+ + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + endif #ARM_SCMI_PROTOCOL config ARM_SCMI_POWER_DOMAIN diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index c2e7897ff56e..4fd5a35ffa2f 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -648,7 +648,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, unpack_scmi_header(msg_hdr, &xfer->hdr); if (priv) - xfer->priv = priv; + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, @@ -680,7 +681,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer->rx.len = info->desc->max_msg_size; if (priv) - xfer->priv = priv; + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 483b192fcc2d..14709dbc96a1 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -3,8 +3,8 @@ * Virtio Transport driver for Arm System Control and Management Interface * (SCMI). * - * Copyright (C) 2020-2021 OpenSynergy. - * Copyright (C) 2021 ARM Ltd. + * Copyright (C) 2020-2022 OpenSynergy. + * Copyright (C) 2021-2022 ARM Ltd. */ /** @@ -42,9 +42,14 @@ * @cinfo: SCMI Tx or Rx channel * @free_lock: Protects access to the @free_list. * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only + * @deferred_tx_work: Worker for TX deferred replies processing + * @deferred_tx_wq: Workqueue for TX deferred replies + * @pending_lock: Protects access to the @pending_cmds_list. + * @pending_cmds_list: List of pre-fetched commands queueud for later processing * @is_rx: Whether channel is an Rx channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except users, free_list. + * @lock: Protects access to all members except users, free_list and + * pending_cmds_list. * @shutdown_done: A reference to a completion used when freeing this channel. * @users: A reference count to currently active users of this channel. */ @@ -54,14 +59,29 @@ struct scmi_vio_channel { /* lock to protect access to the free list. */ spinlock_t free_lock; struct list_head free_list; + /* lock to protect access to the pending list. 
*/ + spinlock_t pending_lock; + struct list_head pending_cmds_list; + struct work_struct deferred_tx_work; + struct workqueue_struct *deferred_tx_wq; bool is_rx; unsigned int max_msg; - /* lock to protect access to all members except users, free_list */ + /* + * Lock to protect access to all members except users, free_list and + * pending_cmds_list + */ spinlock_t lock; struct completion *shutdown_done; refcount_t users; }; +enum poll_states { + VIO_MSG_NOT_POLLED, + VIO_MSG_POLL_TIMEOUT, + VIO_MSG_POLLING, + VIO_MSG_POLL_DONE, +}; + /** * struct scmi_vio_msg - Transport PDU information * @@ -69,12 +89,23 @@ struct scmi_vio_channel { * @input: SDU used for (delayed) responses and notifications * @list: List which scmi_vio_msg may be part of * @rx_len: Input SDU size in bytes, once input has been received + * @poll_idx: Last used index registered for polling purposes if this message + * transaction reply was configured for polling. + * @poll_status: Polling state for this message. + * @poll_lock: A lock to protect @poll_status + * @users: A reference count to track this message users and avoid premature + * freeing (and reuse) when polling and IRQ execution paths interleave. */ struct scmi_vio_msg { struct scmi_msg_payld *request; struct scmi_msg_payld *input; struct list_head list; unsigned int rx_len; + unsigned int poll_idx; + enum poll_states poll_status; + /* Lock to protect access to poll_status */ + spinlock_t poll_lock; + refcount_t users; }; /* Only one SCMI VirtIO device can possibly exist */ @@ -117,6 +148,7 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) { unsigned long flags; DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + void *deferred_wq = NULL; /* * Prepare to wait for the last release if not already released @@ -127,10 +159,19 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) spin_unlock_irqrestore(&vioch->lock, flags); return; } + vioch->shutdown_done = &vioch_shutdown_done; virtio_break_device(vioch->vqueue->vdev); + if (!vioch->is_rx && vioch->deferred_tx_wq) { + deferred_wq = vioch->deferred_tx_wq; + /* Cannot be kicked anymore after this...*/ + vioch->deferred_tx_wq = NULL; + } spin_unlock_irqrestore(&vioch->lock, flags); + if (deferred_wq) + destroy_workqueue(deferred_wq); + scmi_vio_channel_release(vioch); /* Let any possibly concurrent RX path release the channel */ @@ -154,18 +195,34 @@ scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) list_del_init(&msg->list); spin_unlock_irqrestore(&vioch->free_lock, flags); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_NOT_POLLED; + refcount_set(&msg->users, 1); + return msg; } -/* Assumes to be called with vio channel acquired already */ -static void scmi_virtio_put_free_msg(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) +static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg) { - unsigned long flags; + return refcount_inc_not_zero(&msg->users); +} - spin_lock_irqsave(&vioch->free_lock, flags); - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->free_lock, flags); +/* Assumes to be called with vio channel acquired already */ +static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + bool ret; + + ret = refcount_dec_and_test(&msg->users); + if (ret) { + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + } + + 
return ret; } static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) @@ -206,7 +263,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, if (vioch->is_rx) scmi_vio_feed_vq_rx(vioch, msg); else - scmi_virtio_put_free_msg(vioch, msg); + scmi_vio_msg_release(vioch, msg); } static void scmi_vio_complete_cb(struct virtqueue *vqueue) @@ -230,6 +287,7 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) virtqueue_disable_cb(vqueue); cb_enabled = false; } + msg = virtqueue_get_buf(vqueue, &length); if (!msg) { if (virtqueue_enable_cb(vqueue)) { @@ -260,6 +318,49 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) } } +static void scmi_vio_deferred_tx_worker(struct work_struct *work) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg, *tmp; + + vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); + + if (!scmi_vio_channel_acquire(vioch)) + return; + + /* + * Process pre-fetched messages: these could be non-polled messages or + * late timed-out replies to polled messages dequeued by chance while + * polling for some other messages: this worker is in charge to process + * the valid non-expired messages and anyway finally free all of them. + */ + spin_lock_irqsave(&vioch->pending_lock, flags); + + /* Scan the list of possibly pre-fetched messages during polling. */ + list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { + list_del(&msg->list); + + /* + * Channel is acquired here (cannot vanish) and this message + * is no more processed elsewhere so no poll_lock needed. + */ + if (msg->poll_status == VIO_MSG_NOT_POLLED) + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + /* Free the processed message once done */ + scmi_vio_msg_release(vioch, msg); + } + + spin_unlock_irqrestore(&vioch->pending_lock, flags); + + /* Process possibly still pending messages */ + scmi_vio_complete_cb(vioch->vqueue); + + scmi_vio_channel_release(vioch); +} + static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; static vq_callback_t *scmi_vio_complete_callbacks[] = { @@ -327,6 +428,19 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + /* Setup a deferred worker for polling. 
*/ + if (tx && !vioch->deferred_tx_wq) { + vioch->deferred_tx_wq = + alloc_workqueue(dev_name(&scmi_vdev->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!vioch->deferred_tx_wq) + return -ENOMEM; + + INIT_WORK(&vioch->deferred_tx_work, + scmi_vio_deferred_tx_worker); + } + for (i = 0; i < vioch->max_msg; i++) { struct scmi_vio_msg *msg; @@ -340,6 +454,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, GFP_KERNEL); if (!msg->request) return -ENOMEM; + spin_lock_init(&msg->poll_lock); + refcount_set(&msg->users, 1); } msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, @@ -394,6 +510,21 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, spin_lock_irqsave(&vioch->lock, flags); + /* + * If polling was requested for this transaction: + * - retrieve last used index (will be used as polling reference) + * - bind the polled message to the xfer via .priv + * - grab an additional msg refcount for the poll-path + */ + if (xfer->hdr.poll_completion) { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_POLLING; + scmi_vio_msg_acquire(msg); + /* Ensure initialized msg is visibly bound to xfer */ + smp_store_mb(xfer->priv, msg); + } + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); if (rc) dev_err(vioch->cinfo->dev, @@ -403,8 +534,13 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, spin_unlock_irqrestore(&vioch->lock, flags); - if (rc) - scmi_virtio_put_free_msg(vioch, msg); + if (rc) { + /* Ensure order between xfer->priv clear and vq feeding */ + smp_store_mb(xfer->priv, NULL); + if (xfer->hdr.poll_completion) + scmi_vio_msg_release(vioch, msg); + scmi_vio_msg_release(vioch, msg); + } scmi_vio_channel_release(vioch); @@ -416,10 +552,8 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_vio_msg *msg = xfer->priv; - if (msg) { + if (msg) msg_fetch_response(msg->input, msg->rx_len, xfer); - xfer->priv = NULL; - } } static void virtio_fetch_notification(struct scmi_chan_info *cinfo, @@ -427,10 +561,225 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo, { struct scmi_vio_msg *msg = xfer->priv; - if (msg) { + if (msg) msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); - xfer->priv = NULL; +} + +/** + * virtio_mark_txdone - Mark transmission done + * + * Free only completed polling transfer messages. + * + * Note that in the SCMI VirtIO transport we never explicitly release still + * outstanding but timed-out messages by forcibly re-adding them to the + * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the + * TX deferred worker, eventually clean up such messages once, finally, a late + * reply is received and discarded (if ever). + * + * This approach was deemed preferable since those pending timed-out buffers are + * still effectively owned by the SCMI platform VirtIO device even after timeout + * expiration: forcibly freeing and reusing them before they had been returned + * explicitly by the SCMI platform could lead to subtle bugs due to message + * corruption. + * An SCMI platform VirtIO device which never returns message buffers is + * anyway broken and it will quickly lead to exhaustion of available messages. 
+ * + * For this same reason, here, we take care to free only the polled messages + * that had been somehow replied (only if not by chance already processed on the + * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also + * any timed-out polled message if that indeed appears to have been at least + * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such + * messages won't be freed elsewhere. Any other polled message is marked as + * VIO_MSG_POLL_TIMEOUT. + * + * Possible late replies to timed-out polled messages will be eventually freed + * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if + * dequeued on some other polling path. + * + * @cinfo: SCMI channel info + * @ret: Transmission return code + * @xfer: Transfer descriptor + */ +static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer) +{ + unsigned long flags; + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scmi_vio_msg *msg = xfer->priv; + + if (!msg || !scmi_vio_channel_acquire(vioch)) + return; + + /* Ensure msg is unbound from xfer anyway at this point */ + smp_store_mb(xfer->priv, NULL); + + /* Must be a polled xfer and not already freed on the IRQ path */ + if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { + scmi_vio_channel_release(vioch); + return; } + + spin_lock_irqsave(&msg->poll_lock, flags); + /* Do not free timedout polled messages only if still inflight */ + if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) + scmi_vio_msg_release(vioch, msg); + else if (msg->poll_status == VIO_MSG_POLLING) + msg->poll_status = VIO_MSG_POLL_TIMEOUT; + spin_unlock_irqrestore(&msg->poll_lock, flags); + + scmi_vio_channel_release(vioch); +} + +/** + * virtio_poll_done - Provide polling support for VirtIO transport + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being poll for. + * + * VirtIO core provides a polling mechanism based only on last used indexes: + * this means that it is possible to poll the virtqueues waiting for something + * new to arrive from the host side, but the only way to check if the freshly + * arrived buffer was indeed what we were waiting for is to compare the newly + * arrived message descriptor with the one we are polling on. + * + * As a consequence it can happen to dequeue something different from the buffer + * we were poll-waiting for: if that is the case such early fetched buffers are + * then added to a the @pending_cmds_list list for later processing by a + * dedicated deferred worker. + * + * So, basically, once something new is spotted we proceed to de-queue all the + * freshly received used buffers until we found the one we were polling on, or, + * we have 'seemingly' emptied the virtqueue; if some buffers are still pending + * in the vqueue at the end of the polling loop (possible due to inherent races + * in virtqueues handling mechanisms), we similarly kick the deferred worker + * and let it process those, to avoid indefinitely looping in the .poll_done + * busy-waiting helper. + * + * Finally, we delegate to the deferred worker also the final free of any timed + * out reply to a polled message that we should dequeue. 
+ * + * Note that, since we do NOT have per-message suppress notification mechanism, + * the message we are polling for could be alternatively delivered via usual + * IRQs callbacks on another core which happened to have IRQs enabled while we + * are actively polling for it here: in such a case it will be handled as such + * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be + * transparently terminated anyway. + * + * Return: True once polling has successfully completed. + */ +static bool virtio_poll_done(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + bool pending, found = false; + unsigned int length, any_prefetched = 0; + unsigned long flags; + struct scmi_vio_msg *next_msg, *msg = xfer->priv; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + if (!msg) + return true; + + /* + * Processed already by other polling loop on another CPU ? + * + * Note that this message is acquired on the poll path so cannot vanish + * while inside this loop iteration even if concurrently processed on + * the IRQ path. + * + * Avoid to acquire poll_lock since polled_status can be changed + * in a relevant manner only later in this same thread of execution: + * any other possible changes made concurrently by other polling loops + * or by a reply delivered on the IRQ path have no meaningful impact on + * this loop iteration: in other words it is harmless to allow this + * possible race but let has avoid spinlocking with irqs off in this + * initial part of the polling loop. + */ + if (msg->poll_status == VIO_MSG_POLL_DONE) + return true; + + if (!scmi_vio_channel_acquire(vioch)) + return true; + + /* Has cmdq index moved at all ? */ + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + if (!pending) { + scmi_vio_channel_release(vioch); + return false; + } + + spin_lock_irqsave(&vioch->lock, flags); + virtqueue_disable_cb(vioch->vqueue); + + /* + * Process all new messages till the polled-for message is found OR + * the vqueue is empty. + */ + while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { + bool next_msg_done = false; + + /* + * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so + * that can be properly freed even on timeout in mark_txdone. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_POLLING) { + next_msg->poll_status = VIO_MSG_POLL_DONE; + next_msg_done = true; + } + spin_unlock(&next_msg->poll_lock); + + next_msg->rx_len = length; + /* Is the message we were polling for ? */ + if (next_msg == msg) { + found = true; + break; + } else if (next_msg_done) { + /* Skip the rest if this was another polled msg */ + continue; + } + + /* + * Enqueue for later processing any non-polled message and any + * timed-out polled one that we happen to have dequeued. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_NOT_POLLED || + next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) { + spin_unlock(&next_msg->poll_lock); + + any_prefetched++; + spin_lock(&vioch->pending_lock); + list_add_tail(&next_msg->list, + &vioch->pending_cmds_list); + spin_unlock(&vioch->pending_lock); + } else { + spin_unlock(&next_msg->poll_lock); + } + } + + /* + * When the polling loop has successfully terminated if something + * else was queued in the meantime, it will be served by a deferred + * worker OR by the normal IRQ/callback OR by other poll loops. + * + * If we are still looking for the polled reply, the polling index has + * to be updated to the current vqueue last used index. 
+ */ + if (found) { + pending = !virtqueue_enable_cb(vioch->vqueue); + } else { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + } + + if (vioch->deferred_tx_wq && (any_prefetched || pending)) + queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); + + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + return found; } static const struct scmi_transport_ops scmi_virtio_ops = { @@ -442,6 +791,8 @@ static const struct scmi_transport_ops scmi_virtio_ops = { .send_message = virtio_send_message, .fetch_response = virtio_fetch_response, .fetch_notification = virtio_fetch_notification, + .mark_txdone = virtio_mark_txdone, + .poll_done = virtio_poll_done, }; static int scmi_vio_probe(struct virtio_device *vdev) @@ -484,6 +835,8 @@ static int scmi_vio_probe(struct virtio_device *vdev) spin_lock_init(&channels[i].lock); spin_lock_init(&channels[i].free_lock); INIT_LIST_HEAD(&channels[i].free_list); + spin_lock_init(&channels[i].pending_lock); + INIT_LIST_HEAD(&channels[i].pending_cmds_list); channels[i].vqueue = vqs[i]; sz = virtqueue_get_vring_size(channels[i].vqueue); @@ -573,4 +926,5 @@ const struct scmi_desc scmi_virtio_desc = { .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, .max_msg = 0, /* overridden by virtio_get_max_msg() */ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), }; From 0539884ccc8ad4b94e78c8c7705058af1cf9307f Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:30 +0000 Subject: [PATCH 24/28] dt-bindings: firmware: arm,scmi: Add atomic-threshold-us optional property SCMI protocols in the platform can optionally signal to the OSPM agent the expected execution latency for a specific resource/operation pair. Introduce an SCMI system wide optional property to describe a global time threshold which can be configured on a per-platform base to determine the opportunity, or not, for an SCMI command advertised to have a higher latency than the threshold, to be considered for atomic operations: high-latency SCMI synchronous commands should be preferably issued in the usual non-atomic mode. Link: https://lore.kernel.org/r/20220217131234.50328-5-cristian.marussi@arm.com Cc: Rob Herring Cc: devicetree@vger.kernel.org Reviewed-by: Rob Herring Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- .../devicetree/bindings/firmware/arm,scmi.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml index eae15df36eef..590743883802 100644 --- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml +++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml @@ -81,6 +81,14 @@ properties: '#size-cells': const: 0 + atomic-threshold-us: + description: + An optional time value, expressed in microseconds, representing, on this + platform, the threshold above which any SCMI command, advertised to have + an higher-than-threshold execution latency, should not be considered for + atomic mode of operation, even if requested. 
+    default: 0
+
   arm,smc-id:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
       SMC id required when using smc or hvc transports
 
@@ -264,6 +272,8 @@ examples:
             #address-cells = <1>;
             #size-cells = <0>;
 
+            atomic-threshold-us = <10000>;
+
             scmi_devpd: protocol@11 {
                 reg = <0x11>;
                 #power-domain-cells = <1>;

From 05976c5f3bff8f8b5230da4b39f7cd6dfba9943e Mon Sep 17 00:00:00 2001
From: Cristian Marussi
Date: Thu, 17 Feb 2022 13:12:31 +0000
Subject: [PATCH 25/28] firmware: arm_scmi: Support optional system wide
 atomic-threshold-us

An SCMI agent can be configured system-wide with a well-defined atomic
threshold: only SCMI synchronous commands whose latency has been
advertised by the SCMI platform to be lower than or equal to this
configured threshold will be considered for atomic operations, when
requested and if supported by the underlying transport at all.

Link: https://lore.kernel.org/r/20220217131234.50328-6-cristian.marussi@arm.com
Signed-off-by: Cristian Marussi
Signed-off-by: Sudeep Holla
---
 drivers/firmware/arm_scmi/driver.c | 27 ++++++++++++++++++++++++---
 include/linux/scmi_protocol.h      |  5 ++++-
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 4fd5a35ffa2f..7436c475e708 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -131,6 +131,12 @@ struct scmi_protocol_instance {
  *		  MAX_PROTOCOLS_IMP elements allocated by the base protocol
  * @active_protocols: IDR storing device_nodes for protocols actually defined
  *		      in the DT and confirmed as implemented by fw.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ *		      in microseconds, for atomic operations.
+ *		      Only SCMI synchronous commands reported by the platform
+ *		      to have an execution latency lesser-equal to the threshold
+ *		      should be considered for atomic mode operation: such
+ *		      decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
@@ -149,6 +155,7 @@ struct scmi_info {
 	struct mutex protocols_mtx;
 	u8 *protocols_imp;
 	struct idr active_protocols;
+	unsigned int atomic_threshold;
 	void *notify_priv;
 	struct list_head node;
 	int users;
@@ -1406,15 +1413,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
 * SCMI instance is configured as atomic.
 *
 * @handle: A reference to the SCMI platform instance.
+ * @atomic_threshold: An optional return value for the system wide currently
+ *		      configured threshold for atomic operations.
 *
 * Return: True if transport is configured as atomic
 */
-static bool scmi_is_transport_atomic(const struct scmi_handle *handle)
+static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
+				     unsigned int *atomic_threshold)
 {
+	bool ret;
 	struct scmi_info *info = handle_to_scmi_info(handle);
 
-	return info->desc->atomic_enabled &&
-		is_transport_polling_capable(info);
+	ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+	if (ret && atomic_threshold)
+		*atomic_threshold = info->atomic_threshold;
+
+	return ret;
 }
 
 static inline
@@ -1954,6 +1968,13 @@ static int scmi_probe(struct platform_device *pdev)
 	handle->version = &info->version;
 	handle->devm_protocol_get = scmi_devm_protocol_get;
 	handle->devm_protocol_put = scmi_devm_protocol_put;
+
+	/* System wide atomic threshold for atomic ops .. if any */
+	if (!of_property_read_u32(np, "atomic-threshold-us",
+				  &info->atomic_threshold))
+		dev_info(dev,
+			 "SCMI System wide atomic threshold set to %d us\n",
+			 info->atomic_threshold);
 	handle->is_transport_atomic = scmi_is_transport_atomic;
 
 	if (desc->ops->link_supplier) {
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 9f895cb81818..fdf6bd83cc59 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -619,6 +619,8 @@ struct scmi_notify_ops {
 *			be interested to know if they can assume SCMI
 *			command transactions associated to this handle will
 *			never sleep and act accordingly.
+ *			An optional atomic threshold value could be returned
+ *			where configured.
 * @notify_ops: pointer to set of notifications related operations
 */
 struct scmi_handle {
@@ -629,7 +631,8 @@ struct scmi_handle {
 		(*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
 				     struct scmi_protocol_handle **ph);
 	void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
-	bool (*is_transport_atomic)(const struct scmi_handle *handle);
+	bool (*is_transport_atomic)(const struct scmi_handle *handle,
+				    unsigned int *atomic_threshold);
 
 	const struct scmi_notify_ops *notify_ops;
 };

From b7bd36f2e9430a58aefdc326f8e6653e9b000243 Mon Sep 17 00:00:00 2001
From: Cristian Marussi
Date: Thu, 17 Feb 2022 13:12:32 +0000
Subject: [PATCH 26/28] firmware: arm_scmi: Add atomic support to clock protocol

Introduce new _atomic variants of the SCMI clock protocol enable/disable
operations: when an atomic operation is required, the xfer poll_completion
flag is set for that transaction.

Link: https://lore.kernel.org/r/20220217131234.50328-7-cristian.marussi@arm.com
Signed-off-by: Cristian Marussi
Signed-off-by: Sudeep Holla
---
 drivers/firmware/arm_scmi/clock.c | 22 +++++++++++++++++++---
 include/linux/scmi_protocol.h     |  3 +++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 35b56c8ba0c0..72f930c0e3e2 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -273,7 +273,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
 
 static int
 scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
-		      u32 config)
+		      u32 config, bool atomic)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -284,6 +284,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
 	if (ret)
 		return ret;
 
+	t->hdr.poll_completion = atomic;
+
 	cfg = t->tx.buf;
 	cfg->id = cpu_to_le32(clk_id);
 	cfg->attributes = cpu_to_le32(config);
@@ -296,12 +298,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
 
 static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
-	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
+	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
 }
 
 static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
-	return scmi_clock_config_set(ph, clk_id, 0);
+	return scmi_clock_config_set(ph, clk_id, 0, false);
+}
+
+static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
+				    u32 clk_id)
+{
+	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
+}
+
+static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
+				     u32 clk_id)
+{
+	return scmi_clock_config_set(ph, clk_id, 0, true);
 }
 
 static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
@@ -330,6 +344,8 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
 	.rate_set = scmi_clock_rate_set,
 	.enable = scmi_clock_enable,
 	.disable = scmi_clock_disable,
+	.enable_atomic = scmi_clock_enable_atomic,
+	.disable_atomic = scmi_clock_disable_atomic,
 };
 
 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index fdf6bd83cc59..306e576835f8 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -82,6 +82,9 @@ struct scmi_clk_proto_ops {
 			u64 rate);
 	int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
 	int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
+	int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
+	int (*disable_atomic)(const struct scmi_protocol_handle *ph,
+			      u32 clk_id);
 };
 
 /**

From 18f295b758b227857133f680a397a42e83c62f3f Mon Sep 17 00:00:00 2001
From: Cristian Marussi
Date: Thu, 17 Feb 2022 13:12:33 +0000
Subject: [PATCH 27/28] firmware: arm_scmi: Add support for clock_enable_latency

An SCMI platform can optionally advertise an enable latency typically
associated with a specific clock resource: add support for parsing such
an optional message field and expose the information in the usual
publicly accessible clock descriptor.

Link: https://lore.kernel.org/r/20220217131234.50328-8-cristian.marussi@arm.com
Signed-off-by: Cristian Marussi
Signed-off-by: Sudeep Holla
---
 drivers/firmware/arm_scmi/clock.c | 12 +++++++++---
 include/linux/scmi_protocol.h     |  1 +
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 72f930c0e3e2..cf6fed6dec77 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -27,7 +27,8 @@ struct scmi_msg_resp_clock_protocol_attributes {
 struct scmi_msg_resp_clock_attributes {
 	__le32 attributes;
 #define	CLOCK_ENABLE	BIT(0)
-	    u8 name[SCMI_MAX_STR_SIZE];
+	u8 name[SCMI_MAX_STR_SIZE];
+	__le32 clock_enable_latency;
 };
 
 struct scmi_clock_set_config {
@@ -116,10 +117,15 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
 	attr = t->rx.buf;
 
 	ret = ph->xops->do_xfer(ph, t);
-	if (!ret)
+	if (!ret) {
 		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
-	else
+		/* Is optional field clock_enable_latency provided ? */
+		if (t->rx.len == sizeof(*attr))
+			clk->enable_latency =
+				le32_to_cpu(attr->clock_enable_latency);
+	} else {
 		clk->name[0] = '\0';
+	}
 
 	ph->xops->xfer_put(ph, t);
 	return ret;
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 306e576835f8..b87551f41f9f 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -42,6 +42,7 @@ struct scmi_revision_info {
 
 struct scmi_clock_info {
 	char name[SCMI_MAX_STR_SIZE];
+	unsigned int enable_latency;
 	bool rate_discrete;
 	union {
 		struct {

From 38a0e5b735d6152d334d2f94b925a1c8a93bd7eb Mon Sep 17 00:00:00 2001
From: Cristian Marussi
Date: Thu, 17 Feb 2022 13:12:34 +0000
Subject: [PATCH 28/28] clk: scmi: Support atomic clock enable/disable API

Also support atomic enable/disable clk_ops, besides the bare non-atomic
ones (prepare/unprepare), when the underlying SCMI transport is configured
to support atomic transactions for synchronous commands.

Compare the SCMI system-wide configured atomic threshold latency time and
the per-clock advertised enable latency (if any) to choose whether to
provide sleeping prepare/unprepare or atomic enable/disable.
Link: https://lore.kernel.org/r/20220217131234.50328-9-cristian.marussi@arm.com
Cc: Michael Turquette
Cc: Stephen Boyd
Cc: linux-clk@vger.kernel.org
Acked-by: Stephen Boyd
Signed-off-by: Cristian Marussi
Signed-off-by: Sudeep Holla
---
 drivers/clk/clk-scmi.c | 71 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 60 insertions(+), 11 deletions(-)

diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 1e357d364ca2..2c7a830ce308 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -2,7 +2,7 @@
 /*
  * System Control and Power Interface (SCMI) Protocol based clock driver
  *
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
  */
 
 #include
@@ -88,21 +88,51 @@ static void scmi_clk_disable(struct clk_hw *hw)
 	scmi_proto_clk_ops->disable(clk->ph, clk->id);
 }
 
+static int scmi_clk_atomic_enable(struct clk_hw *hw)
+{
+	struct scmi_clk *clk = to_scmi_clk(hw);
+
+	return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id);
+}
+
+static void scmi_clk_atomic_disable(struct clk_hw *hw)
+{
+	struct scmi_clk *clk = to_scmi_clk(hw);
+
+	scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id);
+}
+
+/*
+ * We can provide enable/disable atomic callbacks only if the underlying SCMI
+ * transport for an SCMI instance is configured to handle SCMI commands in an
+ * atomic manner.
+ *
+ * When no SCMI atomic transport support is available we instead provide only
+ * the prepare/unprepare API, as allowed by the clock framework when atomic
+ * calls are not available.
+ *
+ * Two distinct sets of clk_ops are provided since we could have multiple SCMI
+ * instances with different underlying transport quality, so they cannot be
+ * shared.
+ */
 static const struct clk_ops scmi_clk_ops = {
 	.recalc_rate = scmi_clk_recalc_rate,
 	.round_rate = scmi_clk_round_rate,
 	.set_rate = scmi_clk_set_rate,
-	/*
-	 * We can't provide enable/disable callback as we can't perform the same
-	 * in atomic context. Since the clock framework provides standard API
-	 * clk_prepare_enable that helps cases using clk_enable in non-atomic
-	 * context, it should be fine providing prepare/unprepare.
-	 */
 	.prepare = scmi_clk_enable,
 	.unprepare = scmi_clk_disable,
 };
 
-static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
+static const struct clk_ops scmi_atomic_clk_ops = {
+	.recalc_rate = scmi_clk_recalc_rate,
+	.round_rate = scmi_clk_round_rate,
+	.set_rate = scmi_clk_set_rate,
+	.enable = scmi_clk_atomic_enable,
+	.disable = scmi_clk_atomic_disable,
+};
+
+static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
+			     const struct clk_ops *scmi_ops)
 {
 	int ret;
 	unsigned long min_rate, max_rate;
@@ -110,7 +140,7 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
 	struct clk_init_data init = {
 		.flags = CLK_GET_RATE_NOCACHE,
 		.num_parents = 0,
-		.ops = &scmi_clk_ops,
+		.ops = scmi_ops,
 		.name = sclk->info->name,
 	};
 
@@ -139,6 +169,8 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
 static int scmi_clocks_probe(struct scmi_device *sdev)
 {
 	int idx, count, err;
+	unsigned int atomic_threshold;
+	bool is_atomic;
 	struct clk_hw **hws;
 	struct clk_hw_onecell_data *clk_data;
 	struct device *dev = &sdev->dev;
@@ -168,8 +200,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
 	clk_data->num = count;
 	hws = clk_data->hws;
 
+	is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
+
 	for (idx = 0; idx < count; idx++) {
 		struct scmi_clk *sclk;
+		const struct clk_ops *scmi_ops;
 
 		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
 		if (!sclk)
@@ -184,13 +219,27 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
 		sclk->id = idx;
 		sclk->ph = ph;
 
-		err = scmi_clk_ops_init(dev, sclk);
+		/*
+		 * Note that when transport is atomic but SCMI protocol did not
+		 * specify (or support) an enable_latency associated with a
+		 * clock, we default to use atomic operations mode.
+		 */
+		if (is_atomic &&
+		    sclk->info->enable_latency <= atomic_threshold)
+			scmi_ops = &scmi_atomic_clk_ops;
+		else
+			scmi_ops = &scmi_clk_ops;
+
+		err = scmi_clk_ops_init(dev, sclk, scmi_ops);
 		if (err) {
 			dev_err(dev, "failed to register clock %d\n", idx);
 			devm_kfree(dev, sclk);
 			hws[idx] = NULL;
 		} else {
-			dev_dbg(dev, "Registered clock:%s\n", sclk->info->name);
+			dev_dbg(dev, "Registered clock:%s%s\n",
+				sclk->info->name,
+				scmi_ops == &scmi_atomic_clk_ops ?
+				" (atomic ops)" : "");
 			hws[idx] = &sclk->hw;
 		}
 	}
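
Taken together, the clock patches above mean a consumer driver can gate an SCMI
clock from atomic context whenever clk-scmi selected scmi_atomic_clk_ops for
that clock (atomic-capable transport and, where advertised, an enable latency
within atomic-threshold-us). The sketch below, which is not part of the series,
illustrates that usage pattern; the "foo" device, its spinlock and the helper
names are hypothetical and only serve to show the split between a sleeping
prepare step and an atomic enable/disable path.

/* Hypothetical consumer of an SCMI-provided clock, for illustration only. */
#include <linux/clk.h>
#include <linux/spinlock.h>

struct foo_dev {
	struct clk *clk;	/* clock handed out by the SCMI clock protocol */
	spinlock_t lock;
};

/* Process context: prepare may sleep (e.g. with a non-atomic SCMI transport). */
static int foo_init_clk(struct foo_dev *foo)
{
	return clk_prepare(foo->clk);
}

/*
 * Atomic context: clk_enable()/clk_disable() only reach the SCMI platform
 * here when clk-scmi picked scmi_atomic_clk_ops for this clock; with the
 * plain scmi_clk_ops the actual gating already happened in clk_prepare().
 */
static int foo_kick_hw(struct foo_dev *foo)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&foo->lock, flags);
	ret = clk_enable(foo->clk);
	if (!ret) {
		/* ... program the device while the clock is running ... */
		clk_disable(foo->clk);
	}
	spin_unlock_irqrestore(&foo->lock, flags);

	return ret;
}

Either way the same consumer code remains legal: when only the non-atomic
prepare/unprepare ops are registered, clk_enable() and clk_disable() simply
become no-ops and the gating is performed by clk_prepare()/clk_unprepare().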