@@ -52,29 +52,105 @@ static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
-static void __scm_smc_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res, bool atomic)
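+/*
+ * Build a WAITQ_RESUME call that asks the firmware to resume the
+ * suspended SMC call identified by @smc_call_ctx.
+ */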
+static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
{
- int retry_count = 0;
+ memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
+
+ resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
+
+ resume->args[1] = QCOM_SCM_ARGS(1);
+
+ resume->args[2] = smc_call_ctx;
+}
+
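+/*
+ * Ask the firmware which wait queue needs waking, returning its context,
+ * the wake flags and whether further wake-ups are pending.
+ */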
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+{
+ int ret;
+ struct arm_smccc_args get_wq_ctx = {0};
+ struct arm_smccc_res get_wq_res;
+
+ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+ /* Guaranteed to return only success or error, no WAITQ_* */
+ __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
+ ret = get_wq_res.a0;
+ if (ret)
+ return ret;
+
+ *wq_ctx = get_wq_res.a1;
+ *flags = get_wq_res.a2;
+ *more_pending = get_wq_res.a3;
+
+ return 0;
+}
+
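+/*
+ * Issue the SMC call and, for as long as the firmware answers with
+ * QCOM_SCM_WAITQ_SLEEP, wait on the completion matching the returned
+ * wq_ctx before resuming the call with WAITQ_RESUME.
+ */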
+static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ struct qcom_scm *scm;
+ struct completion *wq = NULL;
+ u32 wq_ctx, smc_call_ctx, flags;
+
+ do {
+ __scm_smc_do_quirk(smc, res);
+
+ if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
+ wq_ctx = res->a1;
+ smc_call_ctx = res->a2;
+ flags = res->a3;
+
+ if (!dev)
+ return -EPROBE_DEFER;
+
+ scm = dev_get_drvdata(dev);
+ wq = qcom_scm_lookup_wq(scm, wq_ctx);
+ if (IS_ERR_OR_NULL(wq)) {
+				dev_err(dev, "No waitqueue found for wq_ctx %u: %ld\n",
+ wq_ctx, PTR_ERR(wq));
+ return PTR_ERR(wq);
+ }
+
+ wait_for_completion(wq);
+ fill_wq_resume_args(smc, smc_call_ctx);
+ wq = NULL;
+ }
+ } while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
+
+ return 0;
+}
+
+static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int ret, retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
- return;
+ return 0;
}
do {
mutex_lock(&qcom_scm_lock);
- __scm_smc_do_quirk(smc, res);
+ ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ return ret;
+
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ return 0;
}
@@ -83,7 +159,7 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int i;
+ int i, ret;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
@@ -135,13 +211,17 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
- __scm_smc_do(&smc, &smc_res, atomic);
+	/* The ret error check follows the args_virt cleanup */
+ ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
+ if (ret)
+ return ret;
+
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
@@ -3,7 +3,9 @@
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/platform_device.h>
+#include <linux/idr.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
@@ -13,9 +15,12 @@
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
#include <linux/arm-smccc.h>
#include "qcom_scm.h"
@@ -27,12 +32,20 @@ module_param(download_mode, bool, 0);
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)
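+/* Wait-queue state shared between the SMC call path and the IRQ handler */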
+struct qcom_scm_waitq {
+ struct idr idr;
+ /* control access to IDR */
+ spinlock_t idr_lock;
+ struct completion waitq_comp;
+};
+
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
struct clk *iface_clk;
struct clk *bus_clk;
struct icc_path *path;
+ struct qcom_scm_waitq waitq;
struct reset_controller_dev reset;
/* control access to the interconnect path */
@@ -63,6 +76,9 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
BIT(2), BIT(1), BIT(4), BIT(6)
};
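+/* Wake-up flags returned by the WAITQ_GET_WQ_CTX call */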
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -1325,11 +1341,69 @@ bool qcom_scm_is_available(void)
}
EXPORT_SYMBOL(qcom_scm_is_available);
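+/*
+ * Map a firmware wq_ctx to its completion, allocating an IDR entry that
+ * points at the shared completion on first use.
+ */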
+struct completion *qcom_scm_lookup_wq(struct qcom_scm *scm, u32 wq_ctx)
+{
+ int err;
+ unsigned long flags;
+ u32 wq_ctx_idr = wq_ctx;
+ struct completion *wq = NULL;
+
+ spin_lock_irqsave(&scm->waitq.idr_lock, flags);
+ wq = idr_find(&scm->waitq.idr, wq_ctx);
+ if (wq)
+ goto out;
+
+ wq = &scm->waitq.waitq_comp;
+
+ err = idr_alloc_u32(&scm->waitq.idr, wq, &wq_ctx_idr,
+ U32_MAX, GFP_ATOMIC);
+ if (err < 0)
+ wq = ERR_PTR(err);
+
+out:
+ spin_unlock_irqrestore(&scm->waitq.idr_lock, flags);
+ return wq;
+}
+
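+/*
+ * Threaded handler for the wait-queue IRQ: drain all pending wake-up
+ * requests from the firmware and complete the matching wait queues.
+ */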
+static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
+{
+ int ret;
+ struct qcom_scm *scm = data;
+ struct completion *wq_to_wake;
+ u32 wq_ctx, flags, more_pending = 0;
+
+ do {
+ ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
+ if (ret) {
+ dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
+ goto out;
+ }
+
+ wq_to_wake = qcom_scm_lookup_wq(scm, wq_ctx);
+ if (IS_ERR_OR_NULL(wq_to_wake)) {
+			dev_err(scm->dev, "No waitqueue found for wq_ctx %u: %ld\n",
+ wq_ctx, PTR_ERR(wq_to_wake));
+ goto out;
+ }
+
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
+ flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
+			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ goto out;
+ }
+
+ complete(wq_to_wake);
+ } while (more_pending);
+
+out:
+ return IRQ_HANDLED;
+}
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
unsigned long clks;
- int ret;
+ int irq, ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
@@ -1399,9 +1473,29 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ platform_set_drvdata(pdev, scm);
+
__scm = scm;
__scm->dev = &pdev->dev;
+ spin_lock_init(&__scm->waitq.idr_lock);
+ idr_init(&__scm->waitq.idr);
+ init_completion(&__scm->waitq.waitq_comp);
+
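+	/* An IRQ for firmware wait-queue wake-ups is optional */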
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -ENXIO)
+ return irq;
+ } else {
+ ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", __scm);
+ if (ret < 0) {
+ dev_err(scm->dev, "Failed to request qcom-scm irq: %d\n", ret);
+ idr_destroy(&__scm->waitq.idr);
+ return ret;
+ }
+ }
+
__get_convention();
/*
@@ -1417,6 +1511,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
+ unsigned long flags;
+
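+	/* Release the wait-queue context IDR */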
+ spin_lock_irqsave(&__scm->waitq.idr_lock, flags);
+ idr_destroy(&__scm->waitq.idr);
+ spin_unlock_irqrestore(&__scm->waitq.idr_lock, flags);
+
/* Clean shutdown, disable download mode to allow normal restart */
if (download_mode)
qcom_scm_set_download_mode(false);
@@ -60,6 +60,10 @@ struct qcom_scm_res {
u64 result[MAX_QCOM_SCM_RETS];
};
+struct qcom_scm;
+extern struct completion *qcom_scm_lookup_wq(struct qcom_scm *scm, u32 wq_ctx);
+extern int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
+
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
@@ -129,6 +133,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
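+/* Wait-queue (WAITQ) service and command IDs */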
+#define QCOM_SCM_SVC_WAITQ 0x24
+#define QCOM_SCM_WAITQ_RESUME 0x02
+#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
@@ -137,6 +145,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_WAITQ_SLEEP 2
static inline int qcom_scm_remap_error(int err)
{