@@ -22,6 +22,7 @@ struct avs_dev;
struct avs_tplg;
struct avs_tplg_library;
struct avs_soc_component;
+struct avs_ipc_msg;
/*
* struct avs_dsp_ops - Platform-specific DSP operations
@@ -48,6 +49,8 @@ struct avs_dsp_ops {
int (* const log_buffer_offset)(struct avs_dev *, u32);
int (* const log_buffer_status)(struct avs_dev *, union avs_notify_msg *);
int (* const coredump)(struct avs_dev *, union avs_notify_msg *);
+ bool (* const d0ix_toggle)(struct avs_dev *, struct avs_ipc_msg *, bool);
+ int (* const set_d0ix)(struct avs_dev *, bool);
};
#define avs_dsp_op(adev, op, ...) \
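The two callbacks added above are per-platform hooks: d0ix_toggle() lets the platform decide, per message, whether the DSP must first be woken to D0i0 and whether a return to d0ix may be scheduled afterwards, while set_d0ix() performs the actual transition. As a rough, purely illustrative sketch (not part of the patch; the example_* names are made up and the streaming flag is hardcoded for brevity, assuming avs_ipc_set_d0ix() from the messaging layer is visible here), a platform could wire them up like this:

static bool example_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake)
{
	/* Permit waking to D0i0 before, and rescheduling d0ix after, any message. */
	return true;
}

static int example_set_d0ix(struct avs_dev *adev, bool enable)
{
	/* Ask firmware to enter or leave the low-power state via the SET_D0IX IPC. */
	return avs_ipc_set_d0ix(adev, enable, false);
}

static const struct avs_dsp_ops example_dsp_ops = {
	/* ... remaining callbacks ... */
	.d0ix_toggle = example_d0ix_toggle,
	.set_d0ix = example_set_d0ix,
};
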
@@ -191,6 +194,9 @@ struct avs_ipc {
struct completion busy_completion;
struct work_struct recovery_work;
+ struct delayed_work d0ix_work;
+ atomic_t d0ix_disable_depth;
+ bool in_d0ix;
};
#define AVS_EIPC EREMOTEIO
@@ -227,6 +233,11 @@ int avs_dsp_send_msg_timeout(struct avs_dev *adev,
struct avs_ipc_msg *reply, int timeout);
int avs_dsp_send_msg(struct avs_dev *adev,
struct avs_ipc_msg *request, struct avs_ipc_msg *reply);
+/* Two variants below are for messages that control DSP power states. */
+int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, int timeout, bool wake_d0i0);
+int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, bool wake_d0i0);
int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev,
struct avs_ipc_msg *request, int timeout);
int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request);
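While the regular senders above now handle d0ix transparently (wake before sending, reschedule afterwards), the *_pm_* variants exist for IPCs that manipulate the DSP power state themselves: they never reschedule d0ix and leave the wake decision to the caller. A minimal, purely illustrative usage sketch (hypothetical helper, assuming the driver's internal headers):

static int example_request_power_change(struct avs_dev *adev, struct avs_ipc_msg *request)
{
	/*
	 * wake_d0i0 = true forces an exit from d0ix before sending;
	 * no d0ix work is scheduled once the message completes.
	 */
	return avs_dsp_send_pm_msg(adev, request, NULL, true);
}
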
@@ -234,6 +245,9 @@ void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable);
int avs_ipc_init(struct avs_ipc *ipc, struct device *dev);
void avs_ipc_block(struct avs_ipc *ipc);
+int avs_dsp_disable_d0ix(struct avs_dev *adev);
+int avs_dsp_enable_d0ix(struct avs_dev *adev);
+
/* Firmware resources management */
int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry);
@@ -152,6 +152,15 @@ static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
adev->core_refs[core_id]++;
if (adev->core_refs[core_id] == 1) {
+ /*
+ * No cores other than the main core can be running for the DSP
+ * to achieve d0ix. A conscious SET_D0IX IPC failure is permitted;
+ * the d0ix power state simply will not be attempted anymore.
+ */
+ ret = avs_dsp_disable_d0ix(adev);
+ if (ret && ret != -AVS_EIPC)
+ goto err_disable_d0ix;
+
ret = avs_dsp_enable(adev, mask);
if (ret)
goto err_enable_dsp;
@@ -160,6 +169,8 @@ static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
return 0;
err_enable_dsp:
+ avs_dsp_enable_d0ix(adev);
+err_disable_d0ix:
adev->core_refs[core_id]--;
err:
dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
@@ -185,6 +196,9 @@ static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
ret = avs_dsp_disable(adev, mask);
if (ret)
goto err;
+
+ /* Match disable_d0ix in avs_dsp_get_core(). */
+ avs_dsp_enable_d0ix(adev);
}
return 0;
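Put differently, every reference to a core other than the main one keeps d0ix disabled for its entire lifetime, and only the last avs_dsp_put_core() re-arms the delayed transition. A purely illustrative helper (hypothetical, and assumed to live in the same file since both functions are static):

static int example_run_on_extra_core(struct avs_dev *adev, u32 core_id)
{
	int ret;

	ret = avs_dsp_get_core(adev, core_id);	/* first user disables d0ix */
	if (ret)
		return ret;

	/* ... keep the extra core busy ... */

	return avs_dsp_put_core(adev, core_id);	/* last user re-enables d0ix */
}
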
@@ -13,6 +13,82 @@
#include "registers.h"
#define AVS_IPC_TIMEOUT_MS 300
+#define AVS_D0IX_DELAY_MS 300
+
+static int
+avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
+{
+ struct avs_ipc *ipc = adev->ipc;
+ int ret;
+
+ /* Is transition required? */
+ if (ipc->in_d0ix == enable)
+ return 0;
+
+ ret = avs_dsp_op(adev, set_d0ix, enable);
+ if (ret) {
+ /* Prevent further d0ix attempts on conscious IPC failure. */
+ if (ret == -AVS_EIPC)
+ atomic_inc(&ipc->d0ix_disable_depth);
+
+ ipc->in_d0ix = false;
+ return ret;
+ }
+
+ ipc->in_d0ix = enable;
+ return 0;
+}
+
+static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
+{
+ if (atomic_read(&adev->ipc->d0ix_disable_depth))
+ return;
+
+ mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
+ msecs_to_jiffies(AVS_D0IX_DELAY_MS));
+}
+
+static void avs_dsp_d0ix_work(struct work_struct *work)
+{
+ struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);
+
+ avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
+}
+
+static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
+{
+ struct avs_ipc *ipc = adev->ipc;
+
+ if (!atomic_read(&ipc->d0ix_disable_depth)) {
+ cancel_delayed_work_sync(&ipc->d0ix_work);
+ return avs_dsp_set_d0ix(adev, false);
+ }
+
+ return 0;
+}
+
+int avs_dsp_disable_d0ix(struct avs_dev *adev)
+{
+ struct avs_ipc *ipc = adev->ipc;
+
+ /* Prevent power gating (PG) only on the first disable. */
+ if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) {
+ cancel_delayed_work_sync(&ipc->d0ix_work);
+ return avs_dsp_set_d0ix(adev, false);
+ }
+
+ return 0;
+}
+
+int avs_dsp_enable_d0ix(struct avs_dev *adev)
+{
+ struct avs_ipc *ipc = adev->ipc;
+
+ if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
+ queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
+ msecs_to_jiffies(AVS_D0IX_DELAY_MS));
+ return 0;
+}
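avs_dsp_disable_d0ix() and avs_dsp_enable_d0ix() form a nestable pair around d0ix_disable_depth: only the first disable cancels pending d0ix work and wakes the DSP, and only the matching last enable queues the delayed transition again. A purely illustrative sketch of the happy path (hypothetical caller):

static int example_d0ix_critical_section(struct avs_dev *adev)
{
	int ret;

	ret = avs_dsp_disable_d0ix(adev);	/* depth 0 -> 1: wake to D0i0 */
	if (ret && ret != -AVS_EIPC)
		return ret;

	avs_dsp_disable_d0ix(adev);		/* depth 1 -> 2: counter only */
	/* ... work that must not race with a d0ix transition ... */
	avs_dsp_enable_d0ix(adev);		/* depth 2 -> 1: still disabled */

	return avs_dsp_enable_d0ix(adev);	/* depth 1 -> 0: re-arm d0ix work */
}
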
static void avs_dsp_recovery(struct avs_dev *adev)
{
@@ -391,10 +467,35 @@ static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request
return ret;
}
+static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
+ bool schedule_d0ix)
+{
+ int ret;
+
+ if (wake_d0i0) {
+ ret = avs_dsp_wake_d0i0(adev, request);
+ if (ret)
+ return ret;
+ }
+
+ ret = avs_dsp_do_send_msg(adev, request, reply, timeout);
+ if (ret)
+ return ret;
+
+ if (schedule_d0ix)
+ avs_dsp_schedule_d0ix(adev, request);
+
+ return 0;
+}
+
int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
struct avs_ipc_msg *reply, int timeout)
{
- return avs_dsp_do_send_msg(adev, request, reply, timeout);
+ bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
+ bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);
+
+ return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix);
}
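
Note that a return to d0ix is scheduled only after a successful send, and each successful send for which the platform's d0ix_toggle() allows it pushes the deadline out by AVS_D0IX_DELAY_MS through mod_delayed_work(). In effect, a burst of IPC traffic keeps the DSP in D0i0 and the transition fires only once the link has been idle for the full delay. Purely illustrative (hypothetical helper):

static int example_send_burst(struct avs_dev *adev, struct avs_ipc_msg *msgs, size_t num)
{
	size_t i;
	int ret;

	for (i = 0; i < num; i++) {
		/* Each successful send re-arms the d0ix timer for another AVS_D0IX_DELAY_MS. */
		ret = avs_dsp_send_msg(adev, &msgs[i], NULL);
		if (ret)
			return ret;	/* a failed send does not re-arm the d0ix work */
	}

	return 0;
}
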
int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
@@ -403,6 +504,19 @@ int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
}
+int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, int timeout, bool wake_d0i0)
+{
+ return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false);
+}
+
+int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, bool wake_d0i0)
+{
+ return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
+ wake_d0i0);
+}
+
static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
struct avs_ipc *ipc = adev->ipc;
@@ -463,6 +577,7 @@ int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
ipc->ready = false;
ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
+ INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
init_completion(&ipc->done_completion);
init_completion(&ipc->busy_completion);
spin_lock_init(&ipc->rx_lock);
@@ -475,4 +590,6 @@ void avs_ipc_block(struct avs_ipc *ipc)
{
ipc->ready = false;
cancel_work_sync(&ipc->recovery_work);
+ cancel_delayed_work_sync(&ipc->d0ix_work);
+ ipc->in_d0ix = false;
}
@@ -432,7 +432,7 @@ int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup)
request.data = &dx;
request.size = sizeof(dx);
- ret = avs_dsp_send_msg(adev, &request, NULL);
+ ret = avs_dsp_send_pm_msg(adev, &request, NULL, true);
if (ret)
avs_ipc_err(adev, &request, "set dx", ret);
@@ -456,7 +456,7 @@ int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming)
request.header = msg.val;
- ret = avs_dsp_send_msg(adev, &request, NULL);
+ ret = avs_dsp_send_pm_msg(adev, &request, NULL, false);
if (ret)
avs_ipc_err(adev, &request, "set d0ix", ret);