diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c
new file mode 100644
--- /dev/null
+++ b/sound/soc/intel/catpt/dsp.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <linux/dma-mapping.h>
+#include <linux/acpi_dma.h>
+#include <linux/firmware.h>
+#include "core.h"
+#include "messages.h"
+#include "registers.h"
+#include "trace.h"
+
+static void catpt_dma_transfer_complete(void *arg)
+{
+ struct catpt_dev *cdev = arg;
+
+ dev_dbg(cdev->dev, "%s\n", __func__);
+}
+
+static bool catpt_dma_filter(struct dma_chan *chan, void *param)
+{
+ return chan->device->dev == (struct device *)param;
+}
+
+#define CATPT_DMA_DEVID 1 /* dma engine used */
+#define CATPT_DMA_MAXBURST 0x3
+#define CATPT_DMA_DSP_ADDR_MASK 0xFFF00000
+
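+/*
+ * Request a DMA_SLAVE | DMA_MEMCPY capable channel from the controller
+ * registered in catpt_dmac_probe() and preconfigure it for 32-bit wide
+ * transfers. Returns ERR_PTR(-EPROBE_DEFER) when no channel is available yet.
+ */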
+struct dma_chan *catpt_dma_request_config_chan(struct catpt_dev *cdev)
+{
+ struct dma_slave_config config;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ chan = dma_request_channel(mask, catpt_dma_filter, cdev->dev);
+ if (!chan) {
+ dev_err(cdev->dev, "request channel failed\n");
+ dump_stack();
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_MEM_TO_DEV;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_maxburst = CATPT_DMA_MAXBURST;
+ config.dst_maxburst = CATPT_DMA_MAXBURST;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(cdev->dev, "slave config failed: %d\n", ret);
+ dma_release_channel(chan);
+ return ERR_PTR(ret);
+ }
+
+ return chan;
+}
+
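+/*
+ * Synchronous memcpy over the DW DMA engine. Demand mode for the selected
+ * channel is enabled in HMDC for the duration of the transfer and cleared
+ * again once the descriptor completes or fails.
+ */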
+static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan,
+ dma_addr_t dst_addr, dma_addr_t src_addr,
+ size_t size)
+{
+ struct dma_async_tx_descriptor *desc;
+ enum dma_status status;
+
+ desc = dmaengine_prep_dma_memcpy(chan, dst_addr, src_addr, size,
+ DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(cdev->dev, "prep dma memcpy failed\n");
+ return -EIO;
+ }
+
+ /* enable demand mode for dma channel */
+ catpt_updatel_shim(cdev, HMDC,
+ CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id),
+ CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id));
+ desc->callback = catpt_dma_transfer_complete;
+ desc->callback_param = cdev;
+ dmaengine_submit(desc);
+
+ status = dma_wait_for_async_tx(desc);
+ catpt_updatel_shim(cdev, HMDC,
+ CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), 0);
+
+ return (status == DMA_COMPLETE) ? 0 : -EPROTO;
+}
+
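+/*
+ * For host <-> DSP copies, the DSP-side address is OR-ed with
+ * CATPT_DMA_DSP_ADDR_MASK so the engine targets DSP memory space.
+ */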
+int catpt_dma_memcpy_todsp(struct catpt_dev *cdev, struct dma_chan *chan,
+ dma_addr_t dst_addr, dma_addr_t src_addr,
+ size_t size)
+{
+ return catpt_dma_memcpy(cdev, chan, dst_addr | CATPT_DMA_DSP_ADDR_MASK,
+ src_addr, size);
+}
+
+int catpt_dma_memcpy_fromdsp(struct catpt_dev *cdev, struct dma_chan *chan,
+ dma_addr_t dst_addr, dma_addr_t src_addr,
+ size_t size)
+{
+ return catpt_dma_memcpy(cdev, chan, dst_addr,
+ src_addr | CATPT_DMA_DSP_ADDR_MASK, size);
+}
+
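+/*
+ * The DW DMA controller is registered directly by this driver - its MMIO
+ * registers live within the ADSP BAR at a platform specific offset and it
+ * shares the ADSP interrupt line.
+ */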
+int catpt_dmac_probe(struct catpt_dev *cdev)
+{
+ struct dw_dma_chip *dmac;
+ struct resource res;
+ int ret;
+
+ dmac = devm_kzalloc(cdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ memset(&res, 0, sizeof(res));
+ res.start = cdev->lpe_base +
+ cdev->pdata->host_dma_offset[CATPT_DMA_DEVID];
+ res.end = res.start + (CATPT_DMA_REGS_SIZE - 1);
+ res.flags = IORESOURCE_MEM;
+ dmac->dev = cdev->dev;
+ dmac->irq = cdev->irq;
+
+ dmac->regs = devm_ioremap_resource(cdev->dev, &res);
+ if (IS_ERR(dmac->regs))
+ return PTR_ERR(dmac->regs);
+
+ ret = dma_coerce_mask_and_coherent(cdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0)
+ return ret;
+ /*
+ * Caller is responsible for putting device in D0 to allow
+ * for I/O and memory access before probing DW
+ */
+ ret = dw_dma_probe(dmac);
+ if (ret < 0)
+ return ret;
+
+ cdev->dmac = dmac;
+ return 0;
+}
+
+void catpt_dmac_remove(struct catpt_dev *cdev)
+{
+ /*
+ * As do_dma_remove() juggles with pm_runtime_get_xxx() and
+ * pm_runtime_put_xxx() while both ADSP and DW 'devices' are part of
+ * the same module, the caller makes sure pm_runtime_disable() is
+ * invoked before removing DW to prevent postmortem resume and suspend.
+ */
+ dw_dma_remove(cdev->dmac);
+}
+
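+/*
+ * Update SRAM power gating bits in VDRTCTL0 for the given memory bank.
+ * A cleared bit powers the corresponding block on; newly enabled blocks are
+ * read once ("sanitized") before any real use.
+ */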
+static void catpt_dsp_set_srampge(struct catpt_dev *cdev,
+ struct catpt_mbank *sram, unsigned long new)
+{
+ unsigned long old;
+ u32 off = sram->start;
+ u32 b = __ffs(sram->mask);
+
+ old = catpt_readl_pci(cdev, VDRTCTL0) & sram->mask;
+ trace_catpt_updatel("SRAMPGE", sram->mask, old, new);
+ if (old == new)
+ return;
+
+ catpt_updatel_pci(cdev, VDRTCTL0, sram->mask, new);
+ udelay(60);
+
+ /*
+ * dummy read as the very first access after block enable
+ * to prevent byte loss in future operations
+ */
+ for_each_clear_bit_from(b, &new, fls(sram->mask)) {
+ u8 buf[4];
+
+ /* newly enabled: new bit=0 while old bit=1 */
+ if (test_bit(b, &old)) {
+ dev_dbg(cdev->dev, "sanitize block %ld: off 0x%08x\n",
+ (b - __ffs(sram->mask)), off);
+ memcpy_fromio(buf, cdev->lpe_ba + off, sizeof(buf));
+ }
+ off += CATPT_MEMBLOCK_SIZE;
+ }
+}
+
+void catpt_dsp_update_srampge(struct catpt_dev *cdev, struct catpt_mbank *sram)
+{
+ struct catpt_mregion *reg;
+ unsigned long new = 0;
+
+ /* flag all busy blocks */
+ list_for_each_entry(reg, &sram->region_list, node) {
+ u32 h, l;
+
+ if (!reg->busy)
+ continue;
+ h = (reg->end - sram->start) / CATPT_MEMBLOCK_SIZE;
+ l = (reg->start - sram->start) / CATPT_MEMBLOCK_SIZE;
+ new |= GENMASK(h, l);
+ }
+
+ /* shift to the mask's start bit and invert, as blocks are ON when bit=0 */
+ new <<= __ffs(sram->mask);
+ new = ~(new) & sram->mask;
+
+ /* disable core clock gating */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);
+
+ catpt_dsp_set_srampge(cdev, sram, new);
+
+ /* enable core clock gating */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
+ CATPT_VDRTCTL2_DCLCGE);
+}
+
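+/* Assert or release core STALL and wait for the CS register to reflect it. */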
+int catpt_dsp_stall(struct catpt_dev *cdev, bool stall)
+{
+ u32 reg, val;
+
+ val = stall ? CATPT_CS_STALL : 0;
+ catpt_updatel_shim(cdev, CS, CATPT_CS_STALL, val);
+
+ return catpt_readl_poll_shim(cdev, CS,
+ reg, (reg & CATPT_CS_STALL) == val,
+ 500, 10000);
+}
+
+static int catpt_dsp_reset(struct catpt_dev *cdev, bool reset)
+{
+ u32 reg, val;
+
+ val = reset ? CATPT_CS_RST : 0;
+ catpt_updatel_shim(cdev, CS, CATPT_CS_RST, val);
+
+ return catpt_readl_poll_shim(cdev, CS,
+ reg, (reg & CATPT_CS_RST) == val,
+ 500, 10000);
+}
+
+void lpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
+{
+ u32 val;
+
+ val = enable ? LPT_VDRTCTL0_APLLSE : 0;
+ catpt_updatel_pci(cdev, VDRTCTL0, LPT_VDRTCTL0_APLLSE, val);
+}
+
+void wpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
+{
+ u32 val;
+
+ val = enable ? WPT_VDRTCTL2_APLLSE : 0;
+ catpt_updatel_pci(cdev, VDRTCTL2, WPT_VDRTCTL2_APLLSE, val);
+}
+
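+/*
+ * Switch between the low-power and high-performance audio clock. Optionally
+ * waits for the DSP to signal WAIT state first, ensures no clock change is
+ * already in flight before and after updating CS, then shuts down or wakes
+ * up the PLL to match.
+ */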
+static int catpt_dsp_select_lpclock(struct catpt_dev *cdev, bool lp, bool waiti)
+{
+ u32 mask, reg, val;
+ int ret;
+
+ mutex_lock(&cdev->clk_mutex);
+
+ val = lp ? CATPT_CS_LPCS : 0;
+ reg = catpt_readl_shim(cdev, CS) & CATPT_CS_LPCS;
+ trace_catpt_updatel("LPCS", CATPT_CS_LPCS, reg, val);
+
+ if (reg == val) {
+ mutex_unlock(&cdev->clk_mutex);
+ return 0;
+ }
+
+ if (waiti) {
+ /* wait for DSP to signal WAIT state */
+ ret = catpt_readl_poll_shim(cdev, ISD,
+ reg, (reg & CATPT_ISD_DCPWM),
+ 500, 10000);
+ if (ret < 0) {
+ dev_warn(cdev->dev, "await WAITI timeout\n");
+ mutex_unlock(&cdev->clk_mutex);
+ return ret;
+ }
+ }
+
+ ret = catpt_readl_poll_shim(cdev, CLKCTL,
+ reg, !(reg & CATPT_CLKCTL_CFCIP),
+ 500, 10000);
+ if (ret < 0)
+ dev_warn(cdev->dev, "clock change still in progress\n");
+
+ /* default to DSP core & audio fabric high clock */
+ val |= CATPT_CS_DCS_HIGH;
+ mask = CATPT_CS_LPCS | CATPT_CS_DCS;
+ catpt_updatel_shim(cdev, CS, mask, val);
+
+ ret = catpt_readl_poll_shim(cdev, CLKCTL,
+ reg, !(reg & CATPT_CLKCTL_CFCIP),
+ 500, 10000);
+ if (ret < 0)
+ dev_warn(cdev->dev, "clock change still in progress\n");
+
+ /* update PLL accordingly */
+ cdev->pdata->pll_shutdown(cdev, lp);
+
+ mutex_unlock(&cdev->clk_mutex);
+ return 0;
+}
+
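+/* Drop to the low-power clock only when no stream is currently prepared. */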
+int catpt_dsp_update_lpclock(struct catpt_dev *cdev)
+{
+ struct catpt_stream_runtime *stream;
+ bool lp;
+
+ if (list_empty(&cdev->stream_list))
+ return catpt_dsp_select_lpclock(cdev, true, true);
+
+ lp = true;
+ list_for_each_entry(stream, &cdev->stream_list, node) {
+ if (stream->prepared) {
+ lp = false;
+ break;
+ }
+ }
+
+ return catpt_dsp_select_lpclock(cdev, lp, true);
+}
+
+/* bring registers to their defaults as HW won't reset itself */
+static void catpt_dsp_set_regs_defaults(struct catpt_dev *cdev)
+{
+ int i;
+
+ catpt_writel_shim(cdev, CS, CATPT_CS_DEFAULT);
+ catpt_writel_shim(cdev, ISC, CATPT_ISC_DEFAULT);
+ catpt_writel_shim(cdev, ISD, CATPT_ISD_DEFAULT);
+ catpt_writel_shim(cdev, IMC, CATPT_IMC_DEFAULT);
+ catpt_writel_shim(cdev, IMD, CATPT_IMD_DEFAULT);
+ catpt_writel_shim(cdev, IPCC, CATPT_IPCC_DEFAULT);
+ catpt_writel_shim(cdev, IPCD, CATPT_IPCD_DEFAULT);
+ catpt_writel_shim(cdev, CLKCTL, CATPT_CLKCTL_DEFAULT);
+ catpt_writel_shim(cdev, CS2, CATPT_CS2_DEFAULT);
+ catpt_writel_shim(cdev, LTRC, CATPT_LTRC_DEFAULT);
+ catpt_writel_shim(cdev, HMDC, CATPT_HMDC_DEFAULT);
+
+ for (i = 0; i < CATPT_SSP_COUNT; i++) {
+ catpt_writel_ssp(cdev, i, SSC0, CATPT_SSP_SSC0_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSC1, CATPT_SSP_SSC1_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSS, CATPT_SSP_SSS_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSIT, CATPT_SSP_SSIT_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSD, CATPT_SSP_SSD_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSTO, CATPT_SSP_SSTO_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSPSP, CATPT_SSP_SSPSP_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSTSA, CATPT_SSP_SSTSA_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSRSA, CATPT_SSP_SSRSA_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSTSS, CATPT_SSP_SSTSS_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSC2, CATPT_SSP_SSC2_DEFAULT);
+ catpt_writel_ssp(cdev, i, SSPSP2, CATPT_SSP_SSPSP2_DEFAULT);
+ }
+}
+
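+/*
+ * LPT power down: hold the core in reset, switch SSPs to the 24 MHz clock,
+ * fall back to the low-power clock, gate all SRAM banks and enter D3hot.
+ */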
+int lpt_dsp_power_down(struct catpt_dev *cdev)
+{
+ catpt_dsp_reset(cdev, true);
+
+ /* set 24 MHz clock for both SSPs */
+ catpt_updatel_shim(cdev, CS, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
+ CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
+ catpt_dsp_select_lpclock(cdev, true, false);
+
+ /* SRAM power gating all */
+ catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->dram.mask);
+ catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->iram.mask);
+
+ /* set D3 */
+ catpt_updatel_pci(cdev, PMCS, CATPT_PMCS_PS, CATPT_PMCS_PS_D3HOT);
+ udelay(50);
+
+ return 0;
+}
+
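+/*
+ * LPT power up: ungate all SRAM, return to D0, restore the high clock and
+ * SSP clock source, then release the core from reset.
+ */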
+int lpt_dsp_power_up(struct catpt_dev *cdev)
+{
+ /* SRAM power gating none */
+ catpt_dsp_set_srampge(cdev, &cdev->dram, 0);
+ catpt_dsp_set_srampge(cdev, &cdev->iram, 0);
+
+ /* set D0 */
+ catpt_updatel_pci(cdev, PMCS, CATPT_PMCS_PS, 0);
+ udelay(100);
+
+ catpt_dsp_select_lpclock(cdev, false, false);
+ catpt_updatel_shim(cdev, CS,
+ CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
+ CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
+ udelay(50);
+
+ catpt_dsp_reset(cdev, false);
+ /* generate int deassert msg to fix inverted int logic */
+ catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB | CATPT_IMC_IPCCD, 0);
+
+ return 0;
+}
+
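+/*
+ * WPT power down mirrors the LPT sequence but additionally reconfigures
+ * clock gating and restores shim and SSP registers to their defaults before
+ * entering D3hot.
+ */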
+int wpt_dsp_power_down(struct catpt_dev *cdev)
+{
+ u32 mask, val;
+
+ /* disable core clock gating */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);
+
+ catpt_dsp_reset(cdev, true);
+ /* set 24 MHz clock for both SSPs */
+ catpt_updatel_shim(cdev, CS, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
+ CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
+ catpt_dsp_select_lpclock(cdev, true, false);
+ /* disable MCLK */
+ catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, 0);
+
+ catpt_dsp_set_regs_defaults(cdev);
+
+ /* switch clock gating */
+ mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
+ val = mask & (~CATPT_VDRTCTL2_DTCGE);
+ catpt_updatel_pci(cdev, VDRTCTL2, mask, val);
+ /* enable DTCGE separately */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DTCGE,
+ CATPT_VDRTCTL2_DTCGE);
+
+ /* SRAM power gating all */
+ catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->dram.mask);
+ catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->iram.mask);
+ mask = WPT_VDRTCTL0_D3SRAMPGD | WPT_VDRTCTL0_D3PGD;
+ catpt_updatel_pci(cdev, VDRTCTL0, mask, WPT_VDRTCTL0_D3PGD);
+
+ /* set D3 */
+ catpt_updatel_pci(cdev, PMCS, CATPT_PMCS_PS, CATPT_PMCS_PS_D3HOT);
+ udelay(50);
+
+ /* enable core clock gating */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
+ CATPT_VDRTCTL2_DCLCGE);
+ udelay(50);
+
+ return 0;
+}
+
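+/*
+ * WPT power up: reverse of the power down sequence - ungate SRAM, restore
+ * register defaults, bring back MCLK and the high clock, then release the
+ * core from reset.
+ */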
+int wpt_dsp_power_up(struct catpt_dev *cdev)
+{
+ u32 mask, val;
+
+ /* disable core clock gating */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);
+
+ /* switch clock gating */
+ mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
+ val = mask & (~CATPT_VDRTCTL2_DTCGE);
+ catpt_updatel_pci(cdev, VDRTCTL2, mask, val);
+
+ /* set D0 */
+ catpt_updatel_pci(cdev, PMCS, CATPT_PMCS_PS, 0);
+
+ /* SRAM power gating none */
+ mask = WPT_VDRTCTL0_D3SRAMPGD | WPT_VDRTCTL0_D3PGD;
+ catpt_updatel_pci(cdev, VDRTCTL0, mask, mask);
+ catpt_dsp_set_srampge(cdev, &cdev->dram, 0);
+ catpt_dsp_set_srampge(cdev, &cdev->iram, 0);
+
+ catpt_dsp_set_regs_defaults(cdev);
+
+ /* restore MCLK */
+ catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, CATPT_CLKCTL_SMOS);
+ catpt_dsp_select_lpclock(cdev, false, false);
+ /* set 24 MHz clock for both SSPs */
+ catpt_updatel_shim(cdev, CS, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
+ CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
+ catpt_dsp_reset(cdev, false);
+
+ /* enable core clock gating */
+ catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
+ CATPT_VDRTCTL2_DCLCGE);
+
+ /* generate int deassert msg to fix inverted int logic */
+ catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB | CATPT_IMC_IPCCD, 0);
+
+ return 0;
+}
Implement DSP lifecycle functions such as core RESET and STALL, SRAM
power control and LP clock selection. This also adds functions for
handling transport over the DW DMA controller.

Signed-off-by: Cezary Rojewski <cezary.rojewski@intel.com>
---
 sound/soc/intel/catpt/dsp.c | 491 ++++++++++++++++++++++++++++++++++++
 1 file changed, 491 insertions(+)
 create mode 100644 sound/soc/intel/catpt/dsp.c