@@ -16,6 +16,7 @@
#define MHICFG 0x10
#define CHDBOFF 0x18
#define ERDBOFF 0x20
+#define MISCOFF 0x24 /* MISC register: holds the first capability offset */
#define BHIOFF 0x28
#define BHIEOFF 0x2c
#define DEBUGOFF 0x30
@@ -113,6 +114,9 @@
#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8)
#define MHISTATUS_SYSERR_MASK BIT(2)
#define MHISTATUS_READY_MASK BIT(0)
+#define MISC_CAP_MASK GENMASK(31, 0) /* MISC register: full 32-bit first-capability offset */
+#define CAP_CAPID_MASK GENMASK(31, 24) /* capability entry: capability ID field */
+#define CAP_NEXT_CAP_MASK GENMASK(23, 12) /* capability entry: offset of the next capability */
/* Command Ring Element macros */
/* No operation command */
@@ -204,6 +208,15 @@
#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
MHI_PKT_TYPE_COALESCING))
+enum mhi_capability_type { /* capability IDs found while walking the MISC capability list */
+ MHI_CAP_ID_INTX = 0x1,
+ MHI_CAP_ID_TIME_SYNC = 0x2,
+ MHI_CAP_ID_BW_SCALE = 0x3,
+ MHI_CAP_ID_TSC_TIME_SYNC = 0x4,
+ MHI_CAP_ID_MAX_TRB_LEN = 0x5,
+ MHI_CAP_ID_MAX, /* sentinel: first out-of-range ID, used as validity bound */
+};
+
enum mhi_pkt_type {
MHI_PKT_TYPE_INVALID = 0x0,
MHI_PKT_TYPE_NOOP_CMD = 0x1,
@@ -467,6 +467,54 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
return ret;
}
+/*
+ * mhi_find_capability - Search the MISC capability linked list for a
+ * given capability ID.
+ * @mhi_cntrl: MHI controller
+ * @capability: capability ID to locate (enum mhi_capability_type)
+ * @offset: on success, set to the MMIO offset of the matching entry
+ *
+ * Return: 0 on success, -ENXIO if the capability is absent or the list
+ * is malformed, or the error code from a failed register read.
+ */
+static int mhi_find_capability(struct mhi_controller *mhi_cntrl, u32 capability, u32 *offset)
+{
+	u32 val, cur_cap, next_offset;
+	int ret;
+
+	/* Get the first supported capability offset */
+	ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISCOFF, MISC_CAP_MASK, offset);
+	if (ret)
+		return ret;
+
+	do {
+		/* Reject offsets that fall outside the mapped MMIO region */
+		if (*offset >= mhi_cntrl->reg_len)
+			return -ENXIO;
+
+		/*
+		 * mhi_read_reg() goes through the controller's readl-style
+		 * accessor, which already returns the value in CPU byte
+		 * order; applying le32_to_cpu() here would double-swap on
+		 * big-endian hosts, so no conversion is done.
+		 */
+		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, *offset, &val);
+		if (ret)
+			return ret;
+
+		cur_cap = FIELD_GET(CAP_CAPID_MASK, val);
+		next_offset = FIELD_GET(CAP_NEXT_CAP_MASK, val);
+		/* An out-of-range ID means the list is corrupt; stop walking */
+		if (cur_cap >= MHI_CAP_ID_MAX)
+			return -ENXIO;
+
+		if (cur_cap == capability)
+			return 0;
+
+		*offset = next_offset;
+	} while (next_offset);
+
+	return -ENXIO;
+}
+
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
u32 val;