@@ -488,6 +488,108 @@ int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element
return 0;
}
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ep_ring_element *el;
+ struct mhi_ep_ring *ring;
+ size_t bytes_to_write;
+ enum mhi_ev_ccs code;
+ void *read_from_loc;
+ u32 buf_remaining;
+ u64 write_to_loc;
+ u32 tre_len;
+ int ret = 0;
+
+ if (dir == DMA_TO_DEVICE)
+ return -EINVAL;
+
+ buf_remaining = len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ mutex_lock(&mhi_chan->lock);
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+
+ if (mhi_ep_queue_is_empty(mhi_dev, dir)) {
+ dev_err(dev, "TRE not available!\n");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = MHI_EP_TRE_GET_LEN(el);
+ if (skb->len > tre_len) {
+ dev_err(dev, "Buffer size (%d) is too large for TRE (%d)!\n",
+ skb->len, tre_len);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ bytes_to_write = min(buf_remaining, tre_len);
+ read_from_loc = skb->data + (len - buf_remaining);
+ write_to_loc = MHI_EP_TRE_GET_PTR(el);
+
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, read_from_loc, write_to_loc,
+ bytes_to_write);
+ if (ret < 0)
+ goto err_exit;
+
+ buf_remaining -= bytes_to_write;
+ /*
+ * For all TREs queued by the host for DL channel, only the EOT flag will be set.
+ * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
+ * the host so that the host can adjust the packet boundary to next TREs. Else send
+ * the EOT event to the host indicating the packet boundary.
+ */
+ if (buf_remaining)
+ code = MHI_EV_CC_OVERFLOW;
+ else
+ code = MHI_EV_CC_EOT;
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, bytes_to_write, code);
+ if (ret) {
+ dev_err(dev, "Error sending completion event");
+ goto err_exit;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ } while (buf_remaining);
+
+ /*
+ * During high network traffic, sometimes the DL doorbell interrupt from the host is missed
+ * by the endpoint. So manually check for the write pointer update here so that we don't run
+ * out of buffer due to missing interrupts.
+ */
+ if (ring->rd_offset + 1 == ring->wr_offset) {
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ goto err_exit;
+ }
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -277,4 +277,17 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
*/
bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+/**
+ * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @skb: SKB to be queued to the host
+ * @len: Length of the SKB data to be sent
+ * @mflags: MHI Endpoint transfer flags
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+
#endif
Add support for queueing SKBs over MHI bus in the MHI endpoint stack.
The mhi_ep_queue_skb() API will be used by client networking drivers to
queue SKBs to the host over MHI.

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/ep/main.c | 102 ++++++++++++++++++++++++++++++++++++++
 include/linux/mhi_ep.h    |  13 +++++
 2 files changed, 115 insertions(+)
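
For reference, below is a minimal, hypothetical sketch of how a client networking driver might drive the new API from its TX path. The mhi_ep_net_ctx structure, the tx_queue/tx_work plumbing and the statistics handling are illustrative assumptions and not part of this series; only mhi_ep_queue_skb() and its signature come from the patch above, and the use of MHI_EOT from the existing enum mhi_flags is an assumption. Since mhi_ep_queue_skb() takes the channel mutex and may sleep, the sketch calls it from a work item rather than directly from ndo_start_xmit():

#include <linux/dma-direction.h>
#include <linux/mhi_ep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

/* Hypothetical per-netdev context; not part of this patch */
struct mhi_ep_net_ctx {
	struct mhi_ep_device *mdev;
	struct net_device *ndev;
	struct sk_buff_head tx_queue;
	struct work_struct tx_work;
};

/*
 * TX worker: ndo_start_xmit() is assumed to have appended the SKB to
 * tx_queue and scheduled this work, since mhi_ep_queue_skb() takes a
 * mutex and therefore cannot be called from atomic context.
 */
static void mhi_ep_net_tx_work(struct work_struct *work)
{
	struct mhi_ep_net_ctx *ctx = container_of(work, struct mhi_ep_net_ctx, tx_work);
	struct sk_buff *skb;
	int ret;

	while ((skb = skb_dequeue(&ctx->tx_queue))) {
		/* Device-to-host (DL) transfers use DMA_FROM_DEVICE */
		ret = mhi_ep_queue_skb(ctx->mdev, DMA_FROM_DEVICE, skb, skb->len, MHI_EOT);
		if (ret) {
			/* Channel not running or no TRE available: drop the packet */
			ctx->ndev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			continue;
		}

		ctx->ndev->stats.tx_packets++;
		ctx->ndev->stats.tx_bytes += skb->len;

		/* The payload has been copied to the host, so the SKB can be freed here */
		dev_consume_skb_any(skb);
	}
}

The error handling in this sketch simply drops the packet; a real driver would more likely stop the netif queue when no host TREs are available (mhi_ep_queue_is_empty() reporting an empty transfer ring) and resume transmission once the host queues more buffers.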