@@ -41,7 +41,11 @@ typedef struct fslmc_vfio_container {
} fslmc_vfio_container;
extern char *fslmc_container;
+
+__rte_internal
int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index);
+
+__rte_internal
int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index);
int rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
@@ -14,6 +14,7 @@
struct fsl_mc_io;
+__rte_internal
int dpbp_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpbp_id,
@@ -42,10 +43,12 @@ int dpbp_destroy(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint32_t obj_id);
+__rte_internal
int dpbp_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
+__rte_internal
int dpbp_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -55,6 +58,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
uint16_t token,
int *en);
+__rte_internal
int dpbp_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -70,6 +74,7 @@ struct dpbp_attr {
uint16_t bpid;
};
+__rte_internal
int dpbp_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -88,6 +93,7 @@ int dpbp_get_api_version(struct fsl_mc_io *mc_io,
uint16_t *major_ver,
uint16_t *minor_ver);
+__rte_internal
int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -181,6 +181,7 @@ struct dpci_rx_queue_cfg {
int order_preservation_en;
};
+__rte_internal
int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -228,6 +229,7 @@ int dpci_get_api_version(struct fsl_mc_io *mc_io,
uint16_t *major_ver,
uint16_t *minor_ver);
+__rte_internal
int dpci_set_opr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -235,6 +237,7 @@ int dpci_set_opr(struct fsl_mc_io *mc_io,
uint8_t options,
struct opr_cfg *cfg);
+__rte_internal
int dpci_get_opr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -20,6 +20,7 @@ struct fsl_mc_io;
*/
#define DPCON_INVALID_DPIO_ID (int)(-1)
+__rte_internal
int dpcon_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpcon_id,
@@ -77,6 +78,7 @@ struct dpcon_attr {
uint8_t num_priorities;
};
+__rte_internal
int dpcon_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -23,11 +23,13 @@ struct fsl_mc_io;
*/
#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
+__rte_internal
int dpdmai_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpdmai_id,
uint16_t *token);
+__rte_internal
int dpdmai_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -54,10 +56,12 @@ int dpdmai_destroy(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint32_t object_id);
+__rte_internal
int dpdmai_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
+__rte_internal
int dpdmai_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -82,6 +86,7 @@ struct dpdmai_attr {
uint8_t num_of_queues;
};
+__rte_internal
int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -148,6 +153,7 @@ struct dpdmai_rx_queue_cfg {
};
+__rte_internal
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -168,6 +174,7 @@ struct dpdmai_rx_queue_attr {
uint32_t fqid;
};
+__rte_internal
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -184,6 +191,7 @@ struct dpdmai_tx_queue_attr {
uint32_t fqid;
};
+__rte_internal
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -13,11 +13,13 @@
struct fsl_mc_io;
+__rte_internal
int dpio_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpio_id,
uint16_t *token);
+__rte_internal
int dpio_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -57,10 +59,12 @@ int dpio_destroy(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint32_t object_id);
+__rte_internal
int dpio_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
+__rte_internal
int dpio_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -70,10 +74,12 @@ int dpio_is_enabled(struct fsl_mc_io *mc_io,
uint16_t token,
int *en);
+__rte_internal
int dpio_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
+__rte_internal
int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -84,12 +90,14 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
uint16_t token,
uint8_t *sdest);
+__rte_internal
int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int dpcon_id,
uint8_t *channel_index);
+__rte_internal
int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -119,6 +127,7 @@ struct dpio_attr {
uint32_t clk;
};
+__rte_internal
int dpio_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
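Of the dpio functions tagged in this file, the static-dequeue pair is the least self-explanatory: the channel index it returns feeds straight into the portal API tagged later in this patch. A hedged sketch, assuming mc_io, token, dpcon_id and an affined portal s already exist:

    uint8_t ch_idx;

    if (dpio_add_static_dequeue_channel(mc_io, 0, token, dpcon_id, &ch_idx))
            return -1;
    /* enable push dequeues from the channel mapped at ch_idx */
    qbman_swp_push_set(s, ch_idx, 1);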
@@ -34,6 +34,7 @@ struct mc_version {
uint32_t revision;
};
+__rte_internal
int mc_get_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
struct mc_version *mc_ver_info);
@@ -48,6 +49,7 @@ struct mc_soc_version {
uint32_t pvr;
};
+__rte_internal
int mc_get_soc_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
struct mc_soc_version *mc_platform_info);
@@ -80,6 +80,7 @@ enum mc_cmd_status {
#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
+__rte_internal
int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
@@ -36,20 +36,25 @@ extern uint8_t dpaa2_eqcr_size;
extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
/* Affine a DPIO portal to current processing thread */
+__rte_internal
int dpaa2_affine_qbman_swp(void);
/* Affine additional DPIO portal to current crypto processing thread */
+__rte_internal
int dpaa2_affine_qbman_ethrx_swp(void);
/* allocate memory for FQ - dq storage */
+__rte_internal
int
dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage);
/* free memory for FQ- dq storage */
+__rte_internal
void
dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage);
/* free the enqueue response descriptors */
+__rte_internal
uint32_t
dpaa2_free_eq_descriptors(void);
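These are the first calls a DPAA2 worker thread makes; a sketch of the expected order, assuming the caller runs on an EAL lcore, q_storage points at a queue_storage_info_t, and nonzero means failure:

    /* bind a QBMAN software portal to this thread, once */
    if (dpaa2_affine_qbman_swp())
            return -1;
    /* per-queue dequeue storage must exist before any pull */
    if (dpaa2_alloc_dq_storage(q_storage))
            return -1;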
@@ -426,11 +426,19 @@ void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
{
rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
}
+__rte_internal
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
+
+__rte_internal
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+
+__rte_internal
int dpaa2_dpbp_supported(void);
+__rte_internal
struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
+
+__rte_internal
void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);
#endif
@@ -24,7 +24,10 @@ uint8_t verb;
uint8_t reserved2[29];
};
+__rte_internal
int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
struct qbman_fq_query_np_rslt *r);
+
+__rte_internal
uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
@@ -117,6 +117,7 @@ uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
* @p: the given software portal object.
* @mask: The value to set in SWP_ISR register.
*/
+__rte_internal
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
/**
@@ -286,6 +287,7 @@ void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
* rather by specifying the index (from 0 to 15) that has been mapped to the
* desired channel.
*/
+__rte_internal
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
/* ------------------- */
@@ -325,6 +327,7 @@ enum qbman_pull_type_e {
* default/starting state.
* @d: the pull dequeue descriptor to be cleared.
*/
+__rte_internal
void qbman_pull_desc_clear(struct qbman_pull_desc *d);
/**
@@ -340,6 +343,7 @@ void qbman_pull_desc_clear(struct qbman_pull_desc *d);
* the caller provides in 'storage_phys'), and 'stash' controls whether or not
* those writes to main-memory express a cache-warming attribute.
*/
+__rte_internal
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
struct qbman_result *storage,
uint64_t storage_phys,
@@ -349,6 +353,7 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
* @d: the pull dequeue descriptor to be set.
* @numframes: number of frames to be set, must be between 1 and 16, inclusive.
*/
+__rte_internal
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
uint8_t numframes);
/**
@@ -372,6 +377,7 @@ void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
* qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
* @fqid: the frame queue index of the given FQ.
*/
+__rte_internal
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
/**
@@ -407,6 +413,7 @@ void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
+__rte_internal
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
/* -------------------------------- */
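The pull-descriptor setters and qbman_swp_pull() tagged above form one sequence; a minimal sketch, assuming an affined portal s, a frame queue fqid, dq storage at storage/storage_phys, and the final stash argument of the full set_storage() prototype that the hunk truncates:

    struct qbman_pull_desc pulldesc;

    qbman_pull_desc_clear(&pulldesc);
    qbman_pull_desc_set_numframes(&pulldesc, 16);   /* 1..16 */
    qbman_pull_desc_set_fq(&pulldesc, fqid);
    qbman_pull_desc_set_storage(&pulldesc, storage, storage_phys, 1);
    while (qbman_swp_pull(s, &pulldesc))
            ;       /* -EBUSY: portal not ready, retry */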
@@ -421,12 +428,14 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
+__rte_internal
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
/**
* qbman_swp_prefetch_dqrr_next() - prefetch the next DQRR entry.
* @s: the software portal object.
*/
+__rte_internal
void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
/**
@@ -435,6 +444,7 @@ void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
* @s: the software portal object.
* @dq: the DQRR entry to be consumed.
*/
+__rte_internal
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
/**
@@ -442,6 +452,7 @@ void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
* @s: the software portal object.
* @dqrr_index: the DQRR index entry to be consumed.
*/
+__rte_internal
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
/**
@@ -450,6 +461,7 @@ void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
*
* Return dqrr index.
*/
+__rte_internal
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
/**
@@ -460,6 +472,7 @@ uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
*
* Return dqrr entry object.
*/
+__rte_internal
struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
/* ------------------------------------------------- */
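The DQRR accessors pair up in the polling loop; roughly, with s an affined portal:

    const struct qbman_result *dq;

    qbman_swp_prefetch_dqrr_next(s);
    while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
            qbman_swp_prefetch_dqrr_next(s);
            /* ... process the dequeue result ... */
            qbman_swp_dqrr_consume(s, dq);
    }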
@@ -485,6 +498,7 @@ struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
* Return 1 for getting a valid dequeue result, or 0 for not getting a valid
* dequeue result.
*/
+__rte_internal
int qbman_result_has_new_result(struct qbman_swp *s,
struct qbman_result *dq);
@@ -497,8 +511,10 @@ int qbman_result_has_new_result(struct qbman_swp *s,
* Return 1 for getting a valid dequeue result, or 0 for not getting a valid
* dequeue result.
*/
+__rte_internal
int qbman_check_command_complete(struct qbman_result *dq);
+__rte_internal
int qbman_check_new_result(struct qbman_result *dq);
/* -------------------------------------------------------- */
@@ -624,6 +640,7 @@ int qbman_result_is_FQPN(const struct qbman_result *dq);
*
* Return the state field.
*/
+__rte_internal
uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
/**
@@ -658,6 +675,7 @@ static inline int qbman_result_DQ_is_pull_complete(
*
* Return seqnum.
*/
+__rte_internal
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
/**
@@ -667,6 +685,7 @@ uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
*
* Return odpid.
*/
+__rte_internal
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
/**
@@ -699,6 +718,7 @@ uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
*
* Return the frame queue context.
*/
+__rte_internal
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
/**
@@ -707,6 +727,7 @@ uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
*
* Return the frame descriptor.
*/
+__rte_internal
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
/* State-change notifications (FQDAN/CDAN/CSCN/...). */
@@ -717,6 +738,7 @@ const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
*
* Return the state in the notification.
*/
+__rte_internal
uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
/**
@@ -850,6 +872,7 @@ struct qbman_eq_response {
* default/starting state.
* @d: the given enqueue descriptor.
*/
+__rte_internal
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
/* Exactly one of the following descriptor "actions" should be set. (Calling
@@ -870,6 +893,7 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d);
* @response_success: 1 = enqueue with response always; 0 = enqueue with
* rejections returned on a FQ.
*/
+__rte_internal
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
/**
* qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
@@ -881,6 +905,7 @@ void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
* @incomplete: indicates whether this is the last fragment using the same
* sequence number.
*/
+__rte_internal
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
uint16_t opr_id, uint16_t seqnum, int incomplete);
@@ -915,6 +940,7 @@ void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
* data structure.) 'stash' controls whether or not the write to main-memory
* expresses a cache-warming attribute.
*/
+__rte_internal
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
uint64_t storage_phys,
int stash);
@@ -929,6 +955,7 @@ void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
* result "storage" before issuing an enqueue, and use any non-zero 'token'
* value.
*/
+__rte_internal
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
/**
@@ -944,6 +971,7 @@ void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
* @d: the enqueue descriptor
* @fqid: the id of the frame queue to be enqueued.
*/
+__rte_internal
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
/**
@@ -953,6 +981,7 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
* @qd_bin: the queuing destination bin
* @qd_prio: the queuing destination priority.
*/
+__rte_internal
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
uint16_t qd_bin, uint8_t qd_prio);
@@ -978,6 +1007,7 @@ void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
* held-active (order-preserving) FQ, whether the FQ should be parked instead of
* being rescheduled.)
*/
+__rte_internal
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
uint8_t dqrr_idx, int park);
@@ -987,6 +1017,7 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
*
* Return the fd pointer.
*/
+__rte_internal
struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);
/**
@@ -997,6 +1028,7 @@ struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);
* This value is set into the response id before issuing the enqueue command;
* it gets overwritten by qbman once the enqueue command is complete.
*/
+__rte_internal
void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);
/**
@@ -1009,6 +1041,7 @@ void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);
* copied into the enqueue response to determine if the command has been
* completed, and the response has been updated.
*/
+__rte_internal
uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
/**
@@ -1017,6 +1050,7 @@ uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
*
* Return 0 when the command is successful.
*/
+__rte_internal
uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
/**
@@ -1043,6 +1077,7 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
*/
+__rte_internal
int qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
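On the transmit side, the descriptor setters above feed the multi-enqueue call; a sketch, assuming fds holds nb_fds frame descriptors and passing NULL for the per-frame flags array of the full prototype (truncated in the hunk above):

    struct qbman_eq_desc eqdesc;
    int sent;

    qbman_eq_desc_clear(&eqdesc);
    qbman_eq_desc_set_no_orp(&eqdesc, 0);   /* 0: response only on rejection */
    qbman_eq_desc_set_fq(&eqdesc, fqid);
    sent = qbman_swp_enqueue_multiple(s, &eqdesc, fds, NULL, nb_fds);
    /* sent = frames enqueued, or -EBUSY if the EQCR was not ready */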
@@ -1060,6 +1095,7 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
*/
+__rte_internal
int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
const struct qbman_eq_desc *d,
struct qbman_fd **fd,
@@ -1076,6 +1112,7 @@ int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
*/
+__rte_internal
int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
@@ -1117,12 +1154,14 @@ struct qbman_release_desc {
* default/starting state.
* @d: the qbman release descriptor.
*/
+__rte_internal
void qbman_release_desc_clear(struct qbman_release_desc *d);
/**
* qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
* @d: the qbman release descriptor.
*/
+__rte_internal
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
/**
@@ -1141,6 +1180,7 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
*
* Return 0 for success, -EBUSY if the release command ring is not ready.
*/
+__rte_internal
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
const uint64_t *buffers, unsigned int num_buffers);
@@ -1166,6 +1206,7 @@ int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
* Return 0 for success, or negative error code if the acquire command
* fails.
*/
+__rte_internal
int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
unsigned int num_buffers);
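The release/acquire pair closes the buffer loop; a sketch, assuming buf holds the IOVA of one buffer and bpid comes from dpbp_get_attributes():

    struct qbman_release_desc releasedesc;
    int ret;

    qbman_release_desc_clear(&releasedesc);
    qbman_release_desc_set_bpid(&releasedesc, bpid);
    while (qbman_swp_release(s, &releasedesc, &buf, 1))
            ;       /* -EBUSY: release command ring not ready */

    /* reverse direction: 0 on success per the doc above */
    ret = qbman_swp_acquire(s, bpid, &buf, 1);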
@@ -1,4 +1,4 @@
-DPDK_20.0 {
+INTERNAL {
global:
dpaa2_affine_qbman_ethrx_swp;
@@ -101,7 +101,6 @@ DPDK_20.0 {
rte_fslmc_driver_unregister;
rte_fslmc_get_device_count;
rte_fslmc_object_register;
- rte_fslmc_vfio_dmamap;
rte_global_active_dqs_list;
rte_mcp_ptr_list;
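The map change and the header annotations are two halves of one mechanism: each symbol is tagged __rte_internal in its header and listed under the new INTERNAL version node in the map. Schematically, using one of the symbols from this patch:

    /* header: */
    __rte_internal
    int dpbp_enable(struct fsl_mc_io *mc_io,
                    uint32_t cmd_flags,
                    uint16_t token);

    /* rte_bus_fslmc_version.map: */
    INTERNAL {
            global:
            dpbp_enable;
            ...
    };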
@@ -162,6 +162,7 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
* A pointer to a rte_dpaa2_driver structure describing the driver
* to be registered.
*/
+__rte_internal
void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
/**
@@ -171,6 +172,7 @@ void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
* A pointer to a rte_dpaa2_driver structure describing the driver
* to be unregistered.
*/
+__rte_internal
void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
/** Helper for DPAA2 device registration from driver (eth, crypto) instance */
@@ -189,6 +191,7 @@ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
* A pointer to a rte_dpaa_object structure describing the mc object
* to be registered.
*/
+__rte_internal
void rte_fslmc_object_register(struct rte_dpaa2_object *object);
/**
@@ -200,6 +203,7 @@ void rte_fslmc_object_register(struct rte_dpaa2_object *object);
* >=0 for count; 0 indicates either that no device of the given type was
* scanned or that the device type is invalid.
*/
+__rte_internal
uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type);
/** Helper for DPAA2 object registration */
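For reference, a registration sketch in the shape these prototypes imply; the struct fields and the RTE_INIT constructor wrapper are assumptions from the wider DPDK driver model, not shown in these hunks:

    static struct rte_dpaa2_driver my_dpaa2_pmd = {
            /* .drv_type, .probe, .remove filled per driver (assumed) */
    };

    RTE_INIT(my_dpaa2_pmd_init)
    {
            rte_fslmc_driver_register(&my_dpaa2_pmd);
    }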
This patch moves the internal symbols to INTERNAL sections so that any
change in them is not reported as ABI breakage.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/bus/fslmc/fslmc_vfio.h                  |  4 ++
 drivers/bus/fslmc/mc/fsl_dpbp.h                 |  6 +++
 drivers/bus/fslmc/mc/fsl_dpci.h                 |  3 ++
 drivers/bus/fslmc/mc/fsl_dpcon.h                |  2 +
 drivers/bus/fslmc/mc/fsl_dpdmai.h               |  8 ++++
 drivers/bus/fslmc/mc/fsl_dpio.h                 |  9 ++++
 drivers/bus/fslmc/mc/fsl_dpmng.h                |  2 +
 drivers/bus/fslmc/mc/fsl_mc_cmd.h               |  1 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.h        |  5 +++
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h         |  8 ++++
 .../bus/fslmc/qbman/include/fsl_qbman_debug.h   |  3 ++
 .../fslmc/qbman/include/fsl_qbman_portal.h      | 41 +++++++++++++++++++
 drivers/bus/fslmc/rte_bus_fslmc_version.map     |  3 +-
 drivers/bus/fslmc/rte_fslmc.h                   |  4 ++
 14 files changed, 97 insertions(+), 2 deletions(-)

-- 
2.17.1